
feat(sentry10): Make on-premise work for Sentry 10 (#220)

Burak Yigit Kaya 2019-11-12 02:18:59 +03:00 committed by GitHub
parent e2b7c743af
commit 5d064c2224
17 changed files with 489 additions and 468 deletions

.dockerignore

@ -1,12 +0,0 @@
.git
.gitignore
.dockerignore
Makefile
README.md
*.pyc
*.tar
docker-compose.yml
data/
.travis.yml
install.sh
test.sh

.env Normal file

@ -0,0 +1 @@
SENTRY_EVENT_RETENTION_DAYS=90

.env.example

@ -1,3 +0,0 @@
# Run `docker-compose run web config generate-secret-key`
# to get the SENTRY_SECRET_KEY value.
SENTRY_SECRET_KEY=

.gitignore vendored

@ -68,9 +68,11 @@ target/
# https://docs.docker.com/compose/extends/
docker-compose.override.yml
# env config
.env
*.tar
data/
.vscode/tags
# custom Sentry config
sentry/sentry.conf.py
sentry/config.yml
sentry/requirements.txt

.travis.yml

@ -2,12 +2,17 @@ language: bash
services: docker
env:
- SENTRY_IMAGE=sentry:9.1.2
- SENTRY_IMAGE=getsentry/sentry:latest
- DOCKER_COMPOSE_VERSION=1.24.1
before_install:
- sudo rm /usr/local/bin/docker-compose
- curl -L https://github.com/docker/compose/releases/download/${DOCKER_COMPOSE_VERSION}/docker-compose-`uname -s`-`uname -m` > docker-compose
- chmod +x docker-compose
- sudo mv docker-compose /usr/local/bin
script:
- ./install.sh
- docker-compose run --rm web createuser --superuser --email test@sentry.io --password test123TEST
- docker-compose run --rm web createuser --superuser --email test@example.com --password test123TEST
- docker-compose up -d
- timeout 60 bash -c 'until $(curl -Isf -o /dev/null http://localhost:9000); do printf '.'; sleep 0.5; done'
- ./test.sh

Dockerfile

@ -1,2 +0,0 @@
ARG SENTRY_IMAGE
FROM ${SENTRY_IMAGE:-sentry:9.1.2}-onbuild

README.md

@ -1,21 +1,23 @@
# Sentry On-Premise [![Build Status][build-status-image]][build-status-url]
# Sentry 10 On-Premise BETA [![Build Status][build-status-image]][build-status-url]
Official bootstrap for running your own [Sentry](https://sentry.io/) with [Docker](https://www.docker.com/).
**NOTE:** If you are not installing Sentry from scratch, visit [On-Premise Stable for Sentry 9.1.2](https://github.com/getsentry/onpremise/tree/stable) as this version is not fully backward compatible.
## Requirements
* Docker 17.05.0+
* Compose 1.17.0+
* Compose 1.19.0+
## Minimum Hardware Requirements:
* You need at least 3GB RAM
* You need at least 2400MB RAM
## Setup
To get started with all the defaults, simply clone the repo and run `./install.sh` in your local check-out.
There may need to be modifications to the included `docker-compose.yml` file to accommodate your needs or your environment (such as adding GitHub credentials). If you want to perform these, do them before you run the install script.
There may need to be modifications to the included example config files (`sentry/config.example.yml` and `sentry/sentry.conf.example.py`) to accommodate your needs or your environment (such as adding GitHub credentials). If so, make your changes to copies of these files without the `.example` in their names (such as `sentry/sentry.conf.py`) before running the `install.sh` script.
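For illustration, one way to prepare those copies before installing (the paths are the ones shipped in this repo; the edits themselves are up to you):

    cp sentry/config.example.yml sentry/config.yml
    cp sentry/sentry.conf.example.py sentry/sentry.conf.py
    # edit sentry/config.yml and sentry/sentry.conf.py as needed, then:
    ./install.sh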
The recommended way to customize your configuration is using the files below, in that order:
@ -23,8 +25,14 @@ The recommended way to customize your configuration is using the files below, in
* `sentry.conf.py`
* `.env` w/ environment variables
We currently support a very minimal set of environment variables to promote other means of configuration.
If you have any issues or questions, our [Community Forum](https://forum.sentry.io/c/on-premise) is at your service!
## Event Retention
Sentry comes with a cleanup cron job that prunes events older than `90 days` by default. If you want to change that, you can change the `SENTRY_EVENT_RETENTION_DAYS` environment variable in `.env` or simply override it in your environment. If you do not want the cleanup cron, you can remove the `sentry-cleanup` service from the `docker-compose.yml` file.
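A quick sketch of overriding it (the 30-day value here is only an example):

    # either edit .env so it reads SENTRY_EVENT_RETENTION_DAYS=30,
    # or export the variable before (re)starting the stack:
    export SENTRY_EVENT_RETENTION_DAYS=30
    docker-compose up -d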
## Securing Sentry with SSL/TLS
If you'd like to protect your Sentry install with SSL/TLS, there are

cron/Dockerfile Normal file

@ -0,0 +1,6 @@
ARG BASE_IMAGE
FROM ${BASE_IMAGE}
RUN apt-get update && apt-get install -y --no-install-recommends cron && \
rm -r /var/lib/apt/lists/*
COPY entrypoint.sh /entrypoint.sh
ENTRYPOINT ["/entrypoint.sh"]

cron/entrypoint.sh Executable file

@ -0,0 +1,15 @@
#!/usr/bin/env bash
# Prior art:
# - https://git.io/fjNOg
# - https://blog.knoldus.com/running-a-cron-job-in-docker-container/
declare -p | grep -Ev 'BASHOPTS|BASH_VERSINFO|EUID|PPID|SHELLOPTS|UID' > /container.env
{ for cron_job in "$@"; do echo -e "SHELL=/bin/bash
BASH_ENV=/container.env
${cron_job} > /proc/1/fd/1 2>/proc/1/fd/2"; done } \
| sed --regexp-extended 's/\\(.)/\1/g' \
| crontab -
crontab -l
exec cron -f -l -L 15
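As a rough usage sketch, once the snuba-cleanup service from docker-compose.yml (below) is running, the crontab this entrypoint generates can be inspected from the host; the output shown is approximate:

    docker-compose exec snuba-cleanup crontab -l
    # SHELL=/bin/bash
    # BASH_ENV=/container.env
    # */5 * * * * gosu snuba snuba cleanup --dry-run False > /proc/1/fd/1 2>/proc/1/fd/2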

docker-compose.yml

@ -1,67 +1,159 @@
# NOTE: This docker-compose.yml is meant to be just an example of how
# you could accomplish this on your own. It is not intended to work in
# all use-cases and must be adapted to fit your needs. This is merely
# a guideline.
# See docs.getsentry.com/on-premise/server/ for full
# instructions
version: '3.4'
x-defaults: &defaults
x-restart-policy: &restart_policy
restart: unless-stopped
x-sentry-defaults: &sentry_defaults
<< : *restart_policy
build:
context: .
context: ./sentry
args:
- SENTRY_IMAGE
image: sentry-onpremise-local
depends_on:
- redis
- postgres
- memcached
- smtp
env_file: .env
- snuba-api
- snuba-consumer
- snuba-replacer
- symbolicator
- kafka
environment:
SENTRY_MEMCACHED_HOST: memcached
SENTRY_REDIS_HOST: redis
SENTRY_POSTGRES_HOST: postgres
SENTRY_EMAIL_HOST: smtp
SNUBA: 'http://snuba-api:1218'
volumes:
- sentry-data:/var/lib/sentry/files
- 'sentry-data:/var/lib/sentry/files'
x-snuba-defaults: &snuba_defaults
<< : *restart_policy
depends_on:
- redis
- clickhouse
- kafka
image: 'getsentry/snuba:latest'
environment:
SNUBA_SETTINGS: docker
CLICKHOUSE_HOST: clickhouse
DEFAULT_BROKERS: 'kafka:9093'
REDIS_HOST: redis
# TODO: Remove these after getsentry/snuba#353
UWSGI_MAX_REQUESTS: '10000'
UWSGI_DISABLE_LOGGING: 'true'
UWSGI_ENABLE_THREADS: 'true'
UWSGI_DIE_ON_TERM: 'true'
UWSGI_NEED_APP: 'true'
UWSGI_IGNORE_SIGPIPE: 'true'
UWSGI_IGNORE_WRITE_ERRORS: 'true'
UWSGI_DISABLE_WRITE_EXCEPTION: 'true'
services:
smtp:
restart: unless-stopped
<< : *restart_policy
image: tianon/exim4
memcached:
restart: unless-stopped
image: memcached:1.5-alpine
<< : *restart_policy
image: 'memcached:1.5-alpine'
redis:
restart: unless-stopped
image: redis:3.2-alpine
postgres:
restart: unless-stopped
image: postgres:9.5
<< : *restart_policy
image: 'redis:5.0-alpine'
volumes:
- sentry-postgres:/var/lib/postgresql/data
- 'sentry-redis:/data'
postgres:
<< : *restart_policy
image: 'postgres:9.6'
volumes:
- 'sentry-postgres:/var/lib/postgresql/data'
zookeeper:
<< : *restart_policy
image: 'confluentinc/cp-zookeeper:5.1.2'
environment:
ZOOKEEPER_CLIENT_PORT: '2181'
ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'ERROR'
ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: 'ERROR'
volumes:
- 'sentry-zookeeper:/var/lib/zookeeper'
kafka:
<< : *restart_policy
depends_on:
- zookeeper
image: 'confluentinc/cp-kafka:5.1.2'
environment:
KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
KAFKA_LISTENERS: 'INTERNAL://0.0.0.0:9093,EXTERNAL://0.0.0.0:9092'
KAFKA_ADVERTISED_LISTENERS: 'INTERNAL://kafka:9093,EXTERNAL://kafka:9092'
KAFKA_LISTENER_SECURITY_PROTOCOL_MAP: 'INTERNAL:PLAINTEXT,EXTERNAL:PLAINTEXT'
KAFKA_INTER_BROKER_LISTENER_NAME: INTERNAL
KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1'
KAFKA_LOG4J_LOGGERS: 'kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN'
KAFKA_LOG4J_ROOT_LOGLEVEL: 'ERROR'
KAFKA_TOOLS_LOG4J_LOGLEVEL: 'ERROR'
volumes:
- 'sentry-kafka:/var/lib/kafka'
clickhouse:
<< : *restart_policy
image: 'yandex/clickhouse-server:19.4'
ulimits:
nofile:
soft: 262144
hard: 262144
volumes:
- 'sentry-clickhouse:/var/lib/clickhouse'
snuba-api:
<< : *snuba_defaults
snuba-consumer:
<< : *snuba_defaults
command: consumer --auto-offset-reset=latest --max-batch-time-ms 750
snuba-replacer:
<< : *snuba_defaults
command: replacer --auto-offset-reset=latest --max-batch-size 3
snuba-cleanup:
<< : *snuba_defaults
image: snuba-cleanup-onpremise-local
build:
context: ./cron
args:
BASE_IMAGE: 'getsentry/snuba:latest'
command: '"*/5 * * * * gosu snuba snuba cleanup --dry-run False"'
symbolicator:
<< : *restart_policy
image: us.gcr.io/sentryio/symbolicator:latest
volumes:
- 'sentry-symbolicator:/data'
command: run
symbolicator-cleanup:
image: symbolicator-cleanup-onpremise-local
build:
context: ./cron
args:
BASE_IMAGE: 'us.gcr.io/sentryio/symbolicator:latest'
command: '"55 23 * * * gosu symbolicator symbolicator cleanup"'
web:
<<: *defaults
<< : *sentry_defaults
ports:
- '9000:9000'
- '9000:9000/tcp'
cron:
<<: *defaults
<< : *sentry_defaults
command: run cron
worker:
<<: *defaults
<< : *sentry_defaults
command: run worker
sentry-cleanup:
<< : *sentry_defaults
image: sentry-cleanup-onpremise-local
build:
context: ./cron
args:
BASE_IMAGE: 'sentry-onpremise-local'
command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
volumes:
sentry-data:
external: true
sentry-postgres:
external: true
sentry-data:
external: true
sentry-postgres:
external: true
sentry-redis:
external: true
sentry-zookeeper:
external: true
sentry-kafka:
external: true
sentry-clickhouse:
external: true
sentry-symbolicator:
external: true
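Because every volume above is declared `external: true`, it has to exist before `docker-compose up` can start the stack; `install.sh` creates them, but a manual sketch would be:

    docker volume create --name=sentry-data
    docker volume create --name=sentry-postgres
    docker volume create --name=sentry-redis
    docker volume create --name=sentry-zookeeper
    docker volume create --name=sentry-kafka
    docker volume create --name=sentry-clickhouse
    docker volume create --name=sentry-symbolicator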

install.sh

@ -2,9 +2,12 @@
set -e
MIN_DOCKER_VERSION='17.05.0'
MIN_COMPOSE_VERSION='1.17.0'
MIN_RAM=3072 # MB
ENV_FILE='.env'
MIN_COMPOSE_VERSION='1.19.0'
MIN_RAM=2400 # MB
SENTRY_CONFIG_PY='sentry/sentry.conf.py'
SENTRY_CONFIG_YML='sentry/config.yml'
SENTRY_EXTRA_REQUIREMENTS='sentry/requirements.txt'
DID_CLEAN_UP=0
# the cleanup function will be the exit point
@ -27,6 +30,16 @@ RAM_AVAILABLE_IN_DOCKER=$(docker run --rm busybox free -m 2>/dev/null | awk '/Me
# Compare dot-separated strings - function below is inspired by https://stackoverflow.com/a/37939589/808368
function ver () { echo "$@" | awk -F. '{ printf("%d%03d%03d", $1,$2,$3); }'; }
# Thanks to https://stackoverflow.com/a/25123013/90297 for the quick `sed` pattern
function ensure_file_from_example {
if [ -f "$1" ]; then
echo "$1 already exists, skipped creation."
else
echo "Creating $1..."
cp -n $(echo "$1" | sed 's/\.[^.]*$/.example&/') "$1"
fi
}
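# For illustration: the sed expression above derives the copy source from the
# target path by inserting ".example" before the extension, e.g.
#   $ echo 'sentry/config.yml' | sed 's/\.[^.]*$/.example&/'
#   sentry/config.example.yml
# so the function copies sentry/config.example.yml to sentry/config.yml only
# when the target does not already exist.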
if [ $(ver $DOCKER_VERSION) -lt $(ver $MIN_DOCKER_VERSION) ]; then
echo "FAIL: Expected minimum Docker version to be $MIN_DOCKER_VERSION but found $DOCKER_VERSION"
exit -1
@ -42,32 +55,36 @@ if [ "$RAM_AVAILABLE_IN_DOCKER" -lt "$MIN_RAM" ]; then
exit -1
fi
echo ""
ensure_file_from_example $SENTRY_CONFIG_PY
ensure_file_from_example $SENTRY_CONFIG_YML
ensure_file_from_example $SENTRY_EXTRA_REQUIREMENTS
echo ""
echo "Creating volumes for persistent storage..."
echo "Created $(docker volume create --name=sentry-data)."
echo "Created $(docker volume create --name=sentry-postgres)."
echo ""
if [ -f "$ENV_FILE" ]; then
echo "$ENV_FILE already exists, skipped creation."
else
echo "Creating $ENV_FILE..."
cp -n .env.example "$ENV_FILE"
fi
echo ""
echo "Building and tagging Docker images..."
echo ""
docker-compose build
echo ""
echo "Docker images built."
echo "Created $(docker volume create --name=sentry-redis)."
echo "Created $(docker volume create --name=sentry-zookeeper)."
echo "Created $(docker volume create --name=sentry-kafka)."
echo "Created $(docker volume create --name=sentry-clickhouse)."
echo "Created $(docker volume create --name=sentry-symbolicator)."
echo ""
echo "Generating secret key..."
# This is to escape the secret key to be used in sed below
SECRET_KEY=$(docker-compose run --rm web config generate-secret-key 2> /dev/null | tail -n1 | sed -e 's/[\/&]/\\&/g')
sed -i -e 's/^SENTRY_SECRET_KEY=.*$/SENTRY_SECRET_KEY='"$SECRET_KEY"'/' $ENV_FILE
echo "Secret key written to $ENV_FILE"
SECRET_KEY=$(head /dev/urandom | tr -dc "a-z0-9@#%^&*(-_=+)" | head -c 50 | sed -e 's/[\/&]/\\&/g')
sed -i -e 's/^system.secret-key:.*$/system.secret-key: '"'$SECRET_KEY'"'/' $SENTRY_CONFIG_YML
echo "Secret key written to $SENTRY_CONFIG_YML"
echo ""
echo "Building and tagging Docker images..."
echo ""
# Build the sentry onpremise image first as it is needed for the cron image
docker-compose build --force-rm web
docker-compose build --force-rm
echo ""
echo "Docker images built."
echo ""
echo "Setting up database..."
@ -83,11 +100,18 @@ else
docker-compose run --rm web upgrade
fi
echo "Boostrapping Snuba..."
docker-compose up -d kafka redis clickhouse
until $(docker-compose run --rm clickhouse clickhouse-client -h clickhouse --query="SHOW TABLES;" | grep -q sentry_local); do
docker-compose run --rm snuba-api bootstrap --force || true;
done;
echo ""
cleanup
echo ""
echo "----------------"
echo "You're all done! Run the following command get Sentry running:"
echo "You're all done! Run the following command to get Sentry running:"
echo ""
echo " docker-compose up -d"
echo ""

sentry.conf.py

@ -1,363 +0,0 @@
# This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your hearts content.
# For Docker, the following environment variables are supported:
# SENTRY_POSTGRES_HOST
# SENTRY_POSTGRES_PORT
# SENTRY_DB_NAME
# SENTRY_DB_USER
# SENTRY_DB_PASSWORD
# SENTRY_RABBITMQ_HOST
# SENTRY_RABBITMQ_USERNAME
# SENTRY_RABBITMQ_PASSWORD
# SENTRY_RABBITMQ_VHOST
# SENTRY_REDIS_HOST
# SENTRY_REDIS_PASSWORD
# SENTRY_REDIS_PORT
# SENTRY_REDIS_DB
# SENTRY_MEMCACHED_HOST
# SENTRY_MEMCACHED_PORT
# SENTRY_FILESTORE_DIR
# SENTRY_SERVER_EMAIL
# SENTRY_EMAIL_HOST
# SENTRY_EMAIL_PORT
# SENTRY_EMAIL_USER
# SENTRY_EMAIL_PASSWORD
# SENTRY_EMAIL_USE_TLS
# SENTRY_EMAIL_LIST_NAMESPACE
# SENTRY_ENABLE_EMAIL_REPLIES
# SENTRY_SMTP_HOSTNAME
# SENTRY_MAILGUN_API_KEY
# SENTRY_SINGLE_ORGANIZATION
# SENTRY_SECRET_KEY
# (slack integration)
# SENTRY_SLACK_CLIENT_ID
# SENTRY_SLACK_CLIENT_SECRET
# SENTRY_SLACK_VERIFICATION_TOKEN
# (github plugin, sso)
# GITHUB_APP_ID
# GITHUB_API_SECRET
# (github integration)
# SENTRY_GITHUB_APP_ID
# SENTRY_GITHUB_APP_CLIENT_ID
# SENTRY_GITHUB_APP_CLIENT_SECRET
# SENTRY_GITHUB_APP_WEBHOOK_SECRET
# SENTRY_GITHUB_APP_PRIVATE_KEY
# (azure devops integration)
# SENTRY_VSTS_CLIENT_ID
# SENTRY_VSTS_CLIENT_SECRET
# (bitbucket plugin)
# BITBUCKET_CONSUMER_KEY
# BITBUCKET_CONSUMER_SECRET
from sentry.conf.server import * # NOQA
from sentry.utils.types import Bool, Int
import os
import os.path
import six
CONF_ROOT = os.path.dirname(__file__)
postgres = env('SENTRY_POSTGRES_HOST') or (env('POSTGRES_PORT_5432_TCP_ADDR') and 'postgres')
if postgres:
DATABASES = {
'default': {
'ENGINE': 'sentry.db.postgres',
'NAME': (
env('SENTRY_DB_NAME')
or env('POSTGRES_ENV_POSTGRES_USER')
or 'postgres'
),
'USER': (
env('SENTRY_DB_USER')
or env('POSTGRES_ENV_POSTGRES_USER')
or 'postgres'
),
'PASSWORD': (
env('SENTRY_DB_PASSWORD')
or env('POSTGRES_ENV_POSTGRES_PASSWORD')
or ''
),
'HOST': postgres,
'PORT': (
env('SENTRY_POSTGRES_PORT')
or ''
),
},
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = env('SENTRY_SINGLE_ORGANIZATION', True)
#########
# Redis #
#########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
redis = env('SENTRY_REDIS_HOST') or (env('REDIS_PORT_6379_TCP_ADDR') and 'redis')
if not redis:
raise Exception('Error: REDIS_PORT_6379_TCP_ADDR (or SENTRY_REDIS_HOST) is undefined, did you forget to `--link` a redis container?')
redis_password = env('SENTRY_REDIS_PASSWORD') or ''
redis_port = env('SENTRY_REDIS_PORT') or '6379'
redis_db = env('SENTRY_REDIS_DB') or '0'
SENTRY_OPTIONS.update({
'redis.clusters': {
'default': {
'hosts': {
0: {
'host': redis,
'password': redis_password,
'port': redis_port,
'db': redis_db,
},
},
},
},
})
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
memcached = env('SENTRY_MEMCACHED_HOST') or (env('MEMCACHED_PORT_11211_TCP_ADDR') and 'memcached')
if memcached:
memcached_port = (
env('SENTRY_MEMCACHED_PORT')
or '11211'
)
CACHES = {
'default': {
'BACKEND': 'django.core.cache.backends.memcached.MemcachedCache',
'LOCATION': [memcached + ':' + memcached_port],
'TIMEOUT': 3600,
}
}
# A primary cache is required for things such as processing events
SENTRY_CACHE = 'sentry.cache.redis.RedisCache'
#########
# Queue #
#########
# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
rabbitmq = env('SENTRY_RABBITMQ_HOST') or (env('RABBITMQ_PORT_5672_TCP_ADDR') and 'rabbitmq')
if rabbitmq:
BROKER_URL = (
'amqp://' + (
env('SENTRY_RABBITMQ_USERNAME')
or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_USER')
or 'guest'
) + ':' + (
env('SENTRY_RABBITMQ_PASSWORD')
or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_PASS')
or 'guest'
) + '@' + rabbitmq + '/' + (
env('SENTRY_RABBITMQ_VHOST')
or env('RABBITMQ_ENV_RABBITMQ_DEFAULT_VHOST')
or '/'
)
)
else:
BROKER_URL = 'redis://:' + redis_password + '@' + redis + ':' + redis_port + '/' + redis_db
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = 'sentry.ratelimits.redis.RedisRateLimiter'
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = 'sentry.buffer.redis.RedisBuffer'
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = 'sentry.quotas.redis.RedisQuota'
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = 'sentry.tsdb.redis.RedisTSDB'
###########
# Digests #
###########
# The digest backend powers notification summaries.
SENTRY_DIGESTS = 'sentry.digests.backends.redis.RedisBackend'
################
# File storage #
################
# Uploaded media uses these `filestore` settings. The available
# backends are either `filesystem` or `s3`.
SENTRY_OPTIONS['filestore.backend'] = 'filesystem'
SENTRY_OPTIONS['filestore.options'] = {
'location': env('SENTRY_FILESTORE_DIR'),
}
##############
# Web Server #
##############
# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
# header and set `SENTRY_USE_SSL=1`
if env('SENTRY_USE_SSL', False):
SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
SESSION_COOKIE_SECURE = True
CSRF_COOKIE_SECURE = True
SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
SENTRY_WEB_HOST = '0.0.0.0'
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
'http': '%s:%s' % (SENTRY_WEB_HOST, SENTRY_WEB_PORT),
'protocol': 'uwsgi',
# This is need to prevent https://git.io/fj7Lw
'uwsgi-socket': None,
'http-keepalive': True,
'memory-report': False,
# 'workers': 3, # the number of web workers
}
##########
# Docker #
##########
# Docker's environment configuration needs to happen
# prior to anything that might rely on these values to
# enable more "smart" configuration.
ENV_CONFIG_MAPPING = {
'SENTRY_EMAIL_PASSWORD': 'mail.password',
'SENTRY_EMAIL_USER': 'mail.username',
'SENTRY_EMAIL_PORT': ('mail.port', Int),
'SENTRY_EMAIL_USE_TLS': ('mail.use-tls', Bool),
'SENTRY_EMAIL_HOST': 'mail.host',
'SENTRY_SERVER_EMAIL': 'mail.from',
'SENTRY_ENABLE_EMAIL_REPLIES': ('mail.enable-replies', Bool),
'SENTRY_EMAIL_LIST_NAMESPACE': 'mail.list-namespace',
'SENTRY_SMTP_HOSTNAME': 'mail.reply-hostname',
'SENTRY_SECRET_KEY': 'system.secret-key',
# If you're using mailgun for inbound mail, set your API key and configure a
# route to forward to /api/hooks/mailgun/inbound/
'SENTRY_MAILGUN_API_KEY': 'mail.mailgun-api-key',
'SENTRY_SLACK_CLIENT_ID': 'slack.client-id',
'SENTRY_SLACK_CLIENT_SECRET': 'slack.client-secret',
'SENTRY_SLACK_VERIFICATION_TOKEN': 'slack.verification-token',
'SENTRY_GITHUB_APP_ID': ('github-app.id', Int),
'SENTRY_GITHUB_APP_CLIENT_ID': 'github-app.client-id',
'SENTRY_GITHUB_APP_CLIENT_SECRET': 'github-app.client-secret',
'SENTRY_GITHUB_APP_WEBHOOK_SECRET': 'github-app.webhook-secret',
'SENTRY_GITHUB_APP_PRIVATE_KEY': 'github-app.private-key',
'SENTRY_VSTS_CLIENT_ID': 'vsts.client-id',
'SENTRY_VSTS_CLIENT_SECRET': 'vsts.client-secret',
}
def bind_env_config(config=SENTRY_OPTIONS, mapping=ENV_CONFIG_MAPPING):
"""
Automatically bind SENTRY_OPTIONS from a set of environment variables.
"""
for env_var, item in six.iteritems(mapping):
# HACK: we need to check both in `os.environ` and `env._cache`.
# This is very much an implementation detail leaking out
# due to assumptions about how `env` would be used previously.
# `env` will pop values out of `os.environ` when they are seen,
# so checking against `os.environ` only means it's likely
# they won't exist if `env()` has been called on the variable
# before at any point. So we're choosing to check both, but this
# behavior is different since we're trying to only conditionally
# apply variables, instead of setting them always.
if env_var not in os.environ and env_var not in env._cache:
continue
if isinstance(item, tuple):
opt_key, type_ = item
else:
opt_key, type_ = item, None
config[opt_key] = env(env_var, type=type_)
# If this value ever becomes compromised, it's important to regenerate your
# SENTRY_SECRET_KEY. Changing this value will result in all current sessions
# being invalidated.
secret_key = env('SENTRY_SECRET_KEY')
if not secret_key:
raise Exception('Error: SENTRY_SECRET_KEY is undefined, run `generate-secret-key` and set to -e SENTRY_SECRET_KEY')
if 'SENTRY_RUNNING_UWSGI' not in os.environ and len(secret_key) < 32:
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
print('!! CAUTION !!')
print('!! Your SENTRY_SECRET_KEY is potentially insecure. !!')
print('!! We recommend at least 32 characters long. !!')
print('!! Regenerate with `generate-secret-key`. !!')
print('!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!!')
# Grab the easy configuration first - these are all fixed
# key=value with no logic behind them
bind_env_config()
# If you specify a MAILGUN_API_KEY, you definitely want EMAIL_REPLIES
if SENTRY_OPTIONS.get('mail.mailgun-api-key'):
SENTRY_OPTIONS.setdefault('mail.enable-replies', True)
if 'GITHUB_APP_ID' in os.environ:
GITHUB_EXTENDED_PERMISSIONS = ['repo']
GITHUB_APP_ID = env('GITHUB_APP_ID')
GITHUB_API_SECRET = env('GITHUB_API_SECRET')
if 'BITBUCKET_CONSUMER_KEY' in os.environ:
BITBUCKET_CONSUMER_KEY = env('BITBUCKET_CONSUMER_KEY')
BITBUCKET_CONSUMER_SECRET = env('BITBUCKET_CONSUMER_SECRET')

sentry/Dockerfile Normal file

@ -0,0 +1,18 @@
ARG SENTRY_IMAGE
FROM ${SENTRY_IMAGE:-getsentry/sentry:latest}
WORKDIR /usr/src/sentry
# Add WORKDIR to PYTHONPATH so local python files don't need to be installed
ENV PYTHONPATH /usr/src/sentry
COPY . /usr/src/sentry
# Hook for installing additional plugins
RUN if [ -s requirements.txt ]; then pip install -r requirements.txt; fi
# Hook for installing a local app as an addon
RUN if [ -s setup.py ]; then pip install -e .; fi
# Hook for staging in custom configs
RUN if [ -s sentry.conf.py ]; then cp sentry.conf.py $SENTRY_CONF/; fi \
&& if [ -s config.yml ]; then cp config.yml $SENTRY_CONF/; fi

sentry/config.example.yml

@ -8,7 +8,7 @@
###############
# mail.backend: 'smtp' # Use dummy if you want to disable email entirely
# mail.host: 'localhost'
mail.host: 'smtp'
# mail.port: 25
# mail.username: ''
# mail.password: ''
@ -17,13 +17,14 @@
# mail.from: 'root@localhost'
# If you'd like to configure email replies, enable this.
# mail.enable-replies: false
# mail.enable-replies: true
# When email-replies are enabled, this value is used in the Reply-To header
# mail.reply-hostname: ''
# If you're using mailgun for inbound mail, set your API key and configure a
# route to forward to /api/hooks/mailgun/inbound/
# Also don't forget to set `mail.enable-replies: true` above.
# mail.mailgun-api-key: ''
###################
@ -33,7 +34,7 @@
# If this file ever becomes compromised, it's important to regenerate a new key
# Changing this value will result in all current sessions being invalidated.
# A new key can be generated with `$ sentry config generate-secret-key`
# system.secret-key: 'changeme'
system.secret-key: '!!changeme!!'
# The ``redis.clusters`` setting is used, unsurprisingly, to configure Redis
# clusters. These clusters can be then referred to by name when configuring
@ -52,12 +53,17 @@
# Uploaded media uses these `filestore` settings. The available
# backends are either `filesystem` or `s3`.
# filestore.backend: 'filesystem'
# filestore.options:
# location: '/tmp/sentry-files'
filestore.backend: 'filesystem'
filestore.options:
location: '/var/lib/sentry/files'
# filestore.backend: 's3'
# filestore.options:
# access_key: 'AKIXXXXXX'
# secret_key: 'XXXXXXX'
# bucket_name: 's3-bucket-name'
system.internal-url-prefix: 'http://web:9000'
symbolicator.enabled: true
symbolicator.options:
url: "http://symbolicator:3021"

sentry/sentry.conf.example.py

@ -0,0 +1,223 @@
# This file is just Python, with a touch of Django which means
# you can inherit and tweak settings to your heart's content.
from sentry.conf.server import * # NOQA
DATABASES = {
"default": {
"ENGINE": "sentry.db.postgres",
"NAME": "postgres",
"USER": "postgres",
"PASSWORD": "",
"HOST": "postgres",
"PORT": "",
}
}
# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
# If you're expecting any kind of real traffic on Sentry, we highly recommend
# configuring the CACHES and Redis settings
###########
# General #
###########
# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
#########
# Redis #
#########
# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB
SENTRY_OPTIONS["redis.clusters"] = {
"default": {
"hosts": {0: {"host": "redis", "password": "", "port": "6379", "db": "0"}}
}
}
#########
# Queue #
#########
# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.
rabbitmq_host = None
if rabbitmq_host:
BROKER_URL = "amqp://{username}:{password}@{host}/{vhost}".format(
username="guest", password="guest", host=rabbitmq_host, vhost="/"
)
else:
BROKER_URL = "redis://:{password}@{host}:{port}/{db}".format(
**SENTRY_OPTIONS["redis.clusters"]["default"]["hosts"][0]
)
#########
# Cache #
#########
# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.
CACHES = {
"default": {
"BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
"LOCATION": ["memcached:11211"],
"TIMEOUT": 3600,
}
}
# A primary cache is required for things such as processing events
SENTRY_CACHE = "sentry.cache.redis.RedisCache"
DEFAULT_KAFKA_OPTIONS = {
"bootstrap.servers": "kafka:9092",
"message.max.bytes": 50000000,
"socket.timeout.ms": 1000,
}
SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream"
SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS}
KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS
###############
# Rate Limits #
###############
# Rate limits apply to notification handlers and are enforced per-project
# automatically.
SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter"
##################
# Update Buffers #
##################
# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)
SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer"
##########
# Quotas #
##########
# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.
SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota"
########
# TSDB #
########
# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.
SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"
#########
# SNUBA #
#########
SENTRY_SEARCH = "sentry.search.snuba.SnubaSearchBackend"
SENTRY_SEARCH_OPTIONS = {}
SENTRY_TAGSTORE_OPTIONS = {}
###########
# Digests #
###########
# The digest backend powers notification summaries.
SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend"
##############
# Web Server #
##############
SENTRY_WEB_HOST = "0.0.0.0"
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
"http": "%s:%s" % (SENTRY_WEB_HOST, SENTRY_WEB_PORT),
"protocol": "uwsgi",
# This is needed to prevent https://git.io/fj7Lw
"uwsgi-socket": None,
"http-keepalive": True,
"memory-report": False,
# 'workers': 3, # the number of web workers
}
###########
# SSL/TLS #
###########
# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
# header and enable the settings below
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
# SESSION_COOKIE_SECURE = True
# CSRF_COOKIE_SECURE = True
# SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
# End of SSL/TLS settings
############
# Features #
############
SENTRY_FEATURES["projects:sample-events"] = False
SENTRY_FEATURES.update(
{
feature: True
for feature in (
"organizations:discover",
"organizations:events",
"organizations:global-views",
"organizations:integrations-issue-basic",
"organizations:integrations-issue-sync",
"organizations:invite-members",
"organizations:new-issue-ui",
"organizations:repos",
"organizations:require-2fa",
"organizations:sentry10",
"organizations:sso-basic",
"organizations:sso-rippling",
"organizations:sso-saml2",
"organizations:suggested-commits",
"projects:custom-inbound-filters",
"projects:data-forwarding",
"projects:discard-groups",
"projects:plugins",
"projects:rate-limits",
"projects:servicehooks",
)
}
)
######################
# GitHub Integration #
######################
# GITHUB_APP_ID = 'YOUR_GITHUB_APP_ID'
# GITHUB_API_SECRET = 'YOUR_GITHUB_API_SECRET'
# GITHUB_EXTENDED_PERMISSIONS = ['repo']
#########################
# Bitbucket Integration #
#########################
# BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY'
# BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET'

test.sh

@ -1,28 +1,29 @@
#!/usr/bin/env bash
set -e
TEST_USER='test@sentry.io'
SENTRY_TEST_HOST="${SENTRY_TEST_HOST:-http://localhost:9000}"
TEST_USER='test@example.com'
TEST_PASS='test123TEST'
COOKIE_FILE=$(mktemp)
declare -a TEST_STRINGS=(
'"isAuthenticated":true'
'"username":"test@sentry.io"'
'"username":"test@example.com"'
'"isSuperuser":true'
)
INITIAL_AUTH_REDIRECT=$(curl -sL -o /dev/null http://localhost:9000 -w %{url_effective})
if [ "$INITIAL_AUTH_REDIRECT" != "http://localhost:9000/auth/login/sentry/" ]; then
INITIAL_AUTH_REDIRECT=$(curl -sL -o /dev/null $SENTRY_TEST_HOST -w %{url_effective})
if [ "$INITIAL_AUTH_REDIRECT" != "$SENTRY_TEST_HOST/auth/login/sentry/" ]; then
echo "Initial /auth/login/ redirect failed, exiting..."
echo "$INITIAL_AUTH_REDIRECT"
exit -1
fi
CSRF_TOKEN=$(curl http://localhost:9000 -sL -c "$COOKIE_FILE" | awk -F "'" '
CSRF_TOKEN=$(curl $SENTRY_TEST_HOST -sL -c "$COOKIE_FILE" | awk -F "'" '
/csrfmiddlewaretoken/ {
print $4 "=" $6;
exit;
}')
LOGIN_RESPONSE=$(curl -sL -F 'op=login' -F "username=$TEST_USER" -F "password=$TEST_PASS" -F "$CSRF_TOKEN" http://localhost:9000/auth/login/ -H 'Referer: http://localhost/auth/login/' -b "$COOKIE_FILE" -c "$COOKIE_FILE")
LOGIN_RESPONSE=$(curl -sL -F 'op=login' -F "username=$TEST_USER" -F "password=$TEST_PASS" -F "$CSRF_TOKEN" "$SENTRY_TEST_HOST/auth/login/" -H "Referer: $SENTRY_TEST_HOST/auth/login/" -b "$COOKIE_FILE" -c "$COOKIE_FILE")
TEST_RESULT=0
for i in "${TEST_STRINGS[@]}"