# sentry-onpremise/docker-compose.yml
version: '3.4'
x-restart-policy: &restart_policy
  restart: unless-stopped
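# A note on the YAML anchors used throughout this file: `&name` defines a
# reusable block and `<< : *name` merges it into a mapping, so services can
# share defaults while still overriding individual keys (e.g. `command`).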
x-sentry-defaults: &sentry_defaults
  << : *restart_policy
  build:
    context: ./sentry
    args:
      - SENTRY_IMAGE
  image: sentry-onpremise-local
  depends_on:
    - redis
    - postgres
    - memcached
    - smtp
    - snuba-api
    - snuba-consumer
    - snuba-outcomes-consumer
    - snuba-sessions-consumer
    - snuba-transactions-consumer
    - snuba-replacer
    - symbolicator
    - kafka
  environment:
    SENTRY_CONF: '/etc/sentry'
    SNUBA: 'http://snuba-api:1218'
  volumes:
    - './data/sentry-data:/data'
    - './sentry:/etc/sentry'
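# Every Sentry service below (web, cron, worker, ingest-consumer,
# post-process-forwarder, sentry-cleanup) inherits this block. The
# `sentry-onpremise-local` image is built from ./sentry on top of whatever
# base image is passed in through the SENTRY_IMAGE build argument.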
x-snuba-defaults: &snuba_defaults
  << : *restart_policy
  depends_on:
    - redis
    - clickhouse
    - kafka
  image: '$SNUBA_IMAGE'
  environment:
    SNUBA_SETTINGS: docker
    CLICKHOUSE_HOST: clickhouse
    DEFAULT_BROKERS: 'kafka:9092'
    REDIS_HOST: redis
    UWSGI_MAX_REQUESTS: '10000'
    UWSGI_DISABLE_LOGGING: 'true'
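# $SNUBA_IMAGE (like $SENTRY_IMAGE, $SYMBOLICATOR_IMAGE and $RELAY_IMAGE) is
# interpolated by docker-compose from the shell environment, typically set in
# the repository's .env file, e.g. SNUBA_IMAGE=getsentry/snuba:latest (the tag
# here is illustrative).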
services:
  smtp:
    << : *restart_policy
    image: fgribreau/smtp-to-sendgrid-gateway
    environment:
      - SENDGRID_API=REPLACEME
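    # REPLACEME is a placeholder, not a working default: supply a real
    # SendGrid API key before bringing the stack up.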
  memcached:
    << : *restart_policy
    image: 'memcached:1.6-alpine'
  redis:
    << : *restart_policy
    image: 'redis:6.0-alpine'
    volumes:
      - './data/redis:/data'
  postgres:
    << : *restart_policy
    image: 'postgres:12'
    environment:
      POSTGRES_HOST_AUTH_METHOD: 'trust'
    volumes:
      - './data/sentry-postgres:/var/lib/postgresql/data'
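    # `trust` disables password authentication entirely. This is tolerable here
    # only because Postgres publishes no host port and is reachable solely from
    # the compose network.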
  zookeeper:
    << : *restart_policy
    image: 'confluentinc/cp-zookeeper:5.5.0'
    environment:
      ZOOKEEPER_CLIENT_PORT: '2181'
      CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
      ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'WARN'
      ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: 'WARN'
    volumes:
      - './data/zookeeper:/var/lib/zookeeper/data'
      - './data/sentry-secrets:/etc/zookeeper/secrets'
  kafka:
    << : *restart_policy
    depends_on:
      - zookeeper
    image: 'confluentinc/cp-kafka:5.5.0'
    environment:
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:9092'
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1'
      KAFKA_MESSAGE_MAX_BYTES: '50000000' # 50MB or bust
      KAFKA_MAX_REQUEST_SIZE: '50000000' # 50MB on requests apparently too
      CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
      KAFKA_LOG4J_LOGGERS: 'kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN'
      KAFKA_LOG4J_ROOT_LOGLEVEL: 'WARN'
      KAFKA_TOOLS_LOG4J_LOGLEVEL: 'WARN'
    volumes:
      - './data/sentry-kafka:/var/lib/kafka/data'
      - './data/sentry-secrets:/etc/kafka/secrets'
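    # Single-broker setup: the offsets-topic replication factor must stay at 1
    # unless more brokers are added. The 50MB message/request caps are presumably
    # there to accommodate large event payloads such as attachments and minidumps.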
  clickhouse:
    << : *restart_policy
    image: 'yandex/clickhouse-server:19.17'
    ulimits:
      nofile:
        soft: 262144
        hard: 262144
    volumes:
      - './data/clickhouse:/var/lib/clickhouse'
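    # ClickHouse keeps a large number of files open (each data part is several
    # files on disk), hence the raised nofile ulimits.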
  snuba-api:
    << : *snuba_defaults
  # Kafka consumer responsible for feeding events into Clickhouse
  snuba-consumer:
    << : *snuba_defaults
    command: consumer --storage events --auto-offset-reset=latest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding outcomes into Clickhouse
  # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data
  # since we did not do a proper migration
  snuba-outcomes-consumer:
    << : *snuba_defaults
    command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding session data into Clickhouse
  snuba-sessions-consumer:
    << : *snuba_defaults
    command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding transactions data into Clickhouse
  snuba-transactions-consumer:
    << : *snuba_defaults
    command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750
  snuba-replacer:
    << : *snuba_defaults
    command: replacer --storage events --auto-offset-reset=latest --max-batch-size 3
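  # The replacer above applies after-the-fact mutations (e.g. event deletions
  # and merges) that ClickHouse cannot perform as in-place row updates.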
  snuba-cleanup:
    << : *snuba_defaults
    image: snuba-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: '$SNUBA_IMAGE'
    command: '"*/5 * * * * gosu snuba snuba cleanup --dry-run False"'
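  # The ./cron build context wraps a base image in a crontab runner: the quoted
  # command is a crontab entry, so this runs `snuba cleanup` every five minutes.
  # symbolicator-cleanup and sentry-cleanup below reuse the same pattern with
  # their own base images and schedules.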
  symbolicator:
    << : *restart_policy
    image: '$SYMBOLICATOR_IMAGE'
    volumes:
      - './data/symbolicator:/data'
      - type: bind
        read_only: true
        source: ./symbolicator
        target: /etc/symbolicator
    command: run -c /etc/symbolicator/config.yml
  symbolicator-cleanup:
    << : *restart_policy
    image: symbolicator-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: '$SYMBOLICATOR_IMAGE'
    command: '"55 23 * * * gosu symbolicator symbolicator cleanup"'
    volumes:
      - './data/symbolicator:/data'
  web:
    << : *sentry_defaults
  cron:
    << : *sentry_defaults
    command: run cron
  worker:
    << : *sentry_defaults
    command: run worker
  ingest-consumer:
    << : *sentry_defaults
    command: run ingest-consumer --all-consumer-types
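    # --all-consumer-types runs every ingest consumer (events, transactions,
    # attachments) in one process; busy installs can instead run one service
    # per --consumer-type.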
  post-process-forwarder:
    << : *sentry_defaults
    # Increase the `--commit-batch-size` value below to deal with high-load environments.
    command: run post-process-forwarder --commit-batch-size 1
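    # For example (value is illustrative, tune to your load):
    #   command: run post-process-forwarder --commit-batch-size 100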
  sentry-cleanup:
    << : *sentry_defaults
    image: sentry-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: 'sentry-onpremise-local'
    command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
  nginx:
    << : *restart_policy
    ports:
      - '127.0.0.1:9000:80/tcp'
    image: 'nginx:1'
    volumes:
      - type: bind
        read_only: true
        source: ./nginx
        target: /etc/nginx
    depends_on:
      - web
      - relay
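    # nginx is the only service with a published port, and it binds to localhost
    # only. To accept traffic from other hosts, widen the bind, e.g.:
    #   ports:
    #     - '9000:80/tcp'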
  relay:
    << : *restart_policy
    image: '$RELAY_IMAGE'
    volumes:
      - type: bind
        read_only: true
        source: ./relay
        target: /work/.relay
    depends_on:
      - kafka
      - redis
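    # Relay reads its configuration and credentials from the bind-mounted
    # ./relay directory; with no published port of its own, ingest traffic
    # reaches it through nginx.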