Without the --consumer-group option, the transactions consumer and the events consumer compete for messages on the same Kafka topic, and the events consumer, being first, usually wins. This can silently drop performance (transaction) data and make tracing appear broken. Should address https://forum.sentry.io/t/perfomance-tracing-for-sentry-itself/10405/5?u=byk
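The relevant change is to the snuba-transactions-consumer service in the file below, which gets its own Kafka consumer group via --consumer-group:

    snuba-transactions-consumer:
      << : *snuba_defaults
      command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750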
version: '3.4'
x-restart-policy: &restart_policy
  restart: unless-stopped
x-sentry-defaults: &sentry_defaults
  << : *restart_policy
  build:
    context: ./sentry
    args:
      - SENTRY_IMAGE
      - SENTRY_VERSION
  image: sentry-onpremise-local
  depends_on:
    - redis
    - postgres
    - memcached
    - smtp
    - snuba-api
    - snuba-consumer
    - snuba-outcomes-consumer
    - snuba-sessions-consumer
    - snuba-transactions-consumer
    - snuba-replacer
    - symbolicator
    - kafka
  environment:
    SENTRY_CONF: '/etc/sentry'
    SNUBA: 'http://snuba-api:1218'
  volumes:
    - 'sentry-data:/data'
    - './sentry:/etc/sentry'
x-snuba-defaults: &snuba_defaults
  << : *restart_policy
  depends_on:
    - redis
    - clickhouse
    - kafka
  image: 'getsentry/snuba:$SENTRY_VERSION'
  environment:
    SNUBA_SETTINGS: docker
    CLICKHOUSE_HOST: clickhouse
    DEFAULT_BROKERS: 'kafka:9092'
    REDIS_HOST: redis
    UWSGI_MAX_REQUESTS: '10000'
    UWSGI_DISABLE_LOGGING: 'true'
services:
  smtp:
    << : *restart_policy
    image: tianon/exim4
    volumes:
      - 'sentry-smtp:/var/spool/exim4'
      - 'sentry-smtp-log:/var/log/exim4'
  memcached:
    << : *restart_policy
    image: 'memcached:1.5-alpine'
  redis:
    << : *restart_policy
    image: 'redis:5.0-alpine'
    volumes:
      - 'sentry-redis:/data'
  postgres:
    << : *restart_policy
    image: 'postgres:9.6'
    environment:
      POSTGRES_HOST_AUTH_METHOD: 'trust'
    volumes:
      - 'sentry-postgres:/var/lib/postgresql/data'
  zookeeper:
    << : *restart_policy
    image: 'confluentinc/cp-zookeeper:5.5.0'
    environment:
      ZOOKEEPER_CLIENT_PORT: '2181'
      CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
      ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'WARN'
      ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: 'WARN'
    volumes:
      - 'sentry-zookeeper:/var/lib/zookeeper/data'
      - 'sentry-zookeeper-log:/var/lib/zookeeper/log'
      - 'sentry-secrets:/etc/zookeeper/secrets'
  kafka:
    << : *restart_policy
    depends_on:
      - zookeeper
    image: 'confluentinc/cp-kafka:5.5.0'
    environment:
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:9092'
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1'
      KAFKA_MESSAGE_MAX_BYTES: '50000000' #50MB or bust
      KAFKA_MAX_REQUEST_SIZE: '50000000' #50MB on requests apparently too
      CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
      KAFKA_LOG4J_LOGGERS: 'kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN'
      KAFKA_LOG4J_ROOT_LOGLEVEL: 'WARN'
      KAFKA_TOOLS_LOG4J_LOGLEVEL: 'WARN'
    volumes:
      - 'sentry-kafka:/var/lib/kafka/data'
      - 'sentry-kafka-log:/var/lib/kafka/log'
      - 'sentry-secrets:/etc/kafka/secrets'
  clickhouse:
    << : *restart_policy
    image: 'yandex/clickhouse-server:19.17'
    ulimits:
      nofile:
        soft: 262144
        hard: 262144
    volumes:
      - 'sentry-clickhouse:/var/lib/clickhouse'
      - 'sentry-clickhouse-log:/var/log/clickhouse-server'
  snuba-api:
    << : *snuba_defaults
  # Kafka consumer responsible for feeding events into Clickhouse
  snuba-consumer:
    << : *snuba_defaults
    command: consumer --storage events --auto-offset-reset=latest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding outcomes into Clickhouse
  # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data
  # since we did not do a proper migration
  snuba-outcomes-consumer:
    << : *snuba_defaults
    command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding session data into Clickhouse
  snuba-sessions-consumer:
    << : *snuba_defaults
    command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding transactions data into Clickhouse
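  # It gets a dedicated --consumer-group so it does not share (and split)
  # Kafka partitions with snuba-consumer reading the same topic; see the
  # description above.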
  snuba-transactions-consumer:
    << : *snuba_defaults
    command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750
  snuba-replacer:
    << : *snuba_defaults
    command: replacer --storage events --auto-offset-reset=latest --max-batch-size 3
  snuba-cleanup:
    << : *snuba_defaults
    image: snuba-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: 'getsentry/snuba:$SENTRY_VERSION'
    command: '"*/5 * * * * gosu snuba snuba cleanup --dry-run False"'
  symbolicator:
    << : *restart_policy
    image: 'getsentry/symbolicator:$SYMBOLICATOR_VERSION'
    volumes:
      - 'sentry-symbolicator:/data'
      - type: bind
        read_only: true
        source: ./symbolicator
        target: /etc/symbolicator
    command: run -c /etc/symbolicator/config.yml
  symbolicator-cleanup:
    << : *restart_policy
    image: symbolicator-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: 'getsentry/symbolicator:$SYMBOLICATOR_VERSION'
    command: '"55 23 * * * gosu symbolicator symbolicator cleanup"'
    volumes:
      - 'sentry-symbolicator:/data'
  web:
    << : *sentry_defaults
  cron:
    << : *sentry_defaults
    command: run cron
  worker:
    << : *sentry_defaults
    command: run worker
  ingest-consumer:
    << : *sentry_defaults
    command: run ingest-consumer --all-consumer-types
  post-process-forwarder:
    << : *sentry_defaults
    # Increase `--commit-batch-size 1` below to deal with high-load environments.
    command: run post-process-forwarder --commit-batch-size 1
  sentry-cleanup:
    << : *sentry_defaults
    image: sentry-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: 'sentry-onpremise-local'
    command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
  nginx:
    << : *restart_policy
    ports:
      - '9000:80/tcp'
    image: 'nginx:1.16'
    volumes:
      - type: bind
        read_only: true
        source: ./nginx
        target: /etc/nginx
    depends_on:
      - web
      - relay
  relay:
    << : *restart_policy
    image: 'getsentry/relay:$SENTRY_VERSION'
    volumes:
      - type: bind
        read_only: true
        source: ./relay
        target: /work/.relay
    depends_on:
      - kafka
      - redis
volumes:
  sentry-data:
    external: true
  sentry-postgres:
    external: true
  sentry-redis:
    external: true
  sentry-zookeeper:
    external: true
  sentry-kafka:
    external: true
  sentry-clickhouse:
    external: true
  sentry-symbolicator:
    external: true
  sentry-secrets:
  sentry-smtp:
  sentry-zookeeper-log:
  sentry-kafka-log:
  sentry-smtp-log:
  sentry-clickhouse-log:
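One way to apply the change after editing docker-compose.yml (assuming a standard docker-compose setup) is to recreate just the affected service:

    docker-compose up -d snuba-transactions-consumer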