version: '3.4'
x-restart-policy: &restart_policy
  restart: unless-stopped
x-sentry-defaults: &sentry_defaults
  <<: *restart_policy
  build:
    context: ./sentry
    args:
      - SENTRY_IMAGE
  image: sentry-onpremise-local
  depends_on:
    - redis
    - postgres
    - memcached
    - smtp
    - snuba-api
    - snuba-consumer
    - snuba-outcomes-consumer
    - snuba-sessions-consumer
    - snuba-transactions-consumer
    - snuba-replacer
    - symbolicator
    - kafka
  environment:
    SENTRY_CONF: '/etc/sentry'
    SNUBA: 'http://snuba-api:1218'
  volumes:
    - './data/sentry-data:/data'
    - './sentry:/etc/sentry'
x-snuba-defaults: &snuba_defaults
  <<: *restart_policy
  depends_on:
    - redis
    - clickhouse
    - kafka
  image: '$SNUBA_IMAGE'
  environment:
    SNUBA_SETTINGS: docker
    CLICKHOUSE_HOST: clickhouse
    DEFAULT_BROKERS: 'kafka:9092'
    REDIS_HOST: redis
    UWSGI_MAX_REQUESTS: '10000'
    UWSGI_DISABLE_LOGGING: 'true'
services:
  smtp:
    <<: *restart_policy
    image: fgribreau/smtp-to-sendgrid-gateway
    environment:
      - SENDGRID_API=REPLACEME
  memcached:
    <<: *restart_policy
    image: 'memcached:1'
  redis:
    <<: *restart_policy
    image: 'redis:6.0'
    volumes:
      - './data/redis:/data'
  postgres:
    <<: *restart_policy
    image: 'postgres:12'
    environment:
      POSTGRES_HOST_AUTH_METHOD: 'trust'
    volumes:
      - './data/sentry-postgres:/var/lib/postgresql/data'
  zookeeper:
    <<: *restart_policy
    image: 'confluentinc/cp-zookeeper:5.5.0'
    environment:
      ZOOKEEPER_CLIENT_PORT: '2181'
      CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
      ZOOKEEPER_LOG4J_ROOT_LOGLEVEL: 'WARN'
      ZOOKEEPER_TOOLS_LOG4J_LOGLEVEL: 'WARN'
    volumes:
      - './data/zookeeper:/var/lib/zookeeper/data'
      - './data/sentry-secrets:/etc/zookeeper/secrets'
  kafka:
    <<: *restart_policy
    depends_on:
      - zookeeper
    image: 'confluentinc/cp-kafka:5.5.0'
    environment:
      KAFKA_ZOOKEEPER_CONNECT: 'zookeeper:2181'
      KAFKA_ADVERTISED_LISTENERS: 'PLAINTEXT://kafka:9092'
      KAFKA_OFFSETS_TOPIC_REPLICATION_FACTOR: '1'
      KAFKA_MESSAGE_MAX_BYTES: '50000000' # 50MB or bust
      KAFKA_MAX_REQUEST_SIZE: '50000000' # 50MB on requests apparently too
      CONFLUENT_SUPPORT_METRICS_ENABLE: 'false'
      KAFKA_LOG4J_LOGGERS: 'kafka.cluster=WARN,kafka.controller=WARN,kafka.coordinator=WARN,kafka.log=WARN,kafka.server=WARN,kafka.zookeeper=WARN,state.change.logger=WARN'
      KAFKA_LOG4J_ROOT_LOGLEVEL: 'WARN'
      KAFKA_TOOLS_LOG4J_LOGLEVEL: 'WARN'
    volumes:
      - './data/sentry-kafka:/var/lib/kafka/data'
      - './data/sentry-secrets:/etc/kafka/secrets'
  clickhouse:
    <<: *restart_policy
    image: 'yandex/clickhouse-server:19.17'
    ulimits:
      nofile:
        soft: 262144
        hard: 262144
    volumes:
      - './data/clickhouse:/var/lib/clickhouse'
  snuba-api:
    <<: *snuba_defaults
  # Kafka consumer responsible for feeding events into Clickhouse
  snuba-consumer:
    <<: *snuba_defaults
    command: consumer --storage events --auto-offset-reset=latest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding outcomes into Clickhouse
  # Use --auto-offset-reset=earliest to recover up to 7 days of TSDB data
  # since we did not do a proper migration
  snuba-outcomes-consumer:
    <<: *snuba_defaults
    command: consumer --storage outcomes_raw --auto-offset-reset=earliest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding session data into Clickhouse
  snuba-sessions-consumer:
    <<: *snuba_defaults
    command: consumer --storage sessions_raw --auto-offset-reset=latest --max-batch-time-ms 750
  # Kafka consumer responsible for feeding transactions data into Clickhouse
  snuba-transactions-consumer:
    <<: *snuba_defaults
    command: consumer --storage transactions --consumer-group transactions_group --auto-offset-reset=latest --max-batch-time-ms 750
  snuba-replacer:
    <<: *snuba_defaults
    command: replacer --storage events --auto-offset-reset=latest --max-batch-size 3
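  # Cron container that runs `snuba cleanup` every 5 minutes to delete
  # expired data from Clickhouse (--dry-run False makes the deletions real)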
  snuba-cleanup:
    <<: *snuba_defaults
    image: snuba-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: '$SNUBA_IMAGE'
    command: '"*/5 * * * * gosu snuba snuba cleanup --dry-run False"'
  symbolicator:
    <<: *restart_policy
    image: '$SYMBOLICATOR_IMAGE'
    volumes:
      - './data/symbolicator:/data'
      - type: bind
        read_only: true
        source: ./symbolicator
        target: /etc/symbolicator
    command: run -c /etc/symbolicator/config.yml
  symbolicator-cleanup:
    <<: *restart_policy
    image: symbolicator-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: '$SYMBOLICATOR_IMAGE'
    command: '"55 23 * * * gosu symbolicator symbolicator cleanup"'
    volumes:
      - './data/symbolicator:/data'
  web:
    <<: *sentry_defaults
  cron:
    <<: *sentry_defaults
    command: run cron
  worker:
    <<: *sentry_defaults
    command: run worker
  ingest-consumer:
    <<: *sentry_defaults
    command: run ingest-consumer --all-consumer-types
  post-process-forwarder:
    <<: *sentry_defaults
    # Increase `--commit-batch-size 1` below to deal with high-load environments.
    command: run post-process-forwarder --commit-batch-size 1
  sentry-cleanup:
    <<: *sentry_defaults
    image: sentry-cleanup-onpremise-local
    build:
      context: ./cron
      args:
        BASE_IMAGE: 'sentry-onpremise-local'
    command: '"0 0 * * * gosu sentry sentry cleanup --days $SENTRY_EVENT_RETENTION_DAYS"'
  nginx:
    <<: *restart_policy
    ports:
      - '127.0.0.1:9000:80/tcp'
    image: 'nginx:1'
    volumes:
      - type: bind
        read_only: true
        source: ./nginx
        target: /etc/nginx
    depends_on:
      - web
      - relay
  relay:
    <<: *restart_policy
    image: '$RELAY_IMAGE'
    volumes:
      - type: bind
        read_only: true
        source: ./relay
        target: /work/.relay
    depends_on:
      - kafka
      - redis
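# Note: this file references $SNUBA_IMAGE, $SYMBOLICATOR_IMAGE, $RELAY_IMAGE and
# $SENTRY_EVENT_RETENTION_DAYS. These are typically exported by the onpremise
# install script or provided via a .env file before bringing the stack up, e.g.:
#   docker-compose build
#   docker-compose run --rm web upgrade   # bootstrap/migrate the database
#   docker-compose up -d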