diff --git a/nginx/nginx.conf b/nginx/nginx.conf index c3bb9e5..31b68e0 100644 --- a/nginx/nginx.conf +++ b/nginx/nginx.conf @@ -1,50 +1,73 @@ -user nginx; -worker_processes 1; +user nginx; +worker_processes 1; -error_log /var/log/nginx/error.log warn; -pid /var/run/nginx.pid; +error_log /var/log/nginx/error.log warn; +pid /var/run/nginx.pid; events { - worker_connections 1024; + worker_connections 1024; } http { - default_type application/octet-stream; + default_type application/octet-stream; - log_format main '$remote_addr - $remote_user [$time_local] "$request" ' - '$status $body_bytes_sent "$http_referer" ' - '"$http_user_agent" "$http_x_forwarded_for"'; + log_format main '$remote_addr - $remote_user [$time_local] "$request" ' + '$status $body_bytes_sent "$http_referer" ' + '"$http_user_agent" "$http_x_forwarded_for"'; - sendfile on; - keepalive_timeout 65; + sendfile on; + tcp_nopush on; + tcp_nodelay on; + reset_timedout_connection on; - upstream relay { - server relay:3000; - } + keepalive_timeout 75s; - upstream sentry { - server web:9000; - } + gzip off; + server_tokens off; - server { - listen 80; - # use the docker DNS server to resolve ips for relay and sentry containers - resolver 127.0.0.11 ipv6=off; - client_max_body_size 100M; + server_names_hash_bucket_size 64; + types_hash_max_size 2048; + types_hash_bucket_size 64; + client_max_body_size 5m; - proxy_redirect off; - proxy_set_header Host $host; + proxy_http_version 1.1; + proxy_redirect off; + proxy_buffering off; + proxy_next_upstream error timeout invalid_header http_502 http_503 non_idempotent; + proxy_next_upstream_tries 2; - location /api/store/ { - proxy_pass http://relay; - } - location ~ ^/api/[1-9]\d*/ { - proxy_pass http://relay; - } - location / { - proxy_pass http://sentry; - } - } + # Remove the Connection header if the client sends it, + # it could be "close" to close a keepalive connection + proxy_set_header Connection ''; + proxy_set_header Host $host; + proxy_set_header X-Real-IP 
$remote_addr; + proxy_set_header X-Forwarded-For $remote_addr; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Request-Id $request_id; + proxy_read_timeout 30s; + proxy_send_timeout 5s; + + upstream relay { + server relay:3000; + } + + upstream sentry { + server web:9000; + } + + server { + listen 80; + + location /api/store/ { + proxy_pass http://relay; + } + location ~ ^/api/[1-9]\d*/ { + proxy_pass http://relay; + } + location / { + proxy_pass http://sentry; + } + } } diff --git a/sentry/sentry.conf.example.py b/sentry/sentry.conf.example.py index 09eb81f..32682aa 100644 --- a/sentry/sentry.conf.example.py +++ b/sentry/sentry.conf.example.py @@ -155,8 +155,30 @@ SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend" SENTRY_WEB_HOST = "0.0.0.0" SENTRY_WEB_PORT = 9000 SENTRY_WEB_OPTIONS = { + # These are for proper HTTP/1.1 support from uWSGI + # Without these it doesn't do keep-alives causing + # issues with Relay's direct requests. + "http-keepalive": True, + "http-chunked-input": True, + # the number of web workers + 'workers': 3, + # Turn off memory reporting "memory-report": False, - # 'workers': 3, # the number of web workers + # Some stuff so uwsgi will cycle workers sensibly + 'max-requests': 100000, + 'max-requests-delta': 500, + 'max-worker-lifetime': 86400, + # Duplicate options from sentry default just so we don't get + # bit by sentry changing a default value that we depend on. + 'thunder-lock': True, + 'log-x-forwarded-for': False, + 'buffer-size': 32768, + 'limit-post': 209715200, + 'disable-logging': True, + 'reload-on-rss': 600, + 'ignore-sigpipe': True, + 'ignore-write-errors': True, + 'disable-write-exception': True, } ###########