2019-11-12 00:18:59 +01:00
|
|
|
# This file is just Python, with a touch of Django which means
|
|
|
|
# you can inherit and tweak settings to your heart's content.
|
|
|
|
|
|
|
|
from sentry.conf.server import * # NOQA
|
|
|
|
|
2020-07-10 22:53:50 +02:00
|
|
|
|
|
|
|
# Generously adapted from pynetlinux: https://git.io/JJmga
|
|
|
|
def get_internal_network():
    """Return a 1-tuple containing the CIDR of the ``eth0`` interface,
    or an empty tuple if the interface cannot be queried.

    The result feeds INTERNAL_IPS / INTERNAL_SYSTEM_IPS below so requests
    originating from the container network are treated as internal.
    """
    import ctypes
    import fcntl
    import math
    import socket
    import struct

    # struct.pack's "s" format requires bytes on Python 3; a plain str here
    # would raise struct.error before the ioctl is even attempted.
    iface = b'eth0'
    sockfd = socket.socket(socket.AF_INET, socket.SOCK_DGRAM)
    ifreq = struct.pack('16sH14s', iface, socket.AF_INET, b'\x00' * 14)

    try:
        # 0x8915 = SIOCGIFADDR: interface IPv4 address.
        ip = struct.unpack(
            "!I", struct.unpack('16sH2x4s8x', fcntl.ioctl(sockfd, 0x8915, ifreq))[2]
        )[0]
        # 0x891B = SIOCGIFNETMASK: interface netmask.
        netmask = socket.ntohl(
            struct.unpack('16sH2xI8x', fcntl.ioctl(sockfd, 0x891B, ifreq))[2]
        )
    except IOError:
        # Interface missing or not queryable: no internal network to report.
        return ()
    base = socket.inet_ntoa(struct.pack("!I", ip & netmask))
    # Convert the netmask into a prefix length (e.g. 255.255.0.0 -> 16).
    netmask_bits = 32 - int(round(math.log(ctypes.c_uint32(~netmask).value + 1, 2), 1))
    return ('{0:s}/{1:d}'.format(base, netmask_bits),)
|
|
|
|
|
|
|
|
|
|
|
|
# Alias both settings to the same tuple: traffic from the container
# network (eth0's CIDR) is considered internal.
INTERNAL_SYSTEM_IPS = INTERNAL_IPS = get_internal_network()
|
|
|
|
|
2019-11-12 00:18:59 +01:00
|
|
|
# Database connection for Django/Sentry. The hostname "postgres" resolves
# to the database container; Sentry ships its own postgres engine wrapper.
DATABASES = {
    "default": dict(
        ENGINE="sentry.db.postgres",
        NAME="postgres",
        USER="postgres",
        PASSWORD="",
        HOST="postgres",
        PORT="",
    )
}

# You should not change this setting after your database has been created
# unless you have altered all schemas first
SENTRY_USE_BIG_INTS = True
|
|
|
|
|
|
|
|
# If you're expecting any kind of real traffic on Sentry, we highly recommend
|
|
|
|
# configuring the CACHES and Redis settings
|
|
|
|
|
|
|
|
###########
# General #
###########

# Instruct Sentry that this install intends to be run by a single organization
# and thus various UI optimizations should be enabled.
SENTRY_SINGLE_ORGANIZATION = True
|
|
|
|
|
2020-05-04 12:36:05 +02:00
|
|
|
# Event retention in days; override via the SENTRY_EVENT_RETENTION_DAYS
# environment variable (defaults to 90 days).
SENTRY_OPTIONS["system.event-retention-days"] = int(env('SENTRY_EVENT_RETENTION_DAYS', '90'))
|
2019-12-30 21:07:17 +01:00
|
|
|
|
2019-11-12 00:18:59 +01:00
|
|
|
#########
# Redis #
#########

# Generic Redis configuration used as defaults for various things including:
# Buffers, Quotas, TSDB

# Single-node "default" cluster pointing at the redis container.
_default_redis_node = {"host": "redis", "password": "", "port": "6379", "db": "0"}
SENTRY_OPTIONS["redis.clusters"] = {"default": {"hosts": {0: _default_redis_node}}}
|
|
|
|
|
|
|
|
#########
# Queue #
#########

# See https://docs.getsentry.com/on-premise/server/queue/ for more
# information on configuring your queue broker and workers. Sentry relies
# on a Python framework called Celery to manage queues.

# Set to a hostname to use RabbitMQ as the Celery broker; when left unset,
# the default Redis cluster configured above is used instead.
rabbitmq_host = None

if not rabbitmq_host:
    BROKER_URL = "redis://:{password}@{host}:{port}/{db}".format(
        **SENTRY_OPTIONS["redis.clusters"]["default"]["hosts"][0]
    )
else:
    BROKER_URL = "amqp://{username}:{password}@{host}/{vhost}".format(
        username="guest", password="guest", host=rabbitmq_host, vhost="/"
    )
|
|
|
|
|
|
|
|
|
|
|
|
#########
# Cache #
#########

# Sentry currently utilizes two separate mechanisms. While CACHES is not a
# requirement, it will optimize several high throughput patterns.

# Django cache backed by the memcached container.
_memcached_cache = {
    "BACKEND": "django.core.cache.backends.memcached.MemcachedCache",
    "LOCATION": ["memcached:11211"],
    "TIMEOUT": 3600,
}
CACHES = {"default": _memcached_cache}

# A primary cache is required for things such as processing events
SENTRY_CACHE = "sentry.cache.redis.RedisCache"
|
|
|
|
|
|
|
|
# Shared Kafka connection settings, reused for both the default Kafka
# cluster definition and the event stream producer below.
DEFAULT_KAFKA_OPTIONS = {
    "bootstrap.servers": "kafka:9092",
    "message.max.bytes": 50000000,
    "socket.timeout.ms": 1000,
}

KAFKA_CLUSTERS["default"] = DEFAULT_KAFKA_OPTIONS

SENTRY_EVENTSTREAM = "sentry.eventstream.kafka.KafkaEventStream"
SENTRY_EVENTSTREAM_OPTIONS = {"producer_configuration": DEFAULT_KAFKA_OPTIONS}
|
|
|
|
|
|
|
|
###############
# Rate Limits #
###############

# Rate limits apply to notification handlers and are enforced per-project
# automatically.

# Redis-backed rate limiter (presumably using the redis.clusters config above).
SENTRY_RATELIMITER = "sentry.ratelimits.redis.RedisRateLimiter"
|
|
|
|
|
|
|
|
##################
# Update Buffers #
##################

# Buffers (combined with queueing) act as an intermediate layer between the
# database and the storage API. They will greatly improve efficiency on large
# numbers of the same events being sent to the API in a short amount of time.
# (read: if you send any kind of real data to Sentry, you should enable buffers)

# Redis-backed buffer implementation.
SENTRY_BUFFER = "sentry.buffer.redis.RedisBuffer"
|
|
|
|
|
|
|
|
##########
# Quotas #
##########

# Quotas allow you to rate limit individual projects or the Sentry install as
# a whole.

# Redis-backed quota implementation.
SENTRY_QUOTAS = "sentry.quotas.redis.RedisQuota"
|
|
|
|
|
|
|
|
########
# TSDB #
########

# The TSDB is used for building charts as well as making things like per-rate
# alerts possible.

# Combined Redis + Snuba time-series backend.
SENTRY_TSDB = "sentry.tsdb.redissnuba.RedisSnubaTSDB"
|
|
|
|
|
|
|
|
#########
# SNUBA #
#########

# Search backend served by Snuba (Sentry's query service); the option
# dicts below use the backends' defaults.
SENTRY_SEARCH = "sentry.search.snuba.EventsDatasetSnubaSearchBackend"
SENTRY_SEARCH_OPTIONS = {}
SENTRY_TAGSTORE_OPTIONS = {}
|
|
|
|
|
|
|
|
###########
# Digests #
###########

# The digest backend powers notification summaries.

# Redis-backed digest implementation.
SENTRY_DIGESTS = "sentry.digests.backends.redis.RedisBackend"
|
|
|
|
|
|
|
|
##############
# Web Server #
##############

# Bind the built-in (uWSGI) web server on all interfaces, port 9000.
SENTRY_WEB_HOST = "0.0.0.0"
SENTRY_WEB_PORT = 9000
SENTRY_WEB_OPTIONS = {
    # These are for proper HTTP/1.1 support from uWSGI
    # Without these it doesn't do keep-alives causing
    # issues with Relay's direct requests.
    "http-keepalive": True,
    "http-chunked-input": True,
    # The number of web workers
    "workers": 3,
    # Turn off memory reporting
    "memory-report": False,
    # Some stuff so uwsgi will cycle workers sensibly
    "max-requests": 100000,
    "max-requests-delta": 500,
    "max-worker-lifetime": 86400,
    # Duplicate options from sentry default just so we don't get
    # bit by sentry changing a default value that we depend on.
    "thunder-lock": True,
    "log-x-forwarded-for": False,
    "buffer-size": 32768,
    "limit-post": 209715200,
    "disable-logging": True,
    "reload-on-rss": 600,
    "ignore-sigpipe": True,
    "ignore-write-errors": True,
    "disable-write-exception": True,
}
|
|
|
|
|
|
|
|
###########
|
|
|
|
# SSL/TLS #
|
|
|
|
###########
|
|
|
|
|
|
|
|
# If you're using a reverse SSL proxy, you should enable the X-Forwarded-Proto
|
|
|
|
# header and enable the settings below
|
|
|
|
|
|
|
|
# SECURE_PROXY_SSL_HEADER = ('HTTP_X_FORWARDED_PROTO', 'https')
|
|
|
|
# SESSION_COOKIE_SECURE = True
|
|
|
|
# CSRF_COOKIE_SECURE = True
|
|
|
|
# SOCIAL_AUTH_REDIRECT_IS_HTTPS = True
|
|
|
|
|
|
|
|
# End of SSL/TLS settings
|
|
|
|
|
|
|
|
############
# Features #
############

SENTRY_FEATURES["projects:sample-events"] = False

# Feature flags switched on for this on-premise install.
_enabled_features = (
    "organizations:discover",
    "organizations:events",
    "organizations:global-views",
    "organizations:integrations-issue-basic",
    "organizations:integrations-issue-sync",
    "organizations:invite-members",
    "organizations:sso-basic",
    "organizations:sso-rippling",
    "organizations:sso-saml2",
    "projects:custom-inbound-filters",
    "projects:data-forwarding",
    "projects:discard-groups",
    "projects:plugins",
    "projects:rate-limits",
    "projects:servicehooks",
)
SENTRY_FEATURES.update(dict.fromkeys(_enabled_features, True))
|
|
|
|
|
|
|
|
######################
# GitHub Integration #
######################

# Extra GitHub permission scopes to request beyond the defaults
# ("repo" presumably grants access to private repositories — confirm
# against the Sentry GitHub integration docs).
GITHUB_EXTENDED_PERMISSIONS = ['repo']
|
2019-11-12 00:18:59 +01:00
|
|
|
|
|
|
|
#########################
|
|
|
|
# Bitbucket Integration #
|
|
|
|
#########################
|
|
|
|
|
|
|
|
# BITBUCKET_CONSUMER_KEY = 'YOUR_BITBUCKET_CONSUMER_KEY'
|
|
|
|
# BITBUCKET_CONSUMER_SECRET = 'YOUR_BITBUCKET_CONSUMER_SECRET'
|