Skip to content

Commit 76a89cf

Browse files
TartanLeGrand
authored and
ugo
committed
refactor: move to bitnami chart
1 parent f741466 commit 76a89cf

File tree

7 files changed

+66
-105
lines changed

7 files changed

+66
-105
lines changed

.gitignore

+2-1
Original file line numberDiff line numberDiff line change
@@ -1,3 +1,4 @@
11
.DS_Store
22
.idea
3-
3+
.vscode
4+
charts/**/charts/*.tgz

charts/sentry/Chart.lock

+4-4
Original file line numberDiff line numberDiff line change
@@ -9,8 +9,8 @@ dependencies:
99
repository: oci://registry-1.docker.io/bitnamicharts
1010
version: 29.3.14
1111
- name: clickhouse
12-
repository: https://sentry-kubernetes.github.io/charts
13-
version: 3.14.1
12+
repository: oci://registry-1.docker.io/bitnamicharts
13+
version: 9.2.0
1414
- name: zookeeper
1515
repository: oci://registry-1.docker.io/bitnamicharts
1616
version: 11.4.11
@@ -23,5 +23,5 @@ dependencies:
2323
- name: nginx
2424
repository: oci://registry-1.docker.io/bitnamicharts
2525
version: 18.2.5
26-
digest: sha256:2b19e9605468921ff96afb9f393ffe09d3121e5b91f32789e025851f0b66ff63
27-
generated: "2025-01-17T18:17:43.022337376+06:00"
26+
digest: sha256:02a8d4e7b6099210aa2c93e8502546e670d7d7f59a70953166cda17f7fb7fa12
27+
generated: "2025-05-05T17:19:56.987739062+02:00"

charts/sentry/Chart.yaml

+2-6
Original file line numberDiff line numberDiff line change
@@ -18,13 +18,9 @@ dependencies:
1818
version: 29.3.14
1919
condition: kafka.enabled
2020
- name: clickhouse
21-
repository: https://sentry-kubernetes.github.io/charts
22-
version: 3.14.1
23-
condition: clickhouse.enabled
24-
- name: zookeeper
2521
repository: oci://registry-1.docker.io/bitnamicharts
26-
version: 11.4.11
27-
condition: zookeeper.enabled
22+
version: 9.2.0
23+
condition: clickhouse.enabled
2824
- name: rabbitmq
2925
repository: oci://registry-1.docker.io/bitnamicharts
3026
version: 11.16.2

charts/sentry/templates/_helper.tpl

+4-11
Original file line numberDiff line numberDiff line change
@@ -344,7 +344,7 @@ Set ClickHouse port
344344
*/}}
345345
{{- define "sentry.clickhouse.port" -}}
346346
{{- if .Values.clickhouse.enabled -}}
347-
{{- default 9000 .Values.clickhouse.clickhouse.tcp_port }}
347+
{{- default 9000 .Values.clickhouse.containerPorts.tcp }}
348348
{{- else -}}
349349
{{ required "A valid .Values.externalClickhouse.tcpPort is required" .Values.externalClickhouse.tcpPort }}
350350
{{- end -}}
@@ -355,7 +355,7 @@ Set ClickHouse HTTP port
355355
*/}}
356356
{{- define "sentry.clickhouse.http_port" -}}
357357
{{- if .Values.clickhouse.enabled -}}
358-
{{- default 8123 .Values.clickhouse.clickhouse.http_port }}
358+
{{- default 8123 .Values.clickhouse.containerPorts.http }}
359359
{{- else -}}
360360
{{ required "A valid .Values.externalClickhouse.httpPort is required" .Values.externalClickhouse.httpPort }}
361361
{{- end -}}
@@ -377,11 +377,7 @@ Set ClickHouse User
377377
*/}}
378378
{{- define "sentry.clickhouse.username" -}}
379379
{{- if .Values.clickhouse.enabled -}}
380-
{{- if .Values.clickhouse.clickhouse.configmap.users.enabled -}}
381-
{{ (index .Values.clickhouse.clickhouse.configmap.users.user 0).name }}
382-
{{- else -}}
383-
default
384-
{{- end -}}
380+
{{- .Values.clickhouse.auth.username }}
385381
{{- else -}}
386382
{{ required "A valid .Values.externalClickhouse.username is required" .Values.externalClickhouse.username }}
387383
{{- end -}}
@@ -392,10 +388,7 @@ Set ClickHouse Password
392388
*/}}
393389
{{- define "sentry.clickhouse.password" -}}
394390
{{- if .Values.clickhouse.enabled -}}
395-
{{- if .Values.clickhouse.clickhouse.configmap.users.enabled -}}
396-
{{ (index .Values.clickhouse.clickhouse.configmap.users.user 0).config.password }}
397-
{{- else -}}
398-
{{- end -}}
391+
{{- .Values.clickhouse.auth.password }}
399392
{{- else -}}
400393
{{ .Values.externalClickhouse.password }}
401394
{{- end -}}

charts/sentry/templates/hooks/sentry-db-check.job.yaml

+1-12
Original file line numberDiff line numberDiff line change
@@ -87,26 +87,15 @@ spec:
8787
CLICKHOUSE_STATUS=0
8888
while [ $CLICKHOUSE_STATUS -eq 0 ]; do
8989
CLICKHOUSE_STATUS=1
90-
CLICKHOUSE_REPLICAS={{ .Values.clickhouse.enabled | ternary .Values.clickhouse.clickhouse.replicas "1" }}
91-
i=0; while [ $i -lt $CLICKHOUSE_REPLICAS ]; do
9290
{{- if .Values.clickhouse.enabled }}
93-
CLICKHOUSE_HOST={{ $clickhouseHost }}-$i.{{ $clickhouseHost }}-headless
91+
CLICKHOUSE_HOST={{ $clickhouseHost }}-headless
9492
{{- else }}
9593
CLICKHOUSE_HOST={{ .Values.externalClickhouse.host }}
9694
{{- end }}
9795
if ! nc -z "$CLICKHOUSE_HOST" {{ $clickhousePort }}; then
9896
CLICKHOUSE_STATUS=0
9997
echo "$CLICKHOUSE_HOST is not available yet"
10098
fi
101-
{{- if and .Values.clickhouse.enabled .Values.clickhouse.clickhouse.configmap.remote_servers.replica.backup.enabled }}
102-
CLICKHOUSE_HOST={{ $clickhouseHost }}-replica-$i.{{ $clickhouseHost }}-replica-headless
103-
if ! nc -z "$CLICKHOUSE_HOST" {{ $clickhousePort }}; then
104-
CLICKHOUSE_STATUS=0
105-
echo "$CLICKHOUSE_HOST is not available yet"
106-
fi
107-
{{- end }}
108-
i=$((i+1))
109-
done
11099
if [ "$CLICKHOUSE_STATUS" -eq 0 ]; then
111100
echo "Clickhouse not ready. Sleeping for 10s before trying again"
112101
sleep 10;

charts/sentry/templates/snuba/_helper-snuba.tpl

+1-9
Original file line numberDiff line numberDiff line change
@@ -74,15 +74,7 @@ settings.py: |
7474
The default clickhouse installation runs in distributed mode, while the external
7575
clickhouse configured can be configured any way you choose
7676
*/}}
77-
{{- if and .Values.externalClickhouse.singleNode (not .Values.clickhouse.enabled) }}
78-
"single_node": True,
79-
{{- else }}
80-
"single_node": False,
81-
{{- end }}
82-
{{- if or .Values.clickhouse.enabled (not .Values.externalClickhouse.singleNode) }}
83-
"cluster_name": {{ include "sentry.clickhouse.cluster.name" . | quote }},
84-
"distributed_cluster_name": {{ include "sentry.clickhouse.cluster.name" . | quote }},
85-
{{- end }}
77+
"single_node": True
8678
},
8779
]
8880

charts/sentry/values.yaml

+52-62
Original file line numberDiff line numberDiff line change
@@ -714,7 +714,7 @@ sentry:
714714
# noStrictOffsetReset: false
715715

716716
billingMetricsConsumer:
717-
enabled: true
717+
enabled: false
718718
replicas: 1
719719
env: []
720720
resources: {}
@@ -1253,7 +1253,7 @@ snuba:
12531253
# medium: Memory
12541254

12551255
outcomesBillingConsumer:
1256-
enabled: true
1256+
enabled: false
12571257
replicas: 1
12581258
env: []
12591259
resources: {}
@@ -2064,7 +2064,7 @@ filestore:
20642064
## GKE, AWS & OpenStack)
20652065
##
20662066
# storageClass: "-"
2067-
accessMode: ReadWriteOnce # Set ReadWriteMany for work Replays
2067+
accessMode: ReadWriteMany
20682068
size: 10Gi
20692069

20702070
## Whether to mount the persistent volume to the Sentry worker and
@@ -2075,7 +2075,7 @@ filestore:
20752075
## Please note that you may need to change your accessMode to ReadWriteMany
20762076
## if you plan on having the web, worker and cron deployments run on
20772077
## different nodes.
2078-
persistentWorkers: false
2078+
persistentWorkers: true
20792079

20802080
## If existingClaim is specified, no PVC will be created and this claim will
20812081
## be used
@@ -2119,49 +2119,50 @@ config:
21192119
maxWorkerLifetime: 86400
21202120

21212121
clickhouse:
2122+
image:
2123+
tag: 23.8.16-debian-12-r0
21222124
enabled: true
2123-
nodeSelector: {}
2124-
# tolerations: []
2125-
clickhouse:
2126-
replicas: "1"
2127-
configmap:
2128-
remote_servers:
2129-
internal_replication: true
2130-
replica:
2131-
backup:
2132-
enabled: false
2133-
zookeeper_servers:
2134-
enabled: true
2135-
config:
2136-
- index: "clickhouse"
2137-
hostTemplate: "{{ .Release.Name }}-zookeeper-clickhouse"
2138-
port: "2181"
2139-
users:
2140-
enabled: false
2141-
user:
2142-
# the first user will be used if enabled
2143-
- name: default
2144-
config:
2145-
password: ""
2146-
networks:
2147-
- ::/0
2148-
profile: default
2149-
quota: default
2150-
2151-
persistentVolumeClaim:
2152-
enabled: true
2153-
dataPersistentVolume:
2154-
enabled: true
2155-
accessModes:
2156-
- "ReadWriteOnce"
2157-
storage: "30Gi"
2158-
2159-
## Use this to enable an extra service account
2160-
# serviceAccount:
2161-
# annotations: {}
2162-
# enabled: false
2163-
# name: "sentry-clickhouse"
2164-
# automountServiceAccountToken: true
2125+
containerPorts:
2126+
http: 8123
2127+
https: 8443
2128+
tcp: 9000
2129+
replicaCount: 1
2130+
shards: 1
2131+
keeper:
2132+
replicaCount: 1
2133+
auth:
2134+
username: default
2135+
password: "thePassword"
2136+
configdFiles:
2137+
config.xml: |
2138+
<yandex>
2139+
<logger>
2140+
<level>warning</level>
2141+
<console>true</console>
2142+
</logger>
2143+
<query_thread_log remove="remove"/>
2144+
<query_log remove="remove"/>
2145+
<text_log remove="remove"/>
2146+
<trace_log remove="remove"/>
2147+
<metric_log remove="remove"/>
2148+
<asynchronous_metric_log remove="remove"/>
2149+
2150+
<session_log remove="remove"/>
2151+
<part_log remove="remove"/>
2152+
2153+
<allow_nullable_key>1</allow_nullable_key>
2154+
2155+
<profiles>
2156+
<default>
2157+
<log_queries>0</log_queries>
2158+
<log_query_threads>0</log_query_threads>
2159+
</default>
2160+
</profiles>
2161+
<merge_tree>
2162+
<enable_mixed_granularity_parts>1</enable_mixed_granularity_parts>
2163+
<max_suspicious_broken_parts>10</max_suspicious_broken_parts>
2164+
</merge_tree>
2165+
</yandex>
21652166
21662167
## This value is only used when clickhouse.enabled is set to false
21672168
##
@@ -2184,27 +2185,16 @@ externalClickhouse:
21842185
##
21852186
# clusterName: test_shard_localhost
21862187

2187-
# Settings for Zookeeper.
2188-
# See https://github.com/bitnami/charts/tree/master/bitnami/zookeeper
2189-
zookeeper:
2190-
enabled: true
2191-
nameOverride: zookeeper-clickhouse
2192-
replicaCount: 1
2193-
nodeSelector: {}
2194-
# tolerations: []
2195-
## When increasing the number of exceptions, you need to increase persistence.size
2196-
# persistence:
2197-
# size: 8Gi
2198-
21992188
# Settings for Kafka.
22002189
# See https://github.com/bitnami/charts/tree/master/bitnami/kafka
22012190
kafka:
22022191
enabled: true
22032192
provisioning:
22042193
## Increasing the replicationFactor enhances data reliability during Kafka pod failures by replicating data across multiple brokers.
22052194
# Note that existing topics will remain with replicationFactor: 1 when updated.
2206-
replicationFactor: 3
2207-
enabled: true
2195+
replicationFactor: 1
2196+
parallel: 2
2197+
enabled: false
22082198
# Topic list is based on files below.
22092199
# - https://github.com/getsentry/snuba/blob/master/snuba/utils/streams/topics.py
22102200
# - https://github.com/getsentry/sentry/blob/master/src/sentry/conf/types/kafka_definition.py
@@ -2352,7 +2342,7 @@ kafka:
23522342
kraft:
23532343
enabled: true
23542344
controller:
2355-
replicaCount: 3
2345+
replicaCount: 1
23562346
nodeSelector: {}
23572347
# tolerations: []
23582348
## if the load on the kafka controller increases, resourcesPreset must be increased
@@ -2421,7 +2411,7 @@ externalKafka:
24212411
ms: 1000
24222412

24232413
sourcemaps:
2424-
enabled: false
2414+
enabled: true
24252415

24262416
redis:
24272417
enabled: true

0 commit comments

Comments (0)