DirectorySecurity AdvisoriesPricing
Sign in
Directory
clickhouse-operator logoHELM

clickhouse-operator

Helm chart
Last changed
Request a free trial

Contact our team to test out this Helm chart and related images for free. Please also indicate any other images you would like to evaluate.

Overview
Chart versions
Default values
Chart metadata
Images

Tag:

1
namespaceOverride: ""
2
# commonLabels -- set of labels that will be applied to all the resources for the operator
3
commonLabels: {}
4
# commonAnnotations -- set of annotations that will be applied to all the resources for the operator
5
commonAnnotations: {}
6
deployment:
7
# look details in `kubectl explain deployment.spec.strategy`
8
strategy:
9
type: Recreate
10
crdHook:
11
# crdHook.enabled -- enable automatic CRD installation/update via pre-install/pre-upgrade hooks
12
# when disabled, CRDs must be installed manually using kubectl apply
13
enabled: true
14
image:
15
# crdHook.image.repository -- image repository for CRD installation job
16
repository: cgr.dev/chainguard-private/kubectl
17
# crdHook.image.tag -- image tag for CRD installation job
18
tag: latest-dev@sha256:6c9c805891b4366b9fe19c537945abbe5f1234b358201d4be622287896bcacd9
19
# crdHook.image.pullPolicy -- image pull policy for CRD installation job
20
pullPolicy: IfNotPresent
21
# crdHook.imagePullSecrets -- image pull secrets for CRD installation job
22
# possible value format `[{"name":"your-secret-name"}]`,
23
# check `kubectl explain pod.spec.imagePullSecrets` for details
24
imagePullSecrets: []
25
# crdHook.resources -- resource limits and requests for CRD installation job
26
resources: {}
27
# limits:
28
# cpu: 100m
29
# memory: 128Mi
30
# requests:
31
# cpu: 100m
32
# memory: 128Mi
33
# crdHook.nodeSelector -- node selector for CRD installation job
34
nodeSelector: {}
35
# crdHook.tolerations -- tolerations for CRD installation job
36
tolerations: []
37
# crdHook.affinity -- affinity for CRD installation job
38
affinity: {}
39
# crdHook.annotations -- additional annotations for CRD installation job
40
annotations: {}
41
operator:
42
image:
43
# operator.image.registry -- optional image registry prefix (e.g. 1234567890.dkr.ecr.us-east-1.amazonaws.com)
44
registry: ""
45
# operator.image.repository -- image repository
46
repository: cgr.dev/chainguard-private/clickhouse-operator
47
# operator.image.tag -- image tag (chart's appVersion value will be used if not set)
48
tag: latest@sha256:c08526e0fbf2fb68bbe8d58a42f5f76f9c5029ef7c8d9c14878e3c45dde23cee
49
# operator.image.pullPolicy -- image pull policy
50
pullPolicy: IfNotPresent
51
containerSecurityContext: {}
52
# operator.resources -- custom resource configuration, check `kubectl explain pod.spec.containers.resources` for details
53
resources: {}
54
# limits:
55
# cpu: 100m
56
# memory: 128Mi
57
# requests:
58
# cpu: 100m
59
# memory: 128Mi
60
61
# operator.priorityClassName -- priority class name for the clickhouse-operator deployment, check `kubectl explain pod.spec.priorityClassName` for details
62
# @default -- ""
63
priorityClassName: ""
64
# operator.env -- additional environment variables for the clickhouse-operator container in deployment
65
# possible format value `[{"name": "SAMPLE", "value": "text"}]`
66
env: []
67
metrics:
68
enabled: true
69
image:
70
# metrics.image.registry -- optional image registry prefix (e.g. 1234567890.dkr.ecr.us-east-1.amazonaws.com)
71
registry: ""
72
# metrics.image.repository -- image repository
73
repository: cgr.dev/chainguard-private/clickhouse-operator-metrics-exporter
74
# metrics.image.tag -- image tag (chart's appVersion value will be used if not set)
75
tag: latest@sha256:d53336523396418c847a5658c2aac7eee43a82467134da8bc59b6c1c326ec811
76
# metrics.image.pullPolicy -- image pull policy
77
pullPolicy: IfNotPresent
78
containerSecurityContext: {}
79
# metrics.resources -- custom resource configuration
80
resources: {}
81
# limits:
82
# cpu: 100m
83
# memory: 128Mi
84
# requests:
85
# cpu: 100m
86
# memory: 128Mi
87
88
# metrics.env -- additional environment variables for the deployment of metrics-exporter containers
89
# possible format value `[{"name": "SAMPLE", "value": "text"}]`
90
env: []
91
# imagePullSecrets -- image pull secret for private images in clickhouse-operator pod
92
# possible value format `[{"name":"your-secret-name"}]`,
93
# check `kubectl explain pod.spec.imagePullSecrets` for details
94
imagePullSecrets: []
95
# podLabels -- labels to add to the clickhouse-operator pod
96
podLabels: {}
97
# podAnnotations -- annotations to add to the clickhouse-operator pod, check `kubectl explain pod.spec.annotations` for details
98
# @default -- check the `values.yaml` file
99
podAnnotations:
100
prometheus.io/port: '8888'
101
prometheus.io/scrape: 'true'
102
clickhouse-operator-metrics/port: '9999'
103
clickhouse-operator-metrics/scrape: 'true'
104
# nameOverride -- override name of the chart
105
nameOverride: ""
106
# fullnameOverride -- full name of the chart.
107
fullnameOverride: ""
108
serviceAccount:
109
# serviceAccount.create -- specifies whether a service account should be created
110
create: true
111
# serviceAccount.annotations -- annotations to add to the service account
112
annotations: {}
113
# serviceAccount.name -- the name of the service account to use; if not set and create is true, a name is generated using the fullname template
114
name:
115
rbac:
116
# rbac.create -- specifies whether rbac resources should be created
117
create: true
118
# rbac.namespaceScoped -- specifies whether to create roles and rolebindings at the cluster level or namespace level
119
namespaceScoped: false
120
secret:
121
# secret.create -- create a secret with operator credentials
122
create: true
123
# secret.username -- operator credentials username
124
username: clickhouse_operator
125
# secret.password -- operator credentials password
126
password: clickhouse_operator_password
127
# nodeSelector -- node for scheduler pod assignment, check `kubectl explain pod.spec.nodeSelector` for details
128
nodeSelector: {}
129
# tolerations -- tolerations for scheduler pod assignment, check `kubectl explain pod.spec.tolerations` for details
130
tolerations: []
131
# affinity -- affinity for scheduler pod assignment, check `kubectl explain pod.spec.affinity` for details
132
affinity: {}
133
# podSecurityContext - operator deployment SecurityContext, check `kubectl explain pod.spec.securityContext` for details
134
podSecurityContext: {}
135
# topologySpreadConstraints - topologySpreadConstraints affinity for scheduler pod assignment, check `kubectl explain pod.spec.topologySpreadConstraints` for details
136
topologySpreadConstraints: []
137
serviceMonitor:
138
# serviceMonitor.enabled -- ServiceMonitor Custom resource is created for a [prometheus-operator](https://github.com/prometheus-operator/prometheus-operator)
139
# Two endpoints will be created in the ServiceMonitor: ch-metrics on port 8888 and op-metrics on port 9999. You can specify interval, scrapeTimeout, relabelings, metricRelabelings for each endpoint below
140
enabled: false
141
# serviceMonitor.additionalLabels -- additional labels for service monitor
142
additionalLabels: {}
143
clickhouseMetrics:
144
# serviceMonitor.interval for ch-metrics endpoint --
145
interval: 30s
146
# serviceMonitor.scrapeTimeout for ch-metrics endpoint -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value in which the latter is used.
147
scrapeTimeout: ""
148
# serviceMonitor.relabelings for ch-metrics endpoint -- Prometheus [RelabelConfigs] to apply to samples before scraping
149
relabelings: []
150
# serviceMonitor.metricRelabelings for ch-metrics endpoint -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
151
metricRelabelings: []
152
operatorMetrics:
153
# serviceMonitor.interval for op-metrics endpoint --
154
interval: 30s
155
# serviceMonitor.scrapeTimeout for op-metrics endpoint -- Prometheus ServiceMonitor scrapeTimeout. If empty, Prometheus uses the global scrape timeout unless it is less than the target's scrape interval value in which the latter is used.
156
scrapeTimeout: ""
157
# serviceMonitor.relabelings for op-metrics endpoint -- Prometheus [RelabelConfigs] to apply to samples before scraping
158
relabelings: []
159
# serviceMonitor.metricRelabelings for op-metrics endpoint -- Prometheus [MetricRelabelConfigs] to apply to samples before ingestion
160
metricRelabelings: []
161
# configs -- clickhouse operator configs
162
# @default -- check the `values.yaml` file for the config content (auto-generated from latest operator release)
163
configs:
164
confdFiles: null
165
configdFiles:
166
01-clickhouse-01-listen.xml: |
167
<!-- IMPORTANT -->
168
<!-- This file is auto-generated -->
169
<!-- Do not edit this file - all changes would be lost -->
170
<!-- Edit appropriate template in the following folder: -->
171
<!-- deploy/builder/templates-config -->
172
<!-- IMPORTANT -->
173
<yandex>
174
<!-- Listen wildcard address to allow accepting connections from other containers and host network. -->
175
<listen_host>::</listen_host>
176
<listen_host>0.0.0.0</listen_host>
177
<listen_try>1</listen_try>
178
</yandex>
179
01-clickhouse-02-logger.xml: |
180
<!-- IMPORTANT -->
181
<!-- This file is auto-generated -->
182
<!-- Do not edit this file - all changes would be lost -->
183
<!-- Edit appropriate template in the following folder: -->
184
<!-- deploy/builder/templates-config -->
185
<!-- IMPORTANT -->
186
<yandex>
187
<logger>
188
<!-- Possible levels: https://github.com/pocoproject/poco/blob/devel/Foundation/include/Poco/Logger.h#L439 -->
189
<level>debug</level>
190
<log>/var/log/clickhouse-server/clickhouse-server.log</log>
191
<errorlog>/var/log/clickhouse-server/clickhouse-server.err.log</errorlog>
192
<size>1000M</size>
193
<count>10</count>
194
<!-- Default behavior is autodetection (log to console if not daemon mode and is tty) -->
195
<console>1</console>
196
</logger>
197
</yandex>
198
01-clickhouse-03-query_log.xml: |
199
<!-- IMPORTANT -->
200
<!-- This file is auto-generated -->
201
<!-- Do not edit this file - all changes would be lost -->
202
<!-- Edit appropriate template in the following folder: -->
203
<!-- deploy/builder/templates-config -->
204
<!-- IMPORTANT -->
205
<yandex>
206
<query_log replace="1">
207
<database>system</database>
208
<table>query_log</table>
209
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
210
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
211
</query_log>
212
<query_thread_log remove="1"/>
213
</yandex>
214
01-clickhouse-04-part_log.xml: |
215
<!-- IMPORTANT -->
216
<!-- This file is auto-generated -->
217
<!-- Do not edit this file - all changes would be lost -->
218
<!-- Edit appropriate template in the following folder: -->
219
<!-- deploy/builder/templates-config -->
220
<!-- IMPORTANT -->
221
<yandex>
222
<part_log replace="1">
223
<database>system</database>
224
<table>part_log</table>
225
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
226
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
227
</part_log>
228
</yandex>
229
01-clickhouse-05-trace_log.xml: |-
230
<!-- IMPORTANT -->
231
<!-- This file is auto-generated -->
232
<!-- Do not edit this file - all changes would be lost -->
233
<!-- Edit appropriate template in the following folder: -->
234
<!-- deploy/builder/templates-config -->
235
<!-- IMPORTANT -->
236
<yandex>
237
<trace_log replace="1">
238
<database>system</database>
239
<table>trace_log</table>
240
<engine>Engine = MergeTree PARTITION BY event_date ORDER BY event_time TTL event_date + interval 30 day</engine>
241
<flush_interval_milliseconds>7500</flush_interval_milliseconds>
242
</trace_log>
243
</yandex>
244
files:
245
config.yaml:
246
# IMPORTANT
247
# This file is auto-generated
248
# Do not edit this file - all changes would be lost
249
# Edit appropriate template in the following folder:
250
# deploy/builder/templates-config
251
# IMPORTANT
252
#
253
# Template parameters available:
254
# WATCH_NAMESPACES=
255
# CH_USERNAME_PLAIN=
256
# CH_PASSWORD_PLAIN=
257
# CH_CREDENTIALS_SECRET_NAMESPACE=
258
# CH_CREDENTIALS_SECRET_NAME=clickhouse-operator
259
# VERBOSITY=1
260
261
################################################
262
##
263
## Watch section
264
##
265
################################################
266
watch:
267
# List of namespaces where clickhouse-operator watches for events.
268
# Concurrently running operators should watch on different namespaces.
269
# IMPORTANT
270
# Regexp is applicable.
271
namespaces: []
272
clickhouse:
273
configuration:
274
################################################
275
##
276
## Configuration files section
277
##
278
################################################
279
file:
280
# Each 'path' can be either absolute or relative.
281
# In case path is absolute - it is used as is
282
# In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
283
path:
284
# Path to the folder where ClickHouse configuration files common for all instances within a CHI are located.
285
common: chi/config.d
286
# Path to the folder where ClickHouse configuration files unique for each instance (host) within a CHI are located.
287
host: chi/conf.d
288
# Path to the folder where ClickHouse configuration files with users' settings are located.
289
# Files are common for all instances within a CHI.
290
user: chi/users.d
291
################################################
292
##
293
## Configuration users section
294
##
295
################################################
296
user:
297
# Default settings for user accounts, created by the operator.
298
# IMPORTANT. These are not access credentials or settings for 'default' user account,
299
# it is a template for filling out missing fields for all user accounts to be created by the operator,
300
# with the following EXCEPTIONS:
301
# 1. 'default' user account DOES NOT use provided password, but uses all the rest of the fields.
302
# Password for 'default' user account has to be provided explicitly, if to be used.
303
# 2. CHOP user account DOES NOT use:
304
# - profile setting. It uses predefined profile called 'clickhouse_operator'
305
# - quota setting. It uses empty quota name.
306
# - networks IP setting. Operator specifies 'networks/ip' user setting to match operators' pod IP only.
307
# - password setting. Password for CHOP account is used from 'clickhouse.access.*' section
308
default:
309
# Default values for ClickHouse user account(s) created by the operator
310
# 1. user/profile - string
311
# 2. user/quota - string
312
# 3. user/networks/ip - multiple strings
313
# 4. user/password - string
314
# These values can be overwritten on per-user basis.
315
profile: "default"
316
quota: "default"
317
networksIP:
318
- "::1"
319
- "127.0.0.1"
320
password: "default"
321
################################################
322
##
323
## Configuration network section
324
##
325
################################################
326
network:
327
# Default host_regexp to limit network connectivity from outside
328
hostRegexpTemplate: "(chi-{chi}-[^.]+\\d+-\\d+|clickhouse\\-{chi})\\.{namespace}\\.svc\\.cluster\\.local$"
329
################################################
330
##
331
## Configuration restart policy section
332
## Configuration restart policy describes what configuration changes require ClickHouse restart
333
##
334
################################################
335
configurationRestartPolicy:
336
rules:
337
# IMPORTANT!
338
# Special version of "*" - default version - has to satisfy all ClickHouse versions.
339
# Default version will also be used in case ClickHouse version is unknown.
340
# ClickHouse version may be unknown due to host being down - for example, because of incorrect "settings" section.
341
# ClickHouse is not willing to start in case incorrect/unknown settings are provided in config file.
342
- version: "*"
343
rules:
344
# see https://kb.altinity.com/altinity-kb-setup-and-maintenance/altinity-kb-server-config-files/#server-config-configxml-sections-which-dont-require-restart
345
# to be replaced with "select * from system.server_settings where changeable_without_restart = 'No'"
346
- settings/*: "yes"
347
# single values
348
- settings/access_control_path: "no"
349
- settings/dictionaries_config: "no"
350
- settings/max_server_memory_*: "no"
351
- settings/max_*_to_drop: "no"
352
- settings/max_concurrent_queries: "no"
353
- settings/models_config: "no"
354
- settings/user_defined_executable_functions_config: "no"
355
# structured XML
356
- settings/logger/*: "no"
357
- settings/macros/*: "no"
358
- settings/remote_servers/*: "no"
359
- settings/user_directories/*: "no"
360
# these settings should not lead to pod restarts
361
- settings/display_secrets_in_show_and_select: "no"
362
- zookeeper/*: "no"
363
- files/*.xml: "yes"
364
- files/config.d/*.xml: "yes"
365
- files/config.d/*dict*.xml: "no"
366
- files/config.d/*no_restart*: "no"
367
# exceptions in default profile
368
- profiles/default/background_*_pool_size: "yes"
369
- profiles/default/max_*_for_server: "yes"
370
- version: "21.*"
371
rules:
372
- settings/logger: "yes"
373
#################################################
374
##
375
## Access to ClickHouse instances
376
##
377
################################################
378
access:
379
# Possible values for 'scheme' are:
380
# 1. http - force http to be used to connect to ClickHouse instances
381
# 2. https - force https to be used to connect to ClickHouse instances
382
# 3. auto - either http or https is selected based on open ports
383
scheme: "auto"
384
# ClickHouse credentials (username, password and port) to be used by the operator to connect to ClickHouse instances.
385
# These credentials are used for:
386
# 1. Metrics requests
387
# 2. Schema maintenance
388
# User with these credentials can be specified in additional ClickHouse .xml config files,
389
# located in 'clickhouse.configuration.file.path.user' folder
390
username: ""
391
password: ""
392
rootCA: ""
393
# Location of the k8s Secret with username and password to be used by the operator to connect to ClickHouse instances.
394
# Can be used instead of explicitly specified username and password available in sections:
395
# - clickhouse.access.username
396
# - clickhouse.access.password
397
# Secret should have two keys:
398
# 1. username
399
# 2. password
400
secret:
401
# Empty `namespace` means that k8s secret would be looked in the same namespace where operator's pod is running.
402
namespace: ""
403
# Empty `name` means no k8s Secret would be looked for
404
name: '{{ include "altinity-clickhouse-operator.fullname" . }}'
405
# Port where to connect to ClickHouse instances to
406
port: 8123
407
# Timeouts used to limit connection and queries from the operator to ClickHouse instances
408
# Specified in seconds.
409
timeouts:
410
# Timeout to set up a connection from the operator to ClickHouse instances. In seconds.
411
connect: 5
412
# Timeout to perform an SQL query from the operator to ClickHouse instances. In seconds.
413
query: 4
414
################################################
415
##
416
## Addons specifies additional configuration sections
417
## Should it be called something like "templates"?
418
##
419
################################################
420
addons:
421
rules:
422
- version: "*"
423
spec:
424
configuration:
425
users:
426
profiles:
427
quotas:
428
settings:
429
files:
430
- version: ">= 23.3"
431
spec:
432
configuration:
433
###
434
### users.d is global while description depends on CH version which may vary on per-host basis
435
### In case of global-ness this may be better to implement via auto-templates
436
###
437
### As a solution, this may be applied on the whole cluster based on any of its hosts
438
###
439
### What to do when host is just created? CH version is not known prior to CH started and user config is required before CH started.
440
### We do not have any info about the cluster on initial creation
441
###
442
users:
443
"{clickhouseOperatorUser}/access_management": 1
444
"{clickhouseOperatorUser}/named_collection_control": 1
445
"{clickhouseOperatorUser}/show_named_collections": 1
446
"{clickhouseOperatorUser}/show_named_collections_secrets": 1
447
profiles:
448
quotas:
449
settings:
450
files:
451
- version: ">= 23.5"
452
spec:
453
configuration:
454
users:
455
profiles:
456
clickhouse_operator/format_display_secrets_in_show_and_select: 1
457
quotas:
458
settings:
459
##
460
## this may be added on per-host basis into host's conf.d folder
461
##
462
display_secrets_in_show_and_select: 1
463
files:
464
#################################################
465
##
466
## Metrics collection
467
##
468
################################################
469
metrics:
470
# Timeouts used to limit connection and queries from the metrics exporter to ClickHouse instances
471
# Specified in seconds.
472
timeouts:
473
# Timeout used to limit metrics collection request. In seconds.
474
# Upon reaching this timeout metrics collection is aborted and no more metrics are collected in this cycle.
475
# All collected metrics are returned.
476
collect: 9
477
# Regexp to match tables in system database to fetch metrics from.
478
# Multiple tables can be matched using regexp. Matched tables are merged using merge() table function.
479
# Default is "^(metrics|custom_metrics)$" which fetches from both system.metrics and system.custom_metrics.
480
tablesRegexp: "^(metrics|custom_metrics)$"
481
keeper:
482
configuration:
483
################################################
484
##
485
## Configuration files section
486
##
487
################################################
488
file:
489
# Each 'path' can be either absolute or relative.
490
# In case path is absolute - it is used as is
491
# In case path is relative - it is relative to the folder where configuration file you are reading right now is located.
492
path:
493
# Path to the folder where Keeper configuration files common for all instances within a CHK are located.
494
common: chk/keeper_config.d
495
# Path to the folder where Keeper configuration files unique for each instance (host) within a CHK are located.
496
host: chk/conf.d
497
# Path to the folder where Keeper configuration files with users' settings are located.
498
# Files are common for all instances within a CHI.
499
user: chk/users.d
500
################################################
501
##
502
## Template(s) management section
503
##
504
################################################
505
template:
506
chi:
507
# CHI template updates handling policy
508
# Possible policy values:
509
# - ReadOnStart. Accept CHIT updates on the operator's start only.
510
# - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply new CHITs on next regular reconcile of the CHI
511
policy: ApplyOnNextReconcile
512
# Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
513
# Templates are added to the list of all templates and used when CHI is reconciled.
514
# Templates are applied in sorted alpha-numeric order.
515
path: chi/templates.d
516
chk:
517
# CHK template updates handling policy
518
# Possible policy values:
519
# - ReadOnStart. Accept CHIT updates on the operator's start only.
520
# - ApplyOnNextReconcile. Accept CHIT updates at all time. Apply new CHITs on next regular reconcile of the CHI
521
policy: ApplyOnNextReconcile
522
# Path to the folder where ClickHouseInstallation templates .yaml manifests are located.
523
# Templates are added to the list of all templates and used when CHI is reconciled.
524
# Templates are applied in sorted alpha-numeric order.
525
path: chk/templates.d
526
################################################
527
##
528
## Reconcile section
529
##
530
################################################
531
reconcile:
532
# Reconcile runtime settings
533
runtime:
534
# Max number of concurrent CHI reconciles in progress
535
reconcileCHIsThreadsNumber: 10
536
# The operator reconciles shards concurrently in each CHI with the following limitations:
537
# 1. Number of shards being reconciled (and thus having hosts down) in each CHI concurrently
538
# can not be greater than 'reconcileShardsThreadsNumber'.
539
# 2. Percentage of shards being reconciled (and thus having hosts down) in each CHI concurrently
540
# can not be greater than 'reconcileShardsMaxConcurrencyPercent'.
541
# 3. The first shard is always reconciled alone. Concurrency starts from the second shard and onward.
542
# Thus limiting number of shards being reconciled (and thus having hosts down) in each CHI by both number and percentage
543
544
# Max number of concurrent shard reconciles within one cluster in progress
545
reconcileShardsThreadsNumber: 5
546
# Max percentage of concurrent shard reconciles within one cluster in progress
547
reconcileShardsMaxConcurrencyPercent: 50
548
# Reconcile StatefulSet scenario
549
statefulSet:
550
# Create StatefulSet scenario
551
create:
552
# What to do in case created StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
553
# Possible options:
554
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
555
# do not try to fix or delete or update it, just abort reconcile cycle.
556
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
557
# 2. delete - delete newly created problematic StatefulSet and follow 'abort' path afterwards.
558
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
559
onFailure: ignore
560
# Update StatefulSet scenario
561
update:
562
# How many seconds to wait for created/updated StatefulSet to be 'Ready'
563
timeout: 300
564
# How many seconds to wait between checks/polls for created/updated StatefulSet status
565
pollInterval: 5
566
# What to do in case updated StatefulSet is not in 'Ready' after `reconcile.statefulSet.update.timeout` seconds
567
# Possible options:
568
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
569
# do not try to fix or delete or update it, just abort reconcile cycle.
570
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
571
# 2. rollback - delete Pod and rollback StatefulSet to previous Generation.
572
# Pod would be recreated by StatefulSet based on rollback-ed StatefulSet configuration.
573
# Follow 'abort' path afterwards.
574
# 3. ignore - ignore an error, pretend nothing happened, continue reconcile and move on to the next StatefulSet.
575
onFailure: abort
576
# Recreate StatefulSet scenario
577
recreate:
578
# What to do in case operator is in need to recreate StatefulSet?
579
# Possible options:
580
# 1. abort - abort the process, do nothing with the problematic StatefulSet, leave it as it is,
581
# do not try to fix or delete or update it, just abort reconcile cycle.
582
# Do not proceed to the next StatefulSet(s) and wait for an admin to assist.
583
# 2. recreate - proceed and recreate StatefulSet.
584
585
# Triggered when PVC data loss or missing volumes are detected
586
onDataLoss: recreate
587
# Triggered when StatefulSet update fails or StatefulSet is not ready
588
onUpdateFailure: recreate
589
# Reconcile Host scenario
590
host:
591
# The operator during reconcile procedure should wait for a ClickHouse host to achieve the following conditions:
592
wait:
593
# Whether the operator during reconcile procedure should wait for a ClickHouse host:
594
# - to be excluded from a ClickHouse cluster
595
# - to complete all running queries
596
# - to be included into a ClickHouse cluster
597
# respectively, before moving forward with host reconcile
598
exclude: true
599
queries: true
600
include: false
601
# The operator during reconcile procedure should wait for replicas to catch-up
602
# replication delay a.k.a replication lag for the following replicas
603
replicas:
604
# All replicas (new and known earlier) are explicitly requested to wait for replication to catch-up
605
all: no
606
# New replicas only are requested to wait for replication to catch-up
607
new: yes
608
# Replication catch-up is considered to be completed as soon as replication delay
609
# a.k.a replication lag - calculated as "MAX(absolute_delay) FROM system.replicas"
610
# is within this specified delay (in seconds)
611
delay: 10
612
probes:
613
# Whether the operator during host launch procedure should wait for startup probe to succeed.
614
# In case probe is unspecified wait is assumed to be completed successfully.
615
# Default option value is to do not wait.
616
startup: no
617
# Whether the operator during host launch procedure should wait for readiness probe to succeed.
618
# In case probe is unspecified wait is assumed to be completed successfully.
619
# Default option value is to wait.
620
readiness: yes
621
# The operator during reconcile procedure should drop the following entities:
622
drop:
623
replicas:
624
# Whether the operator during reconcile procedure should drop replicas when replica is deleted
625
onDelete: yes
626
# Whether the operator during reconcile procedure should drop replicas when replica volume is lost
627
onLostVolume: yes
628
# Whether the operator during reconcile procedure should drop active replicas when replica is deleted or recreated
629
active: no
630
################################################
631
##
632
## Annotations management section
633
##
634
################################################
635
annotation:
636
# Applied when:
637
# 1. Propagating annotations from the CHI's `metadata.annotations` to child objects' `metadata.annotations`,
638
# 2. Propagating annotations from the CHI Template's `metadata.annotations` to CHI's `metadata.annotations`,
639
# Include annotations from the following list:
640
# Applied only when not empty. Empty list means "include all, no selection"
641
include: []
642
# Exclude annotations from the following list:
643
exclude: []
644
################################################
645
##
646
## Labels management section
647
##
648
################################################
649
label:
650
# Applied when:
651
# 1. Propagating labels from the CHI's `metadata.labels` to child objects' `metadata.labels`,
652
# 2. Propagating labels from the CHI Template's `metadata.labels` to CHI's `metadata.labels`,
653
# Include labels from the following list:
654
# Applied only when not empty. Empty list means "include all, no selection"
655
include: []
656
# Exclude labels from the following list:
657
# Applied only when not empty. Empty list means "nothing to exclude, no selection"
658
exclude: []
659
# Whether to append *Scope* labels to StatefulSet and Pod.
660
# Full list of available *scope* labels check in 'labeler.go'
661
# LabelShardScopeIndex
662
# LabelReplicaScopeIndex
663
# LabelCHIScopeIndex
664
# LabelCHIScopeCycleSize
665
# LabelCHIScopeCycleIndex
666
# LabelCHIScopeCycleOffset
667
# LabelClusterScopeIndex
668
# LabelClusterScopeCycleSize
669
# LabelClusterScopeCycleIndex
670
# LabelClusterScopeCycleOffset
671
appendScope: "no"
672
################################################
673
##
674
## Metrics management section
675
##
676
################################################
677
metrics:
678
labels:
679
exclude: []
680
################################################
681
##
682
## Status management section
683
##
684
################################################
685
status:
686
fields:
687
action: false
688
actions: false
689
error: true
690
errors: true
691
################################################
692
##
693
## StatefulSet management section
694
##
695
################################################
696
statefulSet:
697
revisionHistoryLimit: 0
698
################################################
699
##
700
## Pod management section
701
##
702
################################################
703
pod:
704
# Grace period for Pod termination.
705
# How many seconds to wait between sending
706
# SIGTERM and SIGKILL during Pod termination process.
707
# Increase this number in case of slow shutdown.
708
terminationGracePeriod: 30
709
################################################
710
##
711
## Log parameters section
712
##
713
################################################
714
logger:
715
logtostderr: "true"
716
alsologtostderr: "false"
717
v: "1"
718
stderrthreshold: ""
719
vmodule: ""
720
log_backtrace_at: ""
721
templatesdFiles:
722
001-templates.json.example: |
723
{
724
"apiVersion": "clickhouse.altinity.com/v1",
725
"kind": "ClickHouseInstallationTemplate",
726
"metadata": {
727
"name": "01-default-volumeclaimtemplate"
728
},
729
"spec": {
730
"templates": {
731
"volumeClaimTemplates": [
732
{
733
"name": "chi-default-volume-claim-template",
734
"spec": {
735
"accessModes": [
736
"ReadWriteOnce"
737
],
738
"resources": {
739
"requests": {
740
"storage": "2Gi"
741
}
742
}
743
}
744
}
745
],
746
"podTemplates": [
747
{
748
"name": "chi-default-oneperhost-pod-template",
749
"distribution": "OnePerHost",
750
"spec": {
751
"containers" : [
752
{
753
"name": "clickhouse",
754
"image": "clickhouse/clickhouse-server:23.8",
755
"ports": [
756
{
757
"name": "http",
758
"containerPort": 8123
759
},
760
{
761
"name": "client",
762
"containerPort": 9000
763
},
764
{
765
"name": "interserver",
766
"containerPort": 9009
767
}
768
]
769
}
770
]
771
}
772
}
773
]
774
}
775
}
776
}
777
default-pod-template.yaml.example: |
778
apiVersion: "clickhouse.altinity.com/v1"
779
kind: "ClickHouseInstallationTemplate"
780
metadata:
781
name: "default-oneperhost-pod-template"
782
spec:
783
templates:
784
podTemplates:
785
- name: default-oneperhost-pod-template
786
distribution: "OnePerHost"
787
default-storage-template.yaml.example: |
788
apiVersion: "clickhouse.altinity.com/v1"
789
kind: "ClickHouseInstallationTemplate"
790
metadata:
791
name: "default-storage-template-2Gi"
792
spec:
793
templates:
794
volumeClaimTemplates:
795
- name: default-storage-template-2Gi
796
spec:
797
accessModes:
798
- ReadWriteOnce
799
resources:
800
requests:
801
storage: 2Gi
802
readme: |-
803
Templates in this folder are packaged with an operator and available via 'useTemplate'
804
usersdFiles:
805
01-clickhouse-operator-profile.xml: |
806
<!-- IMPORTANT -->
807
<!-- This file is auto-generated -->
808
<!-- Do not edit this file - all changes would be lost -->
809
<!-- Edit appropriate template in the following folder: -->
810
<!-- deploy/builder/templates-config -->
811
<!-- IMPORTANT -->
812
<!--
813
#
814
# Template parameters available:
815
#
816
-->
817
<yandex>
818
<!-- clickhouse-operator user is generated by the operator based on config.yaml in runtime -->
819
<profiles>
820
<clickhouse_operator>
821
<log_queries>0</log_queries>
822
<skip_unavailable_shards>1</skip_unavailable_shards>
823
<http_connection_timeout>10</http_connection_timeout>
824
<max_concurrent_queries_for_all_users>0</max_concurrent_queries_for_all_users>
825
<os_thread_priority>0</os_thread_priority>
826
</clickhouse_operator>
827
</profiles>
828
</yandex>
829
02-clickhouse-default-profile.xml: |-
830
<!-- IMPORTANT -->
831
<!-- This file is auto-generated -->
832
<!-- Do not edit this file - all changes would be lost -->
833
<!-- Edit appropriate template in the following folder: -->
834
<!-- deploy/builder/templates-config -->
835
<!-- IMPORTANT -->
836
<yandex>
837
<profiles>
838
<default>
839
<os_thread_priority>2</os_thread_priority>
840
<log_queries>1</log_queries>
841
<connect_timeout_with_failover_ms>1000</connect_timeout_with_failover_ms>
842
<distributed_aggregation_memory_efficient>1</distributed_aggregation_memory_efficient>
843
<parallel_view_processing>1</parallel_view_processing>
844
<do_not_merge_across_partitions_select_final>1</do_not_merge_across_partitions_select_final>
845
<load_balancing>nearest_hostname</load_balancing>
846
<prefer_localhost_replica>0</prefer_localhost_replica>
847
<!-- materialize_ttl_recalculate_only>1</materialize_ttl_recalculate_only> 21.10 and above -->
848
</default>
849
</profiles>
850
</yandex>
851
keeperConfdFiles: null
852
keeperConfigdFiles:
853
01-keeper-01-default-config.xml: |
854
<!-- IMPORTANT -->
855
<!-- This file is auto-generated -->
856
<!-- Do not edit this file - all changes would be lost -->
857
<!-- Edit appropriate template in the following folder: -->
858
<!-- deploy/builder/templates-config -->
859
<!-- IMPORTANT -->
860
<clickhouse>
861
<asynchronous_metrics_keeper_metrics_only>1</asynchronous_metrics_keeper_metrics_only>
862
<keeper_server>
863
<coordination_settings>
864
<min_session_timeout_ms>10000</min_session_timeout_ms>
865
<operation_timeout_ms>10000</operation_timeout_ms>
866
<raft_logs_level>information</raft_logs_level>
867
<session_timeout_ms>100000</session_timeout_ms>
868
</coordination_settings>
869
<hostname_checks_enabled>true</hostname_checks_enabled>
870
<log_storage_path>/var/lib/clickhouse-keeper/coordination/logs</log_storage_path>
871
<snapshot_storage_path>/var/lib/clickhouse-keeper/coordination/snapshots</snapshot_storage_path>
872
<storage_path>/var/lib/clickhouse-keeper</storage_path>
873
<tcp_port>2181</tcp_port>
874
</keeper_server>
875
<listen_host>::</listen_host>
876
<listen_host>0.0.0.0</listen_host>
877
<listen_try>1</listen_try>
878
<logger>
879
<console>1</console>
880
<level>information</level>
881
</logger>
882
<max_connections>4096</max_connections>
883
</clickhouse>
884
01-keeper-02-readiness.xml: |
885
<!-- IMPORTANT -->
886
<!-- This file is auto-generated -->
887
<!-- Do not edit this file - all changes would be lost -->
888
<!-- Edit appropriate template in the following folder: -->
889
<!-- deploy/builder/templates-config -->
890
<!-- IMPORTANT -->
891
<clickhouse>
892
<keeper_server>
893
<http_control>
894
<port>9182</port>
895
<readiness>
896
<endpoint>/ready</endpoint>
897
</readiness>
898
</http_control>
899
</keeper_server>
900
</clickhouse>
901
01-keeper-03-enable-reconfig.xml: |-
902
<!-- IMPORTANT -->
903
<!-- This file is auto-generated -->
904
<!-- Do not edit this file - all changes would be lost -->
905
<!-- Edit appropriate template in the following folder: -->
906
<!-- deploy/builder/templates-config -->
907
<!-- IMPORTANT -->
908
<clickhouse>
909
<keeper_server>
910
<enable_reconfiguration>false</enable_reconfiguration>
911
</keeper_server>
912
</clickhouse>
913
keeperTemplatesdFiles:
914
readme: |-
915
Templates in this folder are packaged with an operator and available via 'useTemplate'
916
keeperUsersdFiles: null
917
# additionalResources -- list of additional resources to create (processed via `tpl` function),
918
# useful for create ClickHouse clusters together with clickhouse-operator.
919
# check `kubectl explain chi` for details
920
additionalResources: []
921
# - |
922
# apiVersion: v1
923
# kind: ConfigMap
924
# metadata:
925
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-cm
926
# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
927
# - |
928
# apiVersion: v1
929
# kind: Secret
930
# metadata:
931
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-s
932
# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
933
# stringData:
934
# mykey: my-value
935
# - |
936
# apiVersion: clickhouse.altinity.com/v1
937
# kind: ClickHouseInstallation
938
# metadata:
939
# name: {{ include "altinity-clickhouse-operator.fullname" . }}-chi
940
# namespace: {{ include "altinity-clickhouse-operator.namespace" . }}
941
# spec:
942
# configuration:
943
# clusters:
944
# - name: default
945
# layout:
946
# shardsCount: 1
947
948
dashboards:
949
# dashboards.enabled -- provision grafana dashboards as configMaps (can be synced by grafana dashboards sidecar https://github.com/grafana/helm-charts/blob/grafana-8.3.4/charts/grafana/values.yaml#L778 )
950
enabled: false
951
# dashboards.additionalLabels -- labels to add to a secret with dashboards
952
additionalLabels:
953
# dashboards.additionalLabels.grafana_dashboard -- watched by the official grafana helm chart sidecar when sidecar.dashboards.enabled=true
954
grafana_dashboard: ""
955
# dashboards.annotations -- annotations to add to a secret with dashboards
956
annotations:
957
# dashboards.annotations.grafana_folder -- folder where dashboards will be placed; requires setting sidecar.dashboards.folderAnnotation: grafana_folder in the official grafana helm chart
958
grafana_folder: clickhouse-operator
959

The trusted source for open source

Talk to an expert
PrivacyTerms

Product

Chainguard ContainersChainguard LibrariesChainguard VMsChainguard OS PackagesChainguard ActionsChainguard Agent SkillsIntegrationsPricing
© 2026 Chainguard, Inc. All Rights Reserved.
Chainguard® and the Chainguard logo are registered trademarks of Chainguard, Inc. in the United States and/or other countries.
The other respective trademarks mentioned on this page are owned by the respective companies and use of them does not imply any affiliation or endorsement.