DirectorySecurity AdvisoriesPricing
Sign in
Directory
kube-prometheus-stack logoHELM

kube-prometheus-stack

Helm chart
Last changed
Request a free trial

Contact our team to test out this Helm chart and related images for free. Please also indicate any other images you would like to evaluate.

Overview
Chart versions
Default values
Chart metadata
Images

Tag:

1
# Default values for kube-prometheus-stack.
2
# This is a YAML-formatted file.
3
# Declare variables to be passed into your templates.
4
5
## Provide a name in place of kube-prometheus-stack for `app:` labels
6
##
7
nameOverride: ""
8
## Override the deployment namespace
9
##
10
namespaceOverride: ""
11
## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: 1.26.6
12
##
13
kubeTargetVersionOverride: ""
14
## Allow kubeVersion to be overridden while creating the ingress
15
##
16
kubeVersionOverride: ""
17
## Provide a name to substitute for the full names of resources
18
##
19
fullnameOverride: ""
20
## Labels to apply to all resources
21
##
22
commonLabels: {}
23
# scmhash: abc123
24
# myLabel: aakkmd
25
26
## Install Prometheus Operator CRDs
27
##
28
crds:
29
enabled: true
30
## The CRD upgrade job mitigates the limitation of helm not being able to upgrade CRDs.
31
## The job will apply the CRDs to the cluster before the operator is deployed, using helm hooks.
32
## It deploys a corresponding clusterrole, clusterrolebinding and serviceaccount to apply the CRDs.
33
## This feature is in preview, off by default and may change in the future.
34
upgradeJob:
35
enabled: false
36
forceConflicts: false
37
image:
38
busybox:
39
registry: cgr.dev
40
repository: chainguard-private/busybox
41
tag: latest
42
sha: sha256:a4df82542624593a943071c90310653381295bb95494ff58a4650101aefeafaf
43
pullPolicy: IfNotPresent
44
kubectl:
45
registry: cgr.dev
46
repository: chainguard-private/kubectl
47
tag: latest
48
sha: sha256:2ad180bbbcc8d809f3a9ab75202adeddec89ee5554a46aff8ed5d0429f18a151
49
pullPolicy: IfNotPresent
50
env: {}
51
## Define resources requests and limits for single Pods.
52
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
53
##
54
resources: {}
55
## Additional volumes
56
##
57
extraVolumes: []
58
## Additional volume mounts
59
##
60
extraVolumeMounts: []
61
## Define which Nodes the Pods are scheduled on.
62
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
63
##
64
nodeSelector: {}
65
## Assign custom affinity rules to the upgrade-crd job
66
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
67
##
68
affinity: {}
69
# nodeAffinity:
70
# requiredDuringSchedulingIgnoredDuringExecution:
71
# nodeSelectorTerms:
72
# - matchExpressions:
73
# - key: kubernetes.io/e2e-az-name
74
# operator: In
75
# values:
76
# - e2e-az1
77
# - e2e-az2
78
79
## If specified, the pod's tolerations.
80
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
81
##
82
tolerations: []
83
# - key: "key"
84
# operator: "Equal"
85
# value: "value"
86
# effect: "NoSchedule"
87
88
## If specified, the pod's topology spread constraints.
89
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
90
##
91
topologySpreadConstraints: []
92
# - maxSkew: 1
93
# topologyKey: topology.kubernetes.io/zone
94
# whenUnsatisfiable: DoNotSchedule
95
# labelSelector:
96
# matchLabels:
97
# app: alertmanager
98
99
## Labels to add to the upgrade-crd job
100
##
101
labels: {}
102
## Annotations to add to the upgrade-crd job
103
##
104
annotations: {}
105
## Labels to add to the upgrade-crd pod
106
##
107
podLabels: {}
108
## Annotations to add to the upgrade-crd pod
109
##
110
podAnnotations: {}
111
## Service account for upgrade crd job to use.
112
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
113
##
114
serviceAccount:
115
create: true
116
name: ""
117
annotations: {}
118
labels: {}
119
automountServiceAccountToken: true
120
## Automounting API credentials for upgrade crd job pod.
121
##
122
automountServiceAccountToken: true
123
## Container-specific security context configuration
124
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
125
##
126
containerSecurityContext:
127
allowPrivilegeEscalation: false
128
readOnlyRootFilesystem: true
129
capabilities:
130
drop:
131
- ALL
132
## SecurityContext holds pod-level security attributes and common container settings.
133
## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
134
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
135
##
136
podSecurityContext:
137
fsGroup: 65534
138
runAsGroup: 65534
139
runAsNonRoot: true
140
runAsUser: 65534
141
seccompProfile:
142
type: RuntimeDefault
143
## Custom rules to override "for" and "severity" in defaultRules
144
##
145
customRules: {}
146
# AlertmanagerFailedReload:
147
# for: 3m
148
# AlertmanagerMembersInconsistent:
149
# for: 5m
150
# severity: "warning"
151
152
## Create default rules for monitoring the cluster
153
##
154
defaultRules:
155
create: true
156
rules:
157
alertmanager: true
158
etcd: true
159
configReloaders: true
160
general: true
161
k8sContainerCpuUsageSecondsTotal: true
162
k8sContainerMemoryCache: true
163
k8sContainerMemoryRss: true
164
k8sContainerMemorySwap: true
165
k8sContainerResource: true
166
k8sContainerMemoryWorkingSetBytes: true
167
k8sPodOwner: true
168
kubeApiserverAvailability: true
169
kubeApiserverBurnrate: true
170
kubeApiserverHistogram: true
171
kubeApiserverSlos: true
172
kubeControllerManager: true
173
kubelet: true
174
kubeProxy: true
175
kubePrometheusGeneral: true
176
kubePrometheusNodeRecording: true
177
kubernetesApps: true
178
kubernetesResources: true
179
kubernetesStorage: true
180
kubernetesSystem: true
181
kubeSchedulerAlerting: true
182
kubeSchedulerRecording: true
183
kubeStateMetrics: true
184
network: true
185
node: true
186
nodeExporterAlerting: true
187
nodeExporterRecording: true
188
prometheus: true
189
prometheusOperator: true
190
windows: true
191
# Defines the operator for namespace selection in rules
192
# Use "=~" to include namespaces matching the pattern (default)
193
# Use "!~" to exclude namespaces matching the pattern
194
appNamespacesOperator: "=~"
195
## Reduce app namespace alert scope
196
appNamespacesTarget: ".*"
197
## Set keep_firing_for for all alerts
198
keepFiringFor: ""
199
## Labels for default rules
200
labels: {}
201
## Annotations for default rules
202
annotations: {}
203
## Additional labels for PrometheusRule alerts
204
additionalRuleLabels: {}
205
## Additional annotations for PrometheusRule alerts
206
additionalRuleAnnotations: {}
207
## Additional labels for specific PrometheusRule alert groups
208
additionalRuleGroupLabels:
209
alertmanager: {}
210
etcd: {}
211
configReloaders: {}
212
general: {}
213
k8sContainerCpuUsageSecondsTotal: {}
214
k8sContainerMemoryCache: {}
215
k8sContainerMemoryRss: {}
216
k8sContainerMemorySwap: {}
217
k8sContainerResource: {}
218
k8sPodOwner: {}
219
kubeApiserverAvailability: {}
220
kubeApiserverBurnrate: {}
221
kubeApiserverHistogram: {}
222
kubeApiserverSlos: {}
223
kubeControllerManager: {}
224
kubelet: {}
225
kubeProxy: {}
226
kubePrometheusGeneral: {}
227
kubePrometheusNodeRecording: {}
228
kubernetesApps: {}
229
kubernetesResources: {}
230
kubernetesStorage: {}
231
kubernetesSystem: {}
232
kubeSchedulerAlerting: {}
233
kubeSchedulerRecording: {}
234
kubeStateMetrics: {}
235
network: {}
236
node: {}
237
nodeExporterAlerting: {}
238
nodeExporterRecording: {}
239
prometheus: {}
240
prometheusOperator: {}
241
## Additional annotations for specific PrometheusRule alert groups
242
additionalRuleGroupAnnotations:
243
alertmanager: {}
244
etcd: {}
245
configReloaders: {}
246
general: {}
247
k8sContainerCpuUsageSecondsTotal: {}
248
k8sContainerMemoryCache: {}
249
k8sContainerMemoryRss: {}
250
k8sContainerMemorySwap: {}
251
k8sContainerResource: {}
252
k8sPodOwner: {}
253
kubeApiserverAvailability: {}
254
kubeApiserverBurnrate: {}
255
kubeApiserverHistogram: {}
256
kubeApiserverSlos: {}
257
kubeControllerManager: {}
258
kubelet: {}
259
kubeProxy: {}
260
kubePrometheusGeneral: {}
261
kubePrometheusNodeRecording: {}
262
kubernetesApps: {}
263
kubernetesResources: {}
264
kubernetesStorage: {}
265
kubernetesSystem: {}
266
kubeSchedulerAlerting: {}
267
kubeSchedulerRecording: {}
268
kubeStateMetrics: {}
269
network: {}
270
node: {}
271
nodeExporterAlerting: {}
272
nodeExporterRecording: {}
273
prometheus: {}
274
prometheusOperator: {}
275
additionalAggregationLabels: []
276
## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules.
277
runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks"
278
node:
279
fsSelector: 'fstype!=""'
280
# fsSelector: 'fstype=~"ext[234]|btrfs|xfs|zfs"'
281
## Disabled PrometheusRule alerts
282
disabled: {}
283
# KubeAPIDown: true
284
# NodeRAIDDegraded: true
285
## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster.
286
##
287
# additionalPrometheusRules: []
288
# - name: my-rule-file
289
# groups:
290
# - name: my_group
291
# rules:
292
# - record: my_record
293
# expr: 100 * my_record
294
295
## Provide custom recording or alerting rules to be deployed into the cluster.
296
##
297
additionalPrometheusRulesMap: {}
298
# rule-name:
299
# groups:
300
# - name: my_group
301
# rules:
302
# - record: my_record
303
# expr: 100 * my_record
304
305
##
306
global:
307
rbac:
308
create: true
309
## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs
310
## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
311
createAggregateClusterRoles: false
312
## Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
313
##
314
imageRegistry: ""
315
## Reference to one or more secrets to be used when pulling images
316
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
317
##
318
imagePullSecrets: []
319
# - name: "image-pull-secret"
320
# or
321
# - "image-pull-secret"
322
windowsMonitoring:
323
## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter')
324
enabled: false
325
## Configuration for prometheus-windows-exporter
326
## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter
327
##
328
prometheus-windows-exporter:
329
## Enable ServiceMonitor and set Kubernetes label to use as a job label
330
##
331
prometheus:
332
monitor:
333
enabled: true
334
jobLabel: jobLabel
335
releaseLabel: true
336
## Set job label to 'windows-exporter' as required by the default Prometheus rules and Grafana dashboards
337
##
338
podLabels:
339
jobLabel: windows-exporter
340
## Enable memory and container metrics as required by the default Prometheus rules and Grafana dashboards
341
##
342
config: |-
343
collectors:
344
enabled: '[defaults],memory,container'
345
## Configuration for alertmanager
346
## ref: https://prometheus.io/docs/alerting/alertmanager/
347
##
348
alertmanager:
349
## Deploy alertmanager
350
##
351
enabled: true
352
# Optional: Override the namespace where Alertmanager will be deployed.
353
namespaceOverride: ""
354
## Annotations for Alertmanager
355
##
356
annotations: {}
357
## Additional labels for Alertmanager
358
##
359
additionalLabels: {}
360
## API that Prometheus will use to communicate with alertmanager. Possible values are v1, v2
361
##
362
apiVersion: v2
363
## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features.
364
##
365
enableFeatures: []
366
## Create dashboard configmap even if alertmanager deployment has been disabled
367
##
368
forceDeployDashboards: false
369
## Network Policy configuration
370
##
371
networkPolicy:
372
# -- Enable network policy for Alertmanager
373
enabled: false
374
# -- Define policy types. If egress is enabled, both Ingress and Egress will be used
375
# Valid values are ["Ingress"] or ["Ingress", "Egress"]
376
##
377
policyTypes:
378
- Ingress
379
# -- Gateway (formerly ingress controller) configuration
380
##
381
gateway:
382
# -- Gateway namespace
383
##
384
namespace: ""
385
# -- Gateway pod labels
386
##
387
podLabels: {}
388
# app.kubernetes.io/name: ingress-nginx
389
# -- Additional custom ingress rules
390
##
391
additionalIngress: []
392
# - from:
393
# - namespaceSelector:
394
# matchLabels:
395
# name: another-namespace
396
# podSelector:
397
# matchLabels:
398
# app: another-app
399
# - from:
400
# - podSelector:
401
# matchLabels:
402
# app.kubernetes.io/name: loki
403
# ports:
404
# - port: 9093
405
# protocol: TCP
406
407
# -- Configure egress rules
408
##
409
egress:
410
# -- Enable egress rules. When enabled, policyTypes will include Egress
411
##
412
enabled: false
413
# -- Custom egress rules
414
##
415
rules: []
416
# - to:
417
# - namespaceSelector: {}
418
# podSelector:
419
# matchLabels:
420
# name: smtp-relay
421
# ports:
422
# - port: 25
423
# protocol: TCP
424
# -- Enable rules for alertmanager cluster traffic
425
##
426
enableClusterRules: true
427
# -- Configure monitoring component rules
428
##
429
monitoringRules:
430
# -- Enable ingress from Prometheus
431
##
432
prometheus: true
433
# -- Enable ingress for config reloader metrics
434
##
435
configReloader: true
436
## Service account for Alertmanager to use.
437
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
438
##
439
serviceAccount:
440
create: true
441
name: ""
442
annotations: {}
443
automountServiceAccountToken: true
444
## Configure pod disruption budgets for Alertmanager
445
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
446
##
447
podDisruptionBudget:
448
enabled: false
449
minAvailable: 1
450
# maxUnavailable: ""
451
unhealthyPodEvictionPolicy: AlwaysAllow
452
## Enable vertical pod autoscaler support for Alertmanager
453
## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
454
##
455
verticalPodAutoscaler:
456
enabled: false
457
# Recommender responsible for generating recommendation for the object.
458
# List should be empty (then the default recommender will generate the recommendation)
459
# or contain exactly one recommender.
460
# recommenders:
461
# - name: custom-recommender-performance
462
463
# List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
464
controlledResources: []
465
# Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
466
# controlledValues: RequestsAndLimits
467
468
# Define the max allowed resources for the pod
469
maxAllowed: {}
470
# cpu: 200m
471
# memory: 100Mi
472
# Define the min allowed resources for the pod
473
minAllowed: {}
474
# cpu: 200m
475
# memory: 100Mi
476
477
updatePolicy:
478
# Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
479
# are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
480
updateMode: Recreate
481
## Alertmanager configuration directives
482
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
483
## https://prometheus.io/webtools/alerting/routing-tree-editor/
484
##
485
config:
486
global:
487
resolve_timeout: 5m
488
inhibit_rules:
489
- source_matchers:
490
- 'severity = critical'
491
target_matchers:
492
- 'severity =~ warning|info'
493
equal:
494
- 'namespace'
495
- 'alertname'
496
- source_matchers:
497
- 'severity = warning'
498
target_matchers:
499
- 'severity = info'
500
equal:
501
- 'namespace'
502
- 'alertname'
503
- source_matchers:
504
- 'alertname = InfoInhibitor'
505
target_matchers:
506
- 'severity = info'
507
equal:
508
- 'namespace'
509
- target_matchers:
510
- 'alertname = InfoInhibitor'
511
route:
512
group_by: ['namespace']
513
group_wait: 30s
514
group_interval: 5m
515
repeat_interval: 12h
516
receiver: 'null'
517
routes:
518
- receiver: 'null'
519
matchers:
520
- alertname = "Watchdog"
521
receivers:
522
- name: 'null'
523
templates:
524
- '/etc/alertmanager/config/*.tmpl'
525
## Alertmanager configuration directives (as string type, preferred over the config hash map)
526
## stringConfig will be used only if tplConfig is true
527
## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
528
## https://prometheus.io/webtools/alerting/routing-tree-editor/
529
##
530
stringConfig: ""
531
## Pass the Alertmanager configuration directives through Helm's templating
532
## engine. If the Alertmanager configuration contains Alertmanager templates,
533
## they'll need to be properly escaped so that they are not interpreted by
534
## Helm
535
## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
536
## https://prometheus.io/docs/alerting/configuration/#tmpl_string
537
## https://prometheus.io/docs/alerting/notifications/
538
## https://prometheus.io/docs/alerting/notification_examples/
539
tplConfig: false
540
## Alertmanager template files to format alerts
541
## By default, templateFiles are placed in /etc/alertmanager/config/ and if
542
## they have a .tmpl file suffix will be loaded. See config.templates above
543
## to change, add other suffixes. If adding other suffixes, be sure to update
544
## config.templates above to include those suffixes.
545
## ref: https://prometheus.io/docs/alerting/notifications/
546
## https://prometheus.io/docs/alerting/notification_examples/
547
##
548
templateFiles: {}
549
#
550
## An example template:
551
# template_1.tmpl: |-
552
# {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
553
#
554
# {{ define "slack.myorg.text" }}
555
# {{- $root := . -}}
556
# {{ range .Alerts }}
557
# *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
558
# *Cluster:* {{ template "cluster" $root }}
559
# *Description:* {{ .Annotations.description }}
560
# *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
561
# *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
562
# *Details:*
563
# {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}`
564
# {{ end }}
565
# {{ end }}
566
# {{ end }}
567
568
ingress:
569
enabled: false
570
ingressClassName: ""
571
annotations: {}
572
labels: {}
573
## Override ingress to a different defined port on the service
574
# servicePort: 8081
575
## Override ingress to a different service then the default, this is useful if you need to
576
## point to a specific instance of the alertmanager (eg kube-prometheus-stack-alertmanager-0)
577
# serviceName: kube-prometheus-stack-alertmanager-0
578
579
## Hosts must be provided if Ingress is enabled.
580
##
581
hosts: []
582
# - alertmanager.domain.com
583
584
## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
585
##
586
paths: []
587
# - /
588
589
## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
590
## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
591
# pathType: ImplementationSpecific
592
593
## TLS configuration for Alertmanager Ingress
594
## Secret must be manually created in the namespace
595
##
596
tls: []
597
# - secretName: alertmanager-general-tls
598
# hosts:
599
# - alertmanager.example.com
600
# -- BETA: Configure the gateway routes for the chart here.
601
# More routes can be added by adding a dictionary key like the 'main' route.
602
# Be aware that this is an early beta of this feature,
603
# kube-prometheus-stack does not guarantee this works and is subject to change.
604
# Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
605
# [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
606
route:
607
main:
608
# -- Enables or disables the route
609
enabled: false
610
# -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
611
apiVersion: gateway.networking.k8s.io/v1
612
# -- Set the route kind
613
# Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
614
kind: HTTPRoute
615
annotations: {}
616
labels: {}
617
hostnames: []
618
# - my-filter.example.com
619
parentRefs: []
620
# - name: acme-gw
621
622
# -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
623
## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
624
## matches, filters and additionalRules will be ignored if this is set to true. Be aware.
625
httpsRedirect: false
626
matches:
627
- path:
628
type: PathPrefix
629
value: /
630
## Filters define the filters that are applied to requests that match this rule.
631
filters: []
632
## Session persistence configuration for the route rule.
633
sessionPersistence: {}
634
# sessionName: route
635
# type: Cookie
636
# absoluteTimeout: 12h
637
# cookieConfig:
638
# lifetimeType: Permanent
639
640
## Additional custom rules that can be added to the route
641
additionalRules: []
642
## Configuration for Alertmanager secret
643
##
644
secret:
645
annotations: {}
646
## Configuration for creating an Ingress that will map to each Alertmanager replica service
647
## alertmanager.servicePerReplica must be enabled
648
##
649
ingressPerReplica:
650
enabled: false
651
ingressClassName: ""
652
annotations: {}
653
labels: {}
654
## Final form of the hostname for each per replica ingress is
655
## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
656
##
657
## Prefix for the per replica ingress that will have `-$replicaNumber`
658
## appended to the end
659
hostPrefix: ""
660
## Domain that will be used for the per replica ingress
661
hostDomain: ""
662
## Paths to use for ingress rules
663
##
664
paths: []
665
# - /
666
667
## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
668
## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
669
# pathType: ImplementationSpecific
670
671
## Secret name containing the TLS certificate for alertmanager per replica ingress
672
## Secret must be manually created in the namespace
673
tlsSecretName: ""
674
## Separated secret for each per replica Ingress. Can be used together with cert-manager
675
##
676
tlsSecretPerReplica:
677
enabled: false
678
## Final form of the secret for each per replica ingress is
679
## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
680
##
681
prefix: "alertmanager"
682
## Configuration for Alertmanager service
683
##
684
service:
685
enabled: true
686
annotations: {}
687
labels: {}
688
clusterIP: ""
689
ipDualStack:
690
enabled: false
691
ipFamilies: ["IPv6", "IPv4"]
692
ipFamilyPolicy: "PreferDualStack"
693
## Port for Alertmanager Service to listen on
694
##
695
port: 9093
696
## Port for Alertmanager cluster communication
697
##
698
# clusterPort: 9094
699
## To be used with a proxy extraContainer port
700
##
701
targetPort: 9093
702
## Port to expose on each node
703
## Only used if service.type is 'NodePort'
704
##
705
nodePort: 30903
706
## List of IP addresses at which the Prometheus server service is available
707
## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
708
##
709
710
## Additional ports to open for Alertmanager service
711
##
712
additionalPorts: []
713
# - name: oauth-proxy
714
# port: 8081
715
# targetPort: 8081
716
# - name: oauth-metrics
717
# port: 8082
718
# targetPort: 8082
719
720
externalIPs: []
721
loadBalancerIP: ""
722
loadBalancerSourceRanges: []
723
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
724
##
725
externalTrafficPolicy: Cluster
726
## If you want to make sure that connections from a particular client are passed to the same Pod each time
727
## Accepts 'ClientIP' or 'None'
728
##
729
sessionAffinity: None
730
## If you want to modify the ClientIP sessionAffinity timeout
731
## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP"
732
##
733
sessionAffinityConfig:
734
clientIP:
735
timeoutSeconds: 10800
736
## Service type
737
##
738
type: ClusterIP
739
## Configuration for creating a separate Service for each statefulset Alertmanager replica
740
##
741
servicePerReplica:
742
enabled: false
743
annotations: {}
744
## Port for Alertmanager Service per replica to listen on
745
##
746
port: 9093
747
## To be used with a proxy extraContainer port
748
targetPort: 9093
749
## Port to expose on each node
750
## Only used if servicePerReplica.type is 'NodePort'
751
##
752
nodePort: 30904
753
## Loadbalancer source IP ranges
754
## Only used if servicePerReplica.type is "LoadBalancer"
755
loadBalancerSourceRanges: []
756
## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
757
##
758
externalTrafficPolicy: Cluster
759
## Service type
760
##
761
type: ClusterIP
762
## Configuration for creating a ServiceMonitor for AlertManager
763
##
764
serviceMonitor:
765
## If true, a ServiceMonitor will be created for the AlertManager service.
766
##
767
selfMonitor: true
768
## Scrape interval. If not set, the Prometheus default scrape interval is used.
769
##
770
interval: ""
771
## Additional labels
772
##
773
additionalLabels: {}
774
## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
775
##
776
sampleLimit: 0
777
## TargetLimit defines a limit on the number of scraped targets that will be accepted.
778
##
779
targetLimit: 0
780
## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
781
##
782
labelLimit: 0
783
## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
784
##
785
labelNameLengthLimit: 0
786
## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
787
##
788
labelValueLengthLimit: 0
789
## proxyUrl: URL of a proxy that should be used for scraping.
790
##
791
proxyUrl: ""
792
## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
793
scheme: ""
794
## enableHttp2: Whether to enable HTTP2.
795
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#endpoint
796
enableHttp2: true
797
## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
798
## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
799
tlsConfig: {}
800
bearerTokenFile:
801
## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
802
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
803
##
804
metricRelabelings: []
805
# - action: keep
806
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
807
# sourceLabels: [__name__]
808
809
## RelabelConfigs to apply to samples before scraping
810
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
811
##
812
relabelings: []
813
# - sourceLabels: [__meta_kubernetes_pod_node_name]
814
# separator: ;
815
# regex: ^(.*)$
816
# targetLabel: nodename
817
# replacement: $1
818
# action: replace
819
820
## Additional Endpoints
821
##
822
additionalEndpoints: []
823
# - port: oauth-metrics
824
# path: /metrics
825
## Settings affecting alertmanagerSpec
826
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerspec
827
##
828
alertmanagerSpec:
829
## Statefulset's persistent volume claim retention policy
830
## whenDeleted and whenScaled determine whether
831
## statefulset's PVCs are deleted (true) or retained (false)
832
## on scaling down and deleting statefulset, respectively.
833
## Requires Kubernetes version 1.27.0+.
834
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
835
persistentVolumeClaimRetentionPolicy: {}
836
# whenDeleted: Retain
837
# whenScaled: Retain
838
839
## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
840
## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
841
##
842
podMetadata: {}
843
##
844
serviceName:
845
## Image of Alertmanager
846
##
847
image:
848
registry: cgr.dev
849
repository: chainguard-private/prometheus-alertmanager
850
tag: latest
851
sha: sha256:d7d7b666653d76212c1304266d09a8266407becda346bccc2f7a296842f497cd
852
pullPolicy: IfNotPresent
853
## If true then the user will be responsible to provide a secret with alertmanager configuration
854
## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used
855
##
856
useExistingSecret: false
857
## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
858
## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
859
##
860
secrets: []
861
## If false then the user will opt out of automounting API credentials.
862
##
863
automountServiceAccountToken: true
## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
##
configMaps: []
## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
## this Alertmanager instance. Defaults to 'alertmanager-<alertmanager-name>'. The secret is mounted into /etc/alertmanager/config.
##
# configSecret:

## WebTLSConfig defines the TLS parameters for HTTPS
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerwebspec
web: {}
## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
##
alertmanagerConfigSelector: {}
## Example which selects all alertmanagerConfig resources
## with label "alertconfig" with values any of "example-config" or "example-config-2"
# alertmanagerConfigSelector:
#   matchExpressions:
#     - key: alertconfig
#       operator: In
#       values:
#         - example-config
#         - example-config-2
#
## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
# alertmanagerConfigSelector:
#   matchLabels:
#     role: example-config

## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
##
alertmanagerConfigNamespaceSelector: {}
## Example which selects all namespaces
## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
# alertmanagerConfigNamespaceSelector:
#   matchExpressions:
#     - key: alertmanagerconfig
#       operator: In
#       values:
#         - example-namespace
#         - example-namespace-2

## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
# alertmanagerConfigNamespaceSelector:
#   matchLabels:
#     alertmanagerconfig: enabled

## AlertmanagerConfig to be used as top level configuration
##
alertmanagerConfiguration: {}
## Example which selects a global alertmanagerconfig
# alertmanagerConfiguration:
#   name: global-alertmanager-Configuration

## Defines the strategy used by AlertmanagerConfig objects to match alerts. eg:
##
alertmanagerConfigMatcherStrategy: {}
## Example using the OnNamespace strategy
# alertmanagerConfigMatcherStrategy:
#   type: OnNamespace

## Additional command line arguments to pass to Alertmanager (in addition to those generated by the chart)
additionalArgs: []
## Define Log Format
# Use logfmt (default) or json logging
logFormat: logfmt
## Log level for Alertmanager to be configured with.
##
logLevel: info
## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
## running cluster equal to the expected size.
replicas: 1
## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
##
retention: 120h
## Storage is the definition of how storage will be used by the Alertmanager instances.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
##
storage: {}
# volumeClaimTemplate:
#   spec:
#     storageClassName: gluster
#     accessModes: ["ReadWriteOnce"]
#     resources:
#       requests:
#         storage: 50Gi
#   selector: {}

## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name. string false
##
externalUrl:
## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
##
routePrefix: /
## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS.
scheme: ""
## tlsConfig: TLS configuration to use when connect to the endpoint. For example if using istio mTLS.
## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
tlsConfig: {}
## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
##
paused: false
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
##
nodeSelector: {}
## Define resources requests and limits for single Pods.
## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
##
resources: {}
# requests:
#   memory: 400Mi

## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
##
podAntiAffinity: "soft"
## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
##
podAntiAffinityTopologyKey: kubernetes.io/hostname
## Assign custom affinity rules to the alertmanager instance
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#       - matchExpressions:
#           - key: kubernetes.io/e2e-az-name
#             operator: In
#             values:
#               - e2e-az1
#               - e2e-az2

## If specified, the pod's tolerations.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal"
#   value: "value"
#   effect: "NoSchedule"

## If specified, the pod's topology spread constraints.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
##
topologySpreadConstraints: []
# - maxSkew: 1
#   topologyKey: topology.kubernetes.io/zone
#   whenUnsatisfiable: DoNotSchedule
#   labelSelector:
#     matchLabels:
#       app: alertmanager

## SecurityContext holds pod-level security attributes and common container settings.
## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
securityContext:
  runAsGroup: 2000
  runAsNonRoot: true
  runAsUser: 1000
  fsGroup: 2000
  seccompProfile:
    type: RuntimeDefault
## Use the host's user namespace for Alertmanager pods.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
hostUsers: ~
## DNS configuration for Alertmanager.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig
dnsConfig: {}
## DNS policy for Alertmanager.
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias
dnsPolicy: ""
## Enable hostNetwork for Alertmanager.
hostNetwork: false
## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
## Note this is only for the Alertmanager UI, not the gossip communication.
##
listenLocal: false
## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
##
containers: []
# containers:
#   - name: oauth-proxy
#     image: quay.io/oauth2-proxy/oauth2-proxy:v7.15.2
#     args:
#       - --upstream=http://127.0.0.1:9093
#       - --http-address=0.0.0.0:8081
#       - --metrics-address=0.0.0.0:8082
#       - ...
#     ports:
#       - containerPort: 8081
#         name: oauth-proxy
#         protocol: TCP
#       - containerPort: 8082
#         name: oauth-metrics
#         protocol: TCP
#     resources: {}

# Additional volumes on the output StatefulSet definition.
volumes: []
# Additional VolumeMounts on the output StatefulSet definition.
volumeMounts: []
## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
## (permissions, dir tree) on mounted volumes before starting prometheus
initContainers: []
## Priority class assigned to the Pods
##
priorityClassName: ""
## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
##
additionalPeers: []
## PortName to use for Alert Manager.
##
portName: "http-web"
## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
##
clusterAdvertiseAddress: false
## clusterGossipInterval determines interval between gossip attempts.
## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
clusterGossipInterval: ""
## clusterPeerTimeout determines timeout for cluster peering.
## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
clusterPeerTimeout: ""
## clusterPushpullInterval determines interval between pushpull attempts.
## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
clusterPushpullInterval: ""
## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster.
clusterLabel: ""
## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
forceEnableClusterMode: false
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
minReadySeconds: 0
## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
podManagementPolicy: ""
## Update strategy for the StatefulSet.
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
updateStrategy: {}
# type: RollingUpdate
# rollingUpdate:
#   maxUnavailable: 1

## Duration in seconds the pod needs to terminate gracefully.
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
terminationGracePeriodSeconds: ~
## Additional configuration which is not covered by the properties above. (passed through tpl)
additionalConfig: {}
## Additional configuration which is not covered by the properties above.
## Useful, if you need advanced templating inside alertmanagerSpec.
## Otherwise, use alertmanager.alertmanagerSpec.additionalConfig (passed through tpl)
additionalConfigString: ""
## ExtraSecret can be used to store various data in an extra secret
## (use it for example to store hashed basic auth credentials)
## NOTE(review): in the upstream chart this key lives one level up (alertmanager.extraSecret,
## not alertmanagerSpec) — confirm nesting when restoring full-file indentation.
extraSecret:
  ## if not set, name will be auto generated
  # name: ""
  annotations: {}
  data: {}
  # auth: |
  #   foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
  #   someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
## Using default values from https://github.com/grafana-community/helm-charts/blob/main/charts/grafana/values.yaml
##
grafana:
  enabled: true
  namespaceOverride: ""
  ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled
  ##
  forceDeployDatasources: false
  ## ForceDeployDashboard Create dashboard configmap even if grafana deployment has been disabled
  ##
  forceDeployDashboards: false
  ## Deploy default dashboards
  ##
  defaultDashboardsEnabled: true
  ## Deploy GrafanaDashboard CRDs that reference dashboards from ConfigMaps when grafana-operator is used
  ## These settings control how dashboards are integrated with the Grafana Operator
  ## Note: End users still need to create their own kind: GrafanaDataSource for Prometheus
  ## eg:
  ## apiVersion: grafana.integreatly.org/v1beta1
  ## kind: GrafanaDatasource
  ## metadata:
  ##   name: prometheus
  ##   annotations: {}
  ## spec:
  ##   allowCrossNamespaceImport: true
  ##   instanceSelector:
  ##     matchLabels:
  ##       app: grafana
  ##   datasource:
  ##     name: prometheus
  ##     type: prometheus
  ##     access: proxy
  ##     url: http://prometheus-operated.prometheus-stack.svc.cluster.local:9090
  ##     isDefault: true
  ##     jsonData:
  ##       "tlsSkipVerify": true
  ##       "timeInterval": "5s"
  ##
  operator:
    ## Enable references to ConfigMaps containing dashboards in GrafanaDashboard CRs
    ## Set to true to allow dashboards to be loaded from ConfigMap references
    dashboardsConfigMapRefEnabled: false
    ## Annotations for GrafanaDashboard Cr
    ##
    annotations: {}
    ## Labels that should be matched kind: Grafana instance
    ## Example: { app: grafana, category: dashboard }
    ##
    matchLabels: {}
    ## How frequently the operator should resync resources (in duration format)
    ## Controls how often dashboards are reconciled by the operator
    ##
    resyncPeriod: 10m
    ## Which folder contains all dashboards in Grafana
    ## This folder will be created on the Root level
    ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
    ##
    folder: General
    ## Which UID of the target folder contains all dashboards in Grafana
    ## This allows you to use subfolder hierarchy
    ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
    ##
    folderUID: null
    ## Which GrafanaFolder reference contains all dashboards in Grafana
    ## This allows you to use subfolder hierarchy.
    ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
    ##
    folderRef: null
  ## Timezone for the default dashboards
  ## Other options are: browser or a specific timezone, i.e. Europe/Luxembourg
  ##
  defaultDashboardsTimezone: utc
  ## Editable flag for the default dashboards
  ##
  defaultDashboardsEditable: true
  ## Default interval for Grafana dashboards
  ##
  defaultDashboardsInterval: 1m
  # Administrator credentials when not using an existing secret (see below)
  adminUser: admin
  # adminPassword: strongpassword

  # Use an existing secret for the admin user.
  admin:
    ## Name of the secret. Can be templated.
    existingSecret: ""
    userKey: admin-user
    passwordKey: admin-password
  rbac:
    ## If true, Grafana PSPs will be created
    ##
    pspEnabled: false
  ingress:
    ## If true, Grafana Ingress will be created
    ##
    enabled: false
    ## IngressClassName for Grafana Ingress.
    ## Should be provided if Ingress is enable.
    ##
    # ingressClassName: nginx

    ## Annotations for Grafana Ingress
    ##
    annotations: {}
    # kubernetes.io/ingress.class: nginx
    # kubernetes.io/tls-acme: "true"

    ## Labels to be added to the Ingress
    ##
    labels: {}
    ## Hostnames.
    ## Must be provided if Ingress is enable.
    ##
    # hosts:
    #   - grafana.domain.com
    hosts: []
    ## Path for grafana ingress
    path: /
    ## TLS configuration for grafana Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: grafana-general-tls
    #   hosts:
    #     - grafana.example.com
  # ## To make Grafana persistent (Using Statefulset)
  # ##
  # persistence:
  #   enabled: true
  #   type: sts
  #   storageClassName: "storageClassName"
  #   accessModes:
  #     - ReadWriteOnce
  #   size: 20Gi
  #   finalizers:
  #     - kubernetes.io/pvc-protection
  serviceAccount:
    create: true
    autoMount: true
  sidecar:
    dashboards:
      enabled: true
      label: grafana_dashboard
      labelValue: "1"
      # Allow discovery in all namespaces for dashboards
      searchNamespace: ALL
      # Support for new table panels, when enabled grafana auto migrates the old table panels to newer table panels
      enableNewTablePanelSyntax: false
      ## Annotations for Grafana dashboard configmaps
      ##
      annotations: {}
      multicluster:
        global:
          enabled: false
        etcd:
          enabled: false
      provider:
        allowUiUpdates: false
    datasources:
      enabled: true
      defaultDatasourceEnabled: true
      isDefaultDatasource: true
      name: Prometheus
      uid: prometheus
      ## Extra jsonData properties to add to the datasource
      # extraJsonData:
      #   prometheusType: Prometheus

      ## URL of prometheus datasource
      ##
      # url: http://prometheus-stack-prometheus:9090/

      ## Prometheus request timeout in seconds
      # timeout: 30

      ## Query parameters to add, as a URL-encoded string,
      ## to query Prometheus
      # customQueryParameters: ""

      # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
      # defaultDatasourceScrapeInterval: 15s

      ## Annotations for Grafana datasource configmaps
      ##
      annotations: {}
      ## Set method for HTTP to send query to datasource
      httpMethod: POST
      ## Create datasource for each Pod of Prometheus StatefulSet;
      ## this uses by default the headless service `prometheus-operated` which is
      ## created by Prometheus Operator. In case you deployed your own Service for your
      ## Prometheus instance, you can specify it with the field `prometheusServiceName`
      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286
      createPrometheusReplicasDatasources: false
      prometheusServiceName: prometheus-operated
      label: grafana_datasource
      labelValue: "1"
      ## Field with internal link pointing to existing data source in Grafana.
      ## Can be provisioned via additionalDataSources
      exemplarTraceIdDestinations: {}
      # datasourceUid: Jaeger
      # traceIdLabelName: trace_id
      # urlDisplayLabel: View traces
      alertmanager:
        enabled: true
        name: Alertmanager
        uid: alertmanager
        handleGrafanaManagedAlerts: false
        implementation: prometheus
  extraConfigmapMounts: []
  # - name: certs-configmap
  #   mountPath: /etc/grafana/ssl/
  #   configMap: certs-configmap
  #   readOnly: true

  deleteDatasources: []
  # - name: example-datasource
  #   orgId: 1

  ## Configure additional grafana datasources (passed through tpl)
  ## ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
  additionalDataSources: []
  # - name: prometheus-sample
  #   access: proxy
  #   basicAuth: true
  #   secureJsonData:
  #     basicAuthPassword: pass
  #   basicAuthUser: daco
  #   editable: false
  #   jsonData:
  #     tlsSkipVerify: true
  #   orgId: 1
  #   type: prometheus
  #   url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
  #   version: 1

  ## Configure additional grafana datasources as a templated string (passed through tpl)
  ## Useful when you need Helm flow control or templating inside the datasource definition
  additionalDataSourcesString: ""
  # Flag to mark provisioned data sources for deletion if they are no longer configured.
  # It takes no effect if data sources are already listed in the deleteDatasources section.
  # ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-configuration-file
  prune: false
  ## Passed to grafana subchart and used by servicemonitor below
  ##
  service:
    portName: http-web
    ipFamilies: []
    ipFamilyPolicy: ""
  serviceMonitor:
    # If true, a ServiceMonitor CRD is created for a prometheus operator
    # https://github.com/prometheus-operator/prometheus-operator
    #
    enabled: true
    # Path to use for scraping metrics. Might be different if server.root_url is set
    # in grafana.ini
    path: "/metrics"
    # namespace: monitoring (defaults to use the namespace this chart is deployed to)

    # labels for the ServiceMonitor
    labels: {}
    # Scrape interval. If not set, the Prometheus default scrape interval is used.
    #
    interval: ""
    scheme: http
    tlsConfig: {}
    scrapeTimeout: 30s
    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace
## Flag to disable all the kubernetes component scrapers
##
kubernetesServiceMonitors:
  enabled: true
## Component scraping the kube api server
##
kubeApiServer:
  enabled: true
  tlsConfig:
    serverName: kubernetes
    insecureSkipVerify: false
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    jobLabel: component
    selector:
      matchLabels:
        component: apiserver
        provider: kubernetes
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings:
      # Drop excessively noisy apiserver buckets.
      - action: drop
        regex: (etcd_request|apiserver_request_slo|apiserver_request_sli|apiserver_request)_duration_seconds_bucket;(0\.15|0\.2|0\.3|0\.35|0\.4|0\.45|0\.6|0\.7|0\.8|0\.9|1\.25|1\.5|1\.75|2|3|3\.5|4|4\.5|6|7|8|9|15|20|40|45|50)(\.0)?
        sourceLabels:
          - __name__
          - le
      # - action: keep
      #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
      #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels:
    #     - __meta_kubernetes_namespace
    #     - __meta_kubernetes_service_name
    #     - __meta_kubernetes_endpoint_port_name
    #   action: keep
    #   regex: default;kubernetes;https
    # - targetLabel: __address__
    #   replacement: kubernetes.default.svc:443

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
  ## Override the job label used for the apiserver.
  ## This allows users who scrape apiserver metrics under a different job name (e.g. k3s-server via PushProx)
  ## to align the recording rules and alerts with their actual job label.
  jobNameOverride: ""
## Component scraping the kubelet and kubelet-hosted cAdvisor
##
kubelet:
  enabled: true
  namespace: kube-system
  # Overrides the job selector in Grafana dashboards and Prometheus rules
  # For k3s clusters, change to k3s-server
  jobNameOverride: ""
  serviceMonitor:
    enabled: true
    ## Enable scraping /metrics from kubelet's service
    kubelet: true
    ## Attach metadata to discovered targets. Requires Prometheus v2.45 for endpoints created by the operator.
    ##
    attachMetadata:
      node: false
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## If true, Prometheus use (respect) labels provided by exporter.
    ##
    honorLabels: true
    ## If true, Prometheus ingests metrics with timestamp provided by exporter. If false, Prometheus ingests metrics with timestamp of scrape.
    ##
    honorTimestamps: true
    ## If true, defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false.
    ## We recommend enabling this if you want the best possible accuracy for container_ metrics scraped from cadvisor.
    ## For more details see: https://github.com/prometheus-community/helm-charts/pull/5063#issuecomment-2545374849
    trackTimestampsStaleness: true
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## Enable scraping the kubelet over https. For requirements to enable this see
    ## https://github.com/prometheus-operator/prometheus-operator/issues/926
    ##
    https: true
    ## Skip TLS certificate validation when scraping.
    ## This is enabled by default because kubelet serving certificate deployed by kubeadm is by default self-signed
    ## ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs
    ##
    insecureSkipVerify: true
    ## Enable scraping /metrics/probes from kubelet's service
    ##
    probes: true
    ## Enable scraping /metrics/resource from kubelet's service
    ## This is disabled by default because container metrics are already exposed by cAdvisor
    ##
    resource: false
    # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
    resourcePath: "/metrics/resource/v1alpha1"
    ## Configure the scrape interval for resource metrics. This is configured to the default Kubelet cAdvisor
    ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
    ## if kubelet.serviceMonitor.interval is not empty.
    resourceInterval: 10s
    ## Enable scraping /metrics/cadvisor from kubelet's service
    ##
    cAdvisor: true
    ## Configure the scrape interval for cAdvisor. This is configured to the default Kubelet cAdvisor
    ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
    ## if kubelet.serviceMonitor.interval is not empty.
    cAdvisorInterval: 10s
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    cAdvisorMetricRelabelings:
      # Drop less useful container CPU metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
      # Drop less useful container / always zero filesystem metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
      # Drop less useful / always zero container memory metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_memory_(mapped_file|swap)'
      # Drop less useful container process metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_(file_descriptors|tasks_state|threads_max)'
      # Drop container_memory_failures_total{scope="hierarchy"} metrics,
      # we only need the container scope.
      - sourceLabels: [__name__, scope]
        action: drop
        regex: 'container_memory_failures_total;hierarchy'
      # Drop container_network_... metrics that match various interfaces that
      # correspond to CNI and similar interfaces. This avoids capturing network
      # metrics for host network containers.
      - sourceLabels: [__name__, interface]
        action: drop
        regex: 'container_network_.*;(cali|cilium|cni|lxc|nodelocaldns|tunl).*'
      # Drop container spec metrics that overlap with kube-state-metrics.
      - sourceLabels: [__name__]
        action: drop
        regex: 'container_spec.*'
      # Drop cgroup metrics with no pod.
      - sourceLabels: [id, pod]
        action: drop
        regex: '.+;'
      # - sourceLabels: [__name__, image]
      #   separator: ;
      #   regex: container_([a-z_]+);
      #   replacement: $1
      #   action: drop
      # - sourceLabels: [__name__]
      #   separator: ;
      #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
      #   replacement: $1
      #   action: drop

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    probesMetricRelabelings: []
    # - sourceLabels: [__name__, image]
    #   separator: ;
    #   regex: container_([a-z_]+);
    #   replacement: $1
    #   action: drop
    # - sourceLabels: [__name__]
    #   separator: ;
    #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
    #   replacement: $1
    #   action: drop

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    ## metrics_path is required to match upstream rules and charts
    cAdvisorRelabelings:
      - action: replace
        sourceLabels: [__metrics_path__]
        targetLabel: metrics_path
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    probesRelabelings:
      - action: replace
        sourceLabels: [__metrics_path__]
        targetLabel: metrics_path
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    resourceRelabelings:
      - action: replace
        sourceLabels: [__metrics_path__]
        targetLabel: metrics_path
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings:
      # Reduce bucket cardinality of kubelet storage operations.
      - action: drop
        sourceLabels: [__name__, le]
        regex: (csi_operations|storage_operation_duration)_seconds_bucket;(0.25|2.5|15|25|120|600)(\.0)?
      # - sourceLabels: [__name__, image]
      #   separator: ;
      #   regex: container_([a-z_]+);
      #   replacement: $1
      #   action: drop
      # - sourceLabels: [__name__]
      #   separator: ;
      #   regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
      #   replacement: $1
      #   action: drop

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    ## metrics_path is required to match upstream rules and charts
    relabelings:
      - action: replace
        sourceLabels: [__metrics_path__]
        targetLabel: metrics_path
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
## Component scraping the kube controller manager
##
kubeControllerManager:
  enabled: true
  # Overrides the job selector in Grafana dashboards and Prometheus rules
  # For k3s clusters, change to k3s-server
  jobNameOverride: ""
  ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## If using kubeControllerManager.endpoints only the port and targetPort are used
  ##
  service:
    enabled: true
    ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
    ## of default port in Kubernetes 1.22.
    ##
    port: null
    targetPort: null
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   component: kube-controller-manager
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   component: kube-controller-manager

    ## Enable scraping kube-controller-manager over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
    ## If null or unset, the value is determined dynamically based on target Kubernetes version.
    ##
    https: null
    # Skip TLS certificate validation when scraping
    insecureSkipVerify: null
    # Name of the server to use when validating TLS certificate
    serverName: null
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
## Component scraping coreDns. Use either this or kubeDns
##
coreDns:
  enabled: true
  service:
    enabled: true
    port: 9153
    targetPort: 9153
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   k8s-app: kube-dns
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   k8s-app: kube-dns

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
    ## File containing bearer token to be used when scraping targets
    ## Empty value do not send any bearer token.
    ##
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
## Component scraping kubeDns. Use either this or coreDns
##
kubeDns:
  enabled: false
  service:
    dnsmasq:
      port: 10054
      targetPort: 10054
    skydns:
      port: 10055
      targetPort: 10055
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   k8s-app: kube-dns
  serviceMonitor:
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   k8s-app: kube-dns

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    dnsmasqMetricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    dnsmasqRelabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
    ## File containing bearer token to be used when scraping targets
    ## Empty value do not send any bearer token.
    ##
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
## Component scraping etcd
##
kubeEtcd:
  enabled: true
  ## If your etcd is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
  ##
  service:
    enabled: true
    port: 2381
    targetPort: 2381
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   component: etcd
  ## Configure secure access to the etcd cluster by loading a secret into prometheus and
  ## specifying security configuration below. For example, with a secret named etcd-client-cert
  ##
  ## serviceMonitor:
  ##   scheme: https
  ##   insecureSkipVerify: false
  ##   serverName: localhost
  ##   caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
  ##   certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
  ##   keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
  ##
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    scheme: http
    insecureSkipVerify: false
    serverName: ""
    caFile: ""
    certFile: ""
    keyFile: ""
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   component: etcd

    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
    ## File containing bearer token to be used when scraping targets
    ## Empty value do not send any bearer token.
    ##
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
## Component scraping kube scheduler
##
kubeScheduler:
  enabled: true
  # Overrides the job selector in Grafana dashboards and Prometheus rules
  # For k3s clusters, change to k3s-server
  jobNameOverride: ""
  ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  ## If using kubeScheduler.endpoints only the port and targetPort are used
  ##
  service:
    enabled: true
    ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
    ## of default port in Kubernetes 1.23.
    ##
    port: null
    targetPort: null
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   component: kube-scheduler
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## Enable scraping kube-scheduler over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
    ## If null or unset, the value is determined dynamically based on target Kubernetes version.
    ##
    https: null
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   component: kube-scheduler

    ## Skip TLS certificate validation when scraping
    insecureSkipVerify: null
    ## Name of the server to use when validating TLS certificate
    serverName: null
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
## Component scraping kube proxy
##
kubeProxy:
  enabled: true
  # Overrides the job selector in Grafana dashboards and Prometheus rules
  # For k3s clusters, change to k3s-server
  jobNameOverride: ""
  ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
  ##
  endpoints: []
  # - 10.141.4.22
  # - 10.141.4.23
  # - 10.141.4.24

  service:
    enabled: true
    port: 10249
    targetPort: 10249
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    # selector:
    #   k8s-app: kube-proxy
  serviceMonitor:
    enabled: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## port: Name of the port the metrics will be scraped from
    ##
    port: http-metrics
    jobLabel: jobLabel
    selector: {}
    # matchLabels:
    #   k8s-app: kube-proxy

    ## Enable scraping kube-proxy over https.
    ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
    ##
    https: false
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## Additional labels
    ##
    additionalLabels: {}
    # foo: bar

    ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
    ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
    targetLabels: []
    ## File containing bearer token to be used when scraping targets
    ## Empty value do not send any bearer token.
    ##
    bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
## Component scraping kube state metrics
##
kubeStateMetrics:
  enabled: true

## Configuration for kube-state-metrics subchart
##
kube-state-metrics:
  ## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box
  releaseLabel: true
  ## Enable scraping via kubernetes-service-endpoints
  ## Disabled by default as we service monitor is enabled below
  ##
  prometheusScrape: false
  prometheus:
    monitor:
      ## Enable scraping via service monitor
      ## Disable to prevent duplication if you enable prometheusScrape above
      enabled: true
      ## kube-state-metrics endpoint
      http:
        ## Keep labels from scraped data, overriding server-side labels
        honorLabels: true
      ## selfMonitor endpoint
      metrics:
        ## Keep labels from scraped data, overriding server-side labels
        honorLabels: true
## Deploy node exporter as a daemonset to all nodes
##
nodeExporter:
  enabled: true
  operatingSystems:
    linux:
      enabled: true
    aix:
      enabled: true
    darwin:
      enabled: true
  ## ForceDeployDashboard Create dashboard configmap even if nodeExporter deployment has been disabled
  ##
  forceDeployDashboards: false
## Configuration for prometheus-node-exporter subchart
##
prometheus-node-exporter:
  namespaceOverride: ""
  podLabels:
    ## Add the 'node-exporter' label to be used by serviceMonitor and podMonitor to match standard common usage in rules and grafana dashboards
    ##
    jobLabel: node-exporter
  releaseLabel: true
  extraArgs:
    - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/containerd/.+|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
    - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs|erofs)$
  service:
    portName: http-metrics
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    labels:
      jobLabel: node-exporter
  prometheus:
    monitor:
      enabled: true
      jobLabel: jobLabel
      ## Scrape interval. If not set, the Prometheus default scrape interval is used.
      ##
      interval: ""
      ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
      ##
      sampleLimit: 0
      ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
      ##
      targetLimit: 0
      ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
      ##
      labelLimit: 0
      ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
      ##
      labelNameLengthLimit: 0
      ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
      ##
      labelValueLengthLimit: 0
      ## How long until a scrape request times out. If not set, the Prometheus default scape timeout is used.
      ##
      scrapeTimeout: ""
      ## proxyUrl: URL of a proxy that should be used for scraping.
      ##
      proxyUrl: ""
      ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
      ##
      metricRelabelings: []
      # - sourceLabels: [__name__]
      #   separator: ;
      #   regex: ^node_mountstats_nfs_(event|operations|transport)_.+
      #   replacement: $1
      #   action: drop

      ## RelabelConfigs to apply to samples before scraping
      ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
      ##
      relabelings: []
      # - sourceLabels: [__meta_kubernetes_pod_node_name]
      #   separator: ;
      #   regex: ^(.*)$
      #   targetLabel: nodename
      #   replacement: $1
      #   action: replace
      ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above.
      ##
      # attachMetadata:
      #   node: false

    podMonitor:
      enabled: false
      jobLabel: jobLabel
  rbac:
    ## If true, create PSPs for node-exporter
    ##
    pspEnabled: false
## Manages Prometheus and Alertmanager components
##
prometheusOperator:
  enabled: true
  ## Use '{{ template "kube-prometheus-stack.fullname" . }}-operator' by default
  fullnameOverride: ""
  ## Number of old replicasets to retain ##
  ## The default value is 10, 0 will garbage-collect old replicasets ##
  revisionHistoryLimit: 10
  ## Strategy of the deployment
  ##
  strategy: {}
  ## Prometheus-Operator v0.39.0 and later support TLS natively.
  ##
  tls:
    enabled: true
    # Value must match version names from https://pkg.go.dev/crypto/tls#pkg-constants
    tlsMinVersion: VersionTLS13
    # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
    internalPort: 10250
  ## Liveness probe for the prometheusOperator deployment
  ##
  livenessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 0
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
  ## Readiness probe for the prometheusOperator deployment
  ##
  readinessProbe:
    enabled: true
    failureThreshold: 3
    initialDelaySeconds: 0
    periodSeconds: 10
    successThreshold: 1
    timeoutSeconds: 1
## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
2425
## rules from making their way into prometheus and potentially preventing the container from starting
2426
admissionWebhooks:
2427
## Valid values: Fail, Ignore, IgnoreOnInstallOnly
2428
## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail"
2429
failurePolicy: ""
2430
## The default timeoutSeconds is 10 and the maximum value is 30.
2431
timeoutSeconds: 10
2432
enabled: true
2433
## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
2434
## If unspecified, system trust roots on the apiserver are used.
2435
caBundle: ""
2436
## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
2437
## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
2438
## certs ahead of time if you wish.
2439
##
2440
annotations: {}
2441
# argocd.argoproj.io/hook: PreSync
2442
# argocd.argoproj.io/hook-delete-policy: HookSucceeded
2443
2444
namespaceSelector: {}
2445
objectSelector: {}
2446
matchConditions: {}
2447
mutatingWebhookConfiguration:
2448
annotations: {}
2449
# argocd.argoproj.io/hook: PreSync
2450
validatingWebhookConfiguration:
2451
annotations: {}
2452
# argocd.argoproj.io/hook: PreSync
2453
deployment:
2454
enabled: false
2455
## Number of replicas
2456
##
2457
replicas: 1
2458
## Strategy of the deployment
2459
##
2460
strategy: {}
2461
# Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
2462
podDisruptionBudget:
2463
enabled: false
2464
minAvailable: 1
2465
# maxUnavailable: ""
2466
unhealthyPodEvictionPolicy: AlwaysAllow
## Number of old replicasets to retain ##
## The default value is 10, 0 will garbage-collect old replicasets ##
revisionHistoryLimit: 10
## Prometheus-Operator v0.39.0 and later support TLS natively.
##
tls:
  enabled: true
  # Value must match version names from https://pkg.go.dev/crypto/tls#pkg-constants
  tlsMinVersion: VersionTLS13
  # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
  internalPort: 10250
## Service account for Prometheus Operator Webhook to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  annotations: {}
  automountServiceAccountToken: false
  create: true
  name: ""
## Configuration for Prometheus operator Webhook service
##
service:
  annotations: {}
  labels: {}
  clusterIP: ""
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
  ## Port to expose on each node
  ## Only used if service.type is 'NodePort'
  ##
  nodePort: 31080
  nodePortTls: 31443
  ## Additional ports to open for Prometheus operator Webhook service
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
  ##
  additionalPorts: []
  ## Loadbalancer IP
  ## Only use if service.type is "LoadBalancer"
  ##
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ## NodePort, ClusterIP, LoadBalancer
  ##
  type: ClusterIP
  ## List of IP addresses at which the Prometheus server service is available
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
  ##
  externalIPs: []
# ## Labels to add to the operator webhook deployment
# ##
labels: {}
## Annotations to add to the operator webhook deployment
##
annotations: {}
## Labels to add to the operator webhook pod
##
podLabels: {}
## Annotations to add to the operator webhook pod
##
podAnnotations: {}
## Assign a PriorityClassName to pods if set
# priorityClassName: ""

## Define Log Format
# Use logfmt (default) or json logging
# logFormat: logfmt

## Decrease log verbosity to errors only
# logLevel: error

## Prometheus-operator webhook image
##
image:
  registry: cgr.dev
  repository: chainguard-private/prometheus-admission-webhook
  # if not set appVersion field from Chart.yaml is used
  tag: latest
  sha: sha256:4d060b4fb915cf04db68d87682d21d7759d00eb866ce9ea12afe913bcdb04133
  pullPolicy: IfNotPresent
## Define Log Format
# Use logfmt (default) or json logging
# logFormat: logfmt

## Decrease log verbosity to errors only
# logLevel: error

## Liveness probe
##
livenessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 30
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
## Readiness probe
##
readinessProbe:
  enabled: true
  failureThreshold: 3
  initialDelaySeconds: 5
  periodSeconds: 10
  successThreshold: 1
  timeoutSeconds: 1
## Resource limits & requests
##
resources: {}
# limits:
#   cpu: 200m
#   memory: 200Mi
# requests:
#   cpu: 100m
#   memory: 100Mi

# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
##
hostNetwork: false
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
##
nodeSelector: {}
## Tolerations for use with node taints
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal"
#   value: "value"
#   effect: "NoSchedule"

## Assign custom affinity rules to the prometheus operator
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#       - matchExpressions:
#           - key: kubernetes.io/e2e-az-name
#             operator: In
#             values:
#               - e2e-az1
#               - e2e-az2
dnsConfig: {}
# nameservers:
#   - 1.2.3.4
# searches:
#   - ns1.svc.cluster-domain.example
#   - my.dns.search.suffix
# options:
#   - name: ndots
#     value: "2"
#   - name: edns0
securityContext:
  fsGroup: 65534
  runAsGroup: 65534
  runAsNonRoot: true
  runAsUser: 65534
  seccompProfile:
    type: RuntimeDefault
## Container-specific security context configuration
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
containerSecurityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop:
      - ALL
## If false then the user will opt out of automounting API credentials.
##
automountServiceAccountToken: true
patch:
  enabled: true
  image:
    registry: cgr.dev
    repository: chainguard-private/kube-webhook-certgen
    tag: latest
    sha: sha256:1f2157ebf63c7ebfc135640afd44383e43898fb372c2a38c1509d47cf7dd08c0
    pullPolicy: IfNotPresent
  resources: {}
  ## Provide a priority class name to the webhook patching job
  ##
  priorityClassName: ""
  ttlSecondsAfterFinished: 60
  annotations: {}
  # argocd.argoproj.io/hook: PreSync
  # argocd.argoproj.io/hook-delete-policy: HookSucceeded
  podAnnotations: {}
  nodeSelector: {}
  affinity: {}
  tolerations: []
  ## SecurityContext holds pod-level security attributes and common container settings.
  ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
  ##
  securityContext:
    runAsGroup: 2000
    runAsNonRoot: true
    runAsUser: 2000
    seccompProfile:
      type: RuntimeDefault
  ## Service account for Prometheus Operator Webhook Job Patch to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    create: true
    annotations: {}
    automountServiceAccountToken: true
# Security context for create job container
createSecretJob:
  securityContext:
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
# Security context for patch job container
patchWebhookJob:
  securityContext:
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop:
        - ALL
# Use certmanager to generate webhook certs
certManager:
  enabled: false
  # self-signed root certificate
  rootCert:
    duration: ""  # default to be 5y
    # -- Set the revisionHistoryLimit on the Certificate. See
    # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
    # Defaults to nil.
    revisionHistoryLimit:
  admissionCert:
    duration: ""  # default to be 1y
    # -- Set the revisionHistoryLimit on the Certificate. See
    # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
    # Defaults to nil.
    revisionHistoryLimit:
  # issuerRef:
  #   name: "issuer"
  #   kind: "ClusterIssuer"
## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
##
namespaces: {}
# releaseNamespace: true
# additional:
#   - kube-system

## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
##
denyNamespaces: []
## Filter namespaces to look for prometheus-operator custom resources
##
alertmanagerInstanceNamespaces: []
alertmanagerConfigNamespaces: []
prometheusInstanceNamespaces: []
thanosRulerInstanceNamespaces: []
## The clusterDomain value will be added to the cluster.peer option of the alertmanager.
## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value)
## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094
##
# clusterDomain: "cluster.local"
networkPolicy:
  ## Enable creation of NetworkPolicy resources.
  ##
  enabled: false
  ## Flavor of the network policy to use.
  # Can be:
  # * kubernetes for networking.k8s.io/v1/NetworkPolicy
  # * cilium for cilium.io/v2/CiliumNetworkPolicy
  flavor: kubernetes
  # cilium:
  #   egress:

  ## match labels used in selector
  # matchLabels: {}
## Service account for Prometheus Operator to use.
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
##
serviceAccount:
  create: true
  name: ""
  automountServiceAccountToken: true
  annotations: {}
# -- terminationGracePeriodSeconds for container lifecycle hook
terminationGracePeriodSeconds: 30
# -- Specify lifecycle hooks for the controller
lifecycle: {}
## Configuration for Prometheus operator service
##
service:
  annotations: {}
  labels: {}
  clusterIP: ""
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
  ## Port to expose on each node
  ## Only used if service.type is 'NodePort'
  ##
  nodePort: 30080
  nodePortTls: 30443
  ## Additional ports to open for Prometheus operator service
  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
  ##
  additionalPorts: []
  ## Loadbalancer IP
  ## Only use if service.type is "LoadBalancer"
  ##
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ## NodePort, ClusterIP, LoadBalancer
  ##
  type: ClusterIP
  ## List of IP addresses at which the Prometheus server service is available
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
  ##
  externalIPs: []
# ## Labels to add to the operator deployment
# ##
labels: {}
## Annotations to add to the operator deployment
##
annotations: {}
## Labels to add to the operator pod
##
podLabels: {}
## Annotations to add to the operator pod
##
podAnnotations: {}
## Assign a podDisruptionBudget to the operator
##
podDisruptionBudget:
  enabled: false
  minAvailable: 1
  # maxUnavailable: ""
  unhealthyPodEvictionPolicy: AlwaysAllow
## Assign a PriorityClassName to pods if set
# priorityClassName: ""

## Define Log Format
# Use logfmt (default) or json logging
# logFormat: logfmt

## Decrease log verbosity to errors only
# logLevel: error
kubeletService:
  ## If true, the operator will create and maintain a service for scraping kubelets
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md
  ##
  enabled: true
  namespace: kube-system
  selector: ""
  ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default
  name: ""
## Create Endpoints objects for kubelet targets.
kubeletEndpointsEnabled: true
## Create EndpointSlice objects for kubelet targets.
kubeletEndpointSliceEnabled: false
## Extra arguments to pass to prometheusOperator
# https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/operator.md
extraArgs: []
# - --labels="cluster=talos-cluster"

## Create a servicemonitor for the operator
##
serviceMonitor:
  ## If true, create a serviceMonitor for prometheus operator
  ##
  selfMonitor: true
  ## Labels for ServiceMonitor
  additionalLabels: {}
  ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  ##
  interval: ""
  ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  ##
  sampleLimit: 0
  ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  ##
  targetLimit: 0
  ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelLimit: 0
  ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelNameLengthLimit: 0
  ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelValueLengthLimit: 0
  ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
  scrapeTimeout: ""
  ## Metric relabel configs to apply to samples before ingestion.
  ##
  metricRelabelings: []
  # - action: keep
  #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  #   sourceLabels: [__name__]

  # relabel configs to apply to samples before ingestion.
  ##
  relabelings: []
  # - sourceLabels: [__meta_kubernetes_pod_node_name]
  #   separator: ;
  #   regex: ^(.*)$
  #   targetLabel: nodename
  #   replacement: $1
  #   action: replace
## Resource limits & requests
##
resources: {}
# limits:
#   cpu: 200m
#   memory: 200Mi
# requests:
#   cpu: 100m
#   memory: 100Mi

## Operator Environment
##   env:
##     VARIABLE: value
env:
  GOGC: "30"
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
##
hostNetwork: false
## Define which Nodes the Pods are scheduled on.
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
##
nodeSelector: {}
## Tolerations for use with node taints
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
##
tolerations: []
# - key: "key"
#   operator: "Equal"
#   value: "value"
#   effect: "NoSchedule"

## Assign custom affinity rules to the prometheus operator
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
##
affinity: {}
# nodeAffinity:
#   requiredDuringSchedulingIgnoredDuringExecution:
#     nodeSelectorTerms:
#       - matchExpressions:
#           - key: kubernetes.io/e2e-az-name
#             operator: In
#             values:
#               - e2e-az1
#               - e2e-az2
dnsConfig: {}
# nameservers:
#   - 1.2.3.4
# searches:
#   - ns1.svc.cluster-domain.example
#   - my.dns.search.suffix
# options:
#   - name: ndots
#     value: "2"
#   - name: edns0
securityContext:
  fsGroup: 65534
  runAsGroup: 65534
  runAsNonRoot: true
  runAsUser: 65534
  seccompProfile:
    type: RuntimeDefault
## Setup hostUsers for prometheus-operator
## ref: https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/
hostUsers: ~
## Container-specific security context configuration
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
##
containerSecurityContext:
  allowPrivilegeEscalation: false
  readOnlyRootFilesystem: true
  capabilities:
    drop:
      - ALL
# Enable vertical pod autoscaler support for prometheus-operator
verticalPodAutoscaler:
  enabled: false
  # Recommender responsible for generating recommendation for the object.
  # List should be empty (then the default recommender will generate the recommendation)
  # or contain exactly one recommender.
  # recommenders:
  #   - name: custom-recommender-performance

  # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
  controlledResources: []
  # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
  # controlledValues: RequestsAndLimits

  # Define the max allowed resources for the pod
  maxAllowed: {}
  # cpu: 200m
  # memory: 100Mi
  # Define the min allowed resources for the pod
  minAllowed: {}
  # cpu: 200m
  # memory: 100Mi

  updatePolicy:
    # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
    # minReplicas: 1
    # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
    # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
    updateMode: Recreate
## Prometheus-operator image
##
image:
  registry: cgr.dev
  repository: chainguard-private/prometheus-operator
  # if not set appVersion field from Chart.yaml is used
  tag: latest
  sha: sha256:69dce7b86620326df1d4593b520b6ab81b6db3daaddb365cb094b35957b8f474
  pullPolicy: IfNotPresent
## Prometheus image to use for prometheuses managed by the operator
##
# prometheusDefaultBaseImage: prometheus/prometheus

## Prometheus image registry to use for prometheuses managed by the operator
##
# prometheusDefaultBaseImageRegistry: quay.io

## Alertmanager image to use for alertmanagers managed by the operator
##
# alertmanagerDefaultBaseImage: prometheus/alertmanager

## Alertmanager image registry to use for alertmanagers managed by the operator
##
# alertmanagerDefaultBaseImageRegistry: quay.io

## Prometheus-config-reloader
##
prometheusConfigReloader:
  image:
    registry: cgr.dev
    repository: chainguard-private/prometheus-config-reloader
    # if not set appVersion field from Chart.yaml is used
    tag: latest
    sha: sha256:0bb4e4a27053c655527f5f585df9eb9b81782b81a4213622dba9c2494bcde757
  # add prometheus config reloader liveness and readiness probe. Default: false
  enableProbe: false
  # resource config for prometheusConfigReloader
  resources: {}
  # requests:
  #   cpu: 200m
  #   memory: 50Mi
  # limits:
  #   cpu: 200m
  #   memory: 50Mi
## Thanos side-car image when configured
##
thanosImage:
  registry: cgr.dev
  repository: chainguard-private/thanos
  tag: latest
  sha: sha256:91616ecf31235c2f626295c55d11389fcdcfb2b1f817099a9d7460e7765bd183
## Set a Label Selector to filter watched prometheus and prometheusAgent
##
prometheusInstanceSelector: ""
## Set a Label Selector to filter watched alertmanager
##
alertmanagerInstanceSelector: ""
## Set a Label Selector to filter watched thanosRuler
thanosRulerInstanceSelector: ""
## Set a Field Selector to filter watched secrets
##
secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1"
## If false then the user will opt out of automounting API credentials.
##
automountServiceAccountToken: true
## Additional volumes
##
extraVolumes: []
## Additional volume mounts
##
extraVolumeMounts: []
## Deploy a Prometheus instance
##
prometheus:
  enabled: true
  ## Toggle prometheus into agent mode
  ## Note many of features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode.
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/prometheus-agent.md
  ##
  agentMode: false
  ## Annotations for Prometheus
  ##
  annotations: {}
  ## Additional labels for Prometheus
  ##
  additionalLabels: {}
  ## Configure network policy for the prometheus
  networkPolicy:
    enabled: false
    ## Flavor of the network policy to use.
    # Can be:
    # * kubernetes for networking.k8s.io/v1/NetworkPolicy
    # * cilium for cilium.io/v2/CiliumNetworkPolicy
    flavor: kubernetes
    namespace:
    # cilium:
    #   endpointSelector:
    #   egress:
    #   ingress:

    # egress:
    # - {}
    # ingress:
    # - {}
    # podSelector:
    #   matchLabels:
    #     app: prometheus
  ## Service account for Prometheuses to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    create: true
    name: ""
    annotations: {}
    automountServiceAccountToken: true
# Service for thanos service discovery on sidecar
# Enable this can make Thanos Query can use
# `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discovery
# Thanos sidecar on prometheus nodes
# (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}. Not just copy and paste!)
thanosService:
  enabled: false
  annotations: {}
  labels: {}
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ##
  type: ClusterIP
  ## Service dual stack
  ##
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
  ## gRPC port config
  portName: grpc
  port: 10901
  targetPort: "grpc"
  ## HTTP port config (for metrics)
  httpPortName: http
  httpPort: 10902
  targetHttpPort: "http"
  ## ClusterIP to assign
  # Default is to make this a headless service ("None")
  clusterIP: "None"
  ## Port to expose on each node, if service type is NodePort
  ##
  nodePort: 30901
  httpNodePort: 30902
# ServiceMonitor to scrape Sidecar metrics
# Needs thanosService to be enabled as well
thanosServiceMonitor:
  enabled: false
  interval: ""
  ## Additional labels
  ##
  additionalLabels: {}
  ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
  scheme: ""
  ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
  ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
  tlsConfig: {}
  bearerTokenFile:
  ## Metric relabel configs to apply to samples before ingestion.
  metricRelabelings: []
  ## relabel configs to apply to samples before ingestion.
  relabelings: []
# Service for external access to sidecar
# Enabling this creates a service to expose thanos-sidecar outside the cluster.
thanosServiceExternal:
  enabled: false
  annotations: {}
  labels: {}
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  ## gRPC port config
  portName: grpc
  port: 10901
  targetPort: "grpc"
  ## HTTP port config (for metrics)
  httpPortName: http
  httpPort: 10902
  targetHttpPort: "http"
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ##
  type: LoadBalancer
  ## Port to expose on each node
  ##
  nodePort: 30901
  httpNodePort: 30902
## Configuration for Prometheus service
##
service:
  enabled: true
  annotations: {}
  labels: {}
  clusterIP: ""
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
  ## Port for Prometheus Service to listen on
  ##
  port: 9090
  ## To be used with a proxy extraContainer port
  targetPort: 9090
  ## Port for Prometheus Reloader to listen on
  ##
  reloaderWebPort: 8080
  ## Port to expose for Prometheus Reloader
  ## Only used if service.type is 'NodePort'
  ##
  reloaderWebNodePort: null
  ## List of IP addresses at which the Prometheus server service is available
  ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
  ##
  externalIPs: []
  ## Port to expose on each node
  ## Only used if service.type is 'NodePort'
  ##
  nodePort: 30090
  ## Loadbalancer IP
  ## Only use if service.type is "LoadBalancer"
  loadBalancerIP: ""
  loadBalancerSourceRanges: []
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ##
  type: ClusterIP
  ## Additional ports to open for Prometheus service
  ##
  additionalPorts: []
  # additionalPorts:
  # - name: oauth-proxy
  #   port: 8081
  #   targetPort: 8081
  # - name: oauth-metrics
  #   port: 8082
  #   targetPort: 8082

  ## Consider that all endpoints are considered "ready" even if the Pods themselves are not
  ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
  publishNotReadyAddresses: false
  ## If you want to make sure that connections from a particular client are passed to the same Pod each time
  ## Accepts 'ClientIP' or 'None'
  ##
  sessionAffinity: None
  ## If you want to modify the ClientIP sessionAffinity timeout
  ## The value must be >0 && <=86400(for 1 day) if ServiceAffinity == "ClientIP"
  ##
  sessionAffinityConfig:
    clientIP:
      timeoutSeconds: 10800
## Configuration for creating a separate Service for each statefulset Prometheus replica
##
servicePerReplica:
  enabled: false
  annotations: {}
  ## Port for Prometheus Service per replica to listen on
  ##
  port: 9090
  ## To be used with a proxy extraContainer port
  targetPort: 9090
  ## Port to expose on each node
  ## Only used if servicePerReplica.type is 'NodePort'
  ##
  nodePort: 30091
  ## Loadbalancer source IP ranges
  ## Only used if servicePerReplica.type is "LoadBalancer"
  loadBalancerSourceRanges: []
  ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
  ##
  externalTrafficPolicy: Cluster
  ## Service type
  ##
  type: ClusterIP
  ## Service dual stack
  ##
  ipDualStack:
    enabled: false
    ipFamilies: ["IPv6", "IPv4"]
    ipFamilyPolicy: "PreferDualStack"
## Configure pod disruption budgets for Prometheus
## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
##
podDisruptionBudget:
  enabled: false
  minAvailable: 1
  # maxUnavailable: ""
  unhealthyPodEvictionPolicy: AlwaysAllow
## Enable vertical pod autoscaler support for Prometheus
## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
##
verticalPodAutoscaler:
  enabled: false
  # Recommender responsible for generating recommendation for the object.
  # List should be empty (then the default recommender will generate the recommendation)
  # or contain exactly one recommender.
  # recommenders:
  #   - name: custom-recommender-performance

  # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
  controlledResources: []
  # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
  # controlledValues: RequestsAndLimits

  # Define the max allowed resources for the pod
  maxAllowed: {}
  # cpu: 200m
  # memory: 100Mi
  # Define the min allowed resources for the pod
  minAllowed: {}
  # cpu: 200m
  # memory: 100Mi

  updatePolicy:
    # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
    # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
    updateMode: Recreate
# Ingress exposes thanos sidecar outside the cluster
thanosIngress:
  enabled: false
  ingressClassName: ""
  annotations: {}
  labels: {}
  servicePort: 10901
  ## Port to expose on each node
  ## Only used if service.type is 'NodePort'
  ##
  nodePort: 30901
  ## Hosts must be provided if Ingress is enabled.
  ##
  hosts: []
  # - thanos-gateway.domain.com

  ## Paths to use for ingress rules
  ##
  paths: []
  # - /

  ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  # pathType: ImplementationSpecific

  ## TLS configuration for Thanos Ingress
  ## Secret must be manually created in the namespace
  ##
  tls: []
  # - secretName: thanos-gateway-tls
  #   hosts:
  #     - thanos-gateway.domain.com
  #
## ExtraSecret can be used to store various data in an extra secret
## (use it for example to store hashed basic auth credentials)
extraSecret:
  ## if not set, name will be auto generated
  # name: ""
  annotations: {}
  data: {}
  # auth: |
  #   foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
  #   someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.

ingress:
  enabled: false
  ingressClassName: ""
  annotations: {}
  labels: {}
  ## Redirect ingress to an additional defined port on the service
  # servicePort: 8081

  ## Hostnames.
  ## Must be provided if Ingress is enabled.
  ##
  # hosts:
  #   - prometheus.domain.com
  hosts: []
  ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
  ##
  paths: []
  # - /

  ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  # pathType: ImplementationSpecific

  ## TLS configuration for Prometheus Ingress
  ## Secret must be manually created in the namespace
  ##
  tls: []
  # - secretName: prometheus-general-tls
  #   hosts:
  #     - prometheus.example.com
# -- BETA: Configure the gateway routes for the chart here.
# More routes can be added by adding a dictionary key like the 'main' route.
# Be aware that this is an early beta of this feature,
# kube-prometheus-stack does not guarantee this works and is subject to change.
# Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
# [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
route:
  main:
    # -- Enables or disables the route
    enabled: false
    # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
    apiVersion: gateway.networking.k8s.io/v1
    # -- Set the route kind
    # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
    kind: HTTPRoute
    annotations: {}
    labels: {}
    hostnames: []
    # - my-filter.example.com
    parentRefs: []
    # - name: acme-gw

    # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
    ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
    ## matches, filters and additionalRules will be ignored if this is set to true. Be aware.
    httpsRedirect: false
    matches:
      - path:
          type: PathPrefix
          value: /
    ## Filters define the filters that are applied to requests that match this rule.
    filters: []
    ## Session persistence configuration for the route rule.
    sessionPersistence: {}
    # sessionName: route
    # type: Cookie
    # absoluteTimeout: 12h
    # cookieConfig:
    #   lifetimeType: Permanent

    ## Additional custom rules that can be added to the route
    additionalRules: []
## Configuration for creating an Ingress that will map to each Prometheus replica service
## prometheus.servicePerReplica must be enabled
##
ingressPerReplica:
  enabled: false
  ingressClassName: ""
  annotations: {}
  labels: {}
  ## Final form of the hostname for each per replica ingress is
  ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
  ##
  ## Prefix for the per replica ingress that will have `-$replicaNumber`
  ## appended to the end
  hostPrefix: ""
  ## Domain that will be used for the per replica ingress
  hostDomain: ""
  ## Paths to use for ingress rules
  ##
  paths: []
  # - /

  ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
  ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
  # pathType: ImplementationSpecific

  ## Secret name containing the TLS certificate for Prometheus per replica ingress
  ## Secret must be manually created in the namespace
  tlsSecretName: ""
  ## Separated secret for each per replica Ingress. Can be used together with cert-manager
  ##
  tlsSecretPerReplica:
    enabled: false
    ## Final form of the secret for each per replica ingress is
    ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
    ##
    prefix: "prometheus"
serviceMonitor:
  ## If true, create a serviceMonitor for prometheus
  ##
  selfMonitor: true

  ## Scrape interval. If not set, the Prometheus default scrape interval is used.
  ##
  interval: ""

  ## Additional labels
  ##
  additionalLabels: {}

  ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
  ##
  sampleLimit: 0

  ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
  ##
  targetLimit: 0

  ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelLimit: 0

  ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelNameLengthLimit: 0

  ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
  ##
  labelValueLengthLimit: 0

  ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
  scheme: ""

  ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
  ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
  tlsConfig: {}

  ## Path to a bearer token file used when scraping the endpoint (disabled when null).
  bearerTokenFile: null

  ## Metric relabel configs to apply to samples before ingestion.
  ##
  metricRelabelings: []
  # - action: keep
  #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
  #   sourceLabels: [__name__]

  ## relabel configs to apply to samples before ingestion.
  ##
  relabelings: []
  # - sourceLabels: [__meta_kubernetes_pod_node_name]
  #   separator: ;
  #   regex: ^(.*)$
  #   targetLabel: nodename
  #   replacement: $1
  #   action: replace

  ## Additional Endpoints
  ##
  additionalEndpoints: []
  # - port: oauth-metrics
  #   path: /metrics
## Settings affecting prometheusSpec
3526
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheusspec
3527
##
3528
prometheusSpec:
3529
## Statefulset's persistent volume claim retention policy
3530
## whenDeleted and whenScaled determine whether
3531
## statefulset's PVCs are deleted (true) or retained (false)
3532
## on scaling down and deleting statefulset, respectively.
3533
## Requires Kubernetes version 1.27.0+.
3534
## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
3535
persistentVolumeClaimRetentionPolicy: {}
3536
# whenDeleted: Retain
3537
# whenScaled: Retain
3538
3539
## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
3540
##
3541
disableCompaction: false
3542
## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod,
3543
## If the field isn't set, the operator mounts the service account token by default.
3544
## Warning: be aware that by default, Prometheus requires the service account token for Kubernetes service discovery,
3545
## It is possible to use strategic merge patch to project the service account token into the 'prometheus' container.
3546
automountServiceAccountToken: true
3547
## APIServerConfig
3548
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#apiserverconfig
3549
##
3550
apiserverConfig: {}
3551
## Allows setting additional arguments for the Prometheus container
3552
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.Prometheus
3553
additionalArgs: []
3554
## Convert all classic histograms to native histograms with custom buckets.
3555
## This corresponds to the 'convert_classic_histograms_to_nhcb' field in Prometheus configuration.
3556
##
3557
convertClassicHistogramsToNHCB: false
3558
## Enable scraping of classic histograms that are also exposed as native histograms.
3559
## This corresponds to the 'always_scrape_classic_histograms' field in Prometheus configuration.
3560
##
3561
scrapeClassicHistograms: false
3562
## Enable scraping of native histograms.
3563
## This corresponds to the 'scrape_native_histograms' field in Prometheus configuration.
3564
##
3565
scrapeNativeHistograms: false
3566
## File to which scrape failures are logged.
3567
## Reloading the configuration will reopen the file.
3568
## Defaults to empty (disabled)
3569
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.Prometheus
3570
##
3571
scrapeFailureLogFile: ""
3572
## Interval between consecutive scrapes.
3573
## Defaults to 30s.
3574
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
3575
##
3576
scrapeInterval: ""
3577
## Number of seconds to wait for target to respond before erroring
3578
##
3579
scrapeTimeout: ""
3580
## List of scrape classes to expose to scraping objects such as
3581
## PodMonitors, ServiceMonitors, Probes and ScrapeConfigs.
3582
##
3583
scrapeClasses: []
3584
# - name: istio-mtls
3585
# default: false
3586
# tlsConfig:
3587
# caFile: /etc/prometheus/secrets/istio.default/root-cert.pem
3588
# certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem
3589
3590
## PodTargetLabels are appended to the `spec.podTargetLabels` field of all PodMonitor and ServiceMonitor objects.
3591
##
3592
podTargetLabels: []
3593
# - customlabel
3594
3595
## Interval between consecutive evaluations.
3596
##
3597
evaluationInterval: ""
3598
## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
3599
##
3600
listenLocal: false
3601
## enableOTLPReceiver enables the OTLP receiver for Prometheus.
3602
enableOTLPReceiver: false
3603
## EnableAdminAPI enables Prometheus the administrative HTTP API which includes functionality such as deleting time series.
3604
## This is disabled by default.
3605
## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
3606
##
3607
enableAdminAPI: false
3608
## Sets version of Prometheus overriding the Prometheus version as derived
3609
## from the image tag. Useful in cases where the tag does not follow semver v2.
3610
version: ""
3611
## WebTLSConfig defines the TLS parameters for HTTPS
3612
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#webtlsconfig
3613
web: {}
3614
## Exemplars related settings that are runtime reloadable.
3615
## It requires to enable the exemplar storage feature to be effective.
3616
exemplars: {}
3617
## Maximum number of exemplars stored in memory for all series.
3618
## If not set, Prometheus uses its default value.
3619
## A value of zero or less than zero disables the storage.
3620
# maxSize: 100000
3621
3622
# EnableFeatures API enables access to Prometheus disabled features.
3623
# ref: https://prometheus.io/docs/prometheus/latest/feature_flags/
3624
enableFeatures: []
3625
# - exemplar-storage
3626
3627
## https://prometheus.io/docs/guides/opentelemetry
3628
##
3629
otlp: {}
3630
# promoteResourceAttributes: []
3631
# keepIdentifyingResourceAttributes: false
3632
# translationStrategy: NoUTF8EscapingWithSuffixes
3633
# convertHistogramsToNHCB: false
3634
3635
##
3636
serviceName:
3637
## Image of Prometheus.
3638
##
3639
image:
3640
registry: cgr.dev
3641
repository: chainguard-private/prometheus
3642
tag: latest
3643
sha: sha256:1306477d5bcf41caf21e06401b90933497a0ae84cb181376eee8ffaebe058b2b
3644
pullPolicy: IfNotPresent
3645
## Tolerations for use with node taints
3646
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
3647
##
3648
tolerations: []
3649
# - key: "key"
3650
# operator: "Equal"
3651
# value: "value"
3652
# effect: "NoSchedule"
3653
3654
## If specified, the pod's topology spread constraints.
3655
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
3656
##
3657
topologySpreadConstraints: []
3658
# - maxSkew: 1
3659
# topologyKey: topology.kubernetes.io/zone
3660
# whenUnsatisfiable: DoNotSchedule
3661
# labelSelector:
3662
# matchLabels:
3663
# app: prometheus
3664
3665
## Disable alerting
3666
##
3667
disableAlerting: false
3668
## Alertmanagers to which alerts will be sent
3669
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerendpoints
3670
##
3671
## Default configuration will connect to the alertmanager deployed as part of this release
3672
##
3673
alertingEndpoints: []
3674
# - name: ""
3675
# namespace: ""
3676
# port: http
3677
# scheme: http
3678
# pathPrefix: ""
3679
# tlsConfig: {}
3680
# bearerTokenFile: ""
3681
# apiVersion: v2
3682
3683
## External labels to add to any time series or alerts when communicating with external systems
3684
##
3685
externalLabels: {}
3686
## enable --web.enable-remote-write-receiver flag on prometheus-server
3687
##
3688
enableRemoteWriteReceiver: false
3689
## Name of the external label used to denote replica name
3690
##
3691
replicaExternalLabelName: ""
3692
## If true, the Operator won't add the external label used to denote replica name
3693
##
3694
replicaExternalLabelNameClear: false
3695
## Name of the external label used to denote Prometheus instance name
3696
##
3697
prometheusExternalLabelName: ""
3698
## If true, the Operator won't add the external label used to denote Prometheus instance name
3699
##
3700
prometheusExternalLabelNameClear: false
3701
## External URL at which Prometheus will be reachable.
3702
##
3703
externalUrl: ""
3704
## Define which Nodes the Pods are scheduled on.
3705
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
3706
##
3707
nodeSelector: {}
3708
## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
3709
## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
3710
## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
3711
## with the new list of secrets.
3712
##
3713
secrets: []
3714
## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
3715
## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
3716
##
3717
configMaps: []
3718
## QuerySpec defines the query command line flags when starting Prometheus.
3719
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#queryspec
3720
##
3721
query: {}
3722
## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery.
3723
ruleNamespaceSelector: {}
3724
## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel"
3725
# ruleNamespaceSelector:
3726
# matchLabels:
3727
# prometheus: somelabel
3728
3729
## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
3730
## prometheus resource to be created with selectors based on values in the helm deployment,
3731
## which will also match the PrometheusRule resources created
3732
##
3733
ruleSelectorNilUsesHelmValues: true
3734
## PrometheusRules to be selected for target discovery.
3735
## If {}, select all PrometheusRules
3736
##
3737
ruleSelector: {}
3738
## Example which select all PrometheusRules resources
3739
## with label "prometheus" with values any of "example-rules" or "example-rules-2"
3740
# ruleSelector:
3741
# matchExpressions:
3742
# - key: prometheus
3743
# operator: In
3744
# values:
3745
# - example-rules
3746
# - example-rules-2
3747
#
3748
## Example which select all PrometheusRules resources with label "role" set to "example-rules"
3749
# ruleSelector:
3750
# matchLabels:
3751
# role: example-rules
3752
3753
## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
3754
## prometheus resource to be created with selectors based on values in the helm deployment,
3755
## which will also match the servicemonitors created
3756
##
3757
serviceMonitorSelectorNilUsesHelmValues: true
3758
## ServiceMonitors to be selected for target discovery.
3759
## If {}, select all ServiceMonitors
3760
##
3761
serviceMonitorSelector: {}
3762
## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
3763
# serviceMonitorSelector:
3764
# matchLabels:
3765
# prometheus: somelabel
3766
3767
## Namespaces to be selected for ServiceMonitor discovery.
3768
##
3769
serviceMonitorNamespaceSelector: {}
3770
## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
3771
# serviceMonitorNamespaceSelector:
3772
# matchLabels:
3773
# prometheus: somelabel
3774
3775
## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
3776
## prometheus resource to be created with selectors based on values in the helm deployment,
3777
## which will also match the podmonitors created
3778
##
3779
podMonitorSelectorNilUsesHelmValues: true
3780
## PodMonitors to be selected for target discovery.
3781
## If {}, select all PodMonitors
3782
##
3783
podMonitorSelector: {}
3784
## Example which selects PodMonitors with label "prometheus" set to "somelabel"
3785
# podMonitorSelector:
3786
# matchLabels:
3787
# prometheus: somelabel
3788
3789
## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery.
3790
podMonitorNamespaceSelector: {}
3791
## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel"
3792
# podMonitorNamespaceSelector:
3793
# matchLabels:
3794
# prometheus: somelabel
3795
3796
## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
3797
## prometheus resource to be created with selectors based on values in the helm deployment,
3798
## which will also match the probes created
3799
##
3800
probeSelectorNilUsesHelmValues: true
3801
## Probes to be selected for target discovery.
3802
## If {}, select all Probes
3803
##
3804
probeSelector: {}
3805
## Example which selects Probes with label "prometheus" set to "somelabel"
3806
# probeSelector:
3807
# matchLabels:
3808
# prometheus: somelabel
3809
3810
## If nil, select own namespace. Namespaces to be selected for Probe discovery.
3811
probeNamespaceSelector: {}
3812
## Example which selects Probe in namespaces with label "prometheus" set to "somelabel"
3813
# probeNamespaceSelector:
3814
# matchLabels:
3815
# prometheus: somelabel
3816
3817
## If true, a nil or {} value for prometheus.prometheusSpec.scrapeConfigSelector will cause the
3818
## prometheus resource to be created with selectors based on values in the helm deployment,
3819
## which will also match the scrapeConfigs created
3820
##
3821
## If null and scrapeConfigSelector is also null, exclude field from the prometheusSpec
3822
## (keeping downward compatibility with older versions of CRD)
3823
##
3824
scrapeConfigSelectorNilUsesHelmValues: true
3825
## scrapeConfigs to be selected for target discovery.
3826
## If {}, select all scrapeConfigs
3827
##
3828
scrapeConfigSelector: {}
3829
## Example which selects scrapeConfigs with label "prometheus" set to "somelabel"
3830
# scrapeConfigSelector:
3831
# matchLabels:
3832
# prometheus: somelabel
3833
3834
## If nil, select own namespace. Namespaces to be selected for scrapeConfig discovery.
3835
## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD)
3836
scrapeConfigNamespaceSelector: {}
3837
## Example which selects scrapeConfig in namespaces with label "prometheus" set to "somelabel"
3838
# scrapeConfigNamespaceSelector:
3839
# matchLabels:
3840
# prometheus: somelabel
3841
3842
## How long to retain metrics
3843
##
3844
retention: 10d
3845
## Maximum size of metrics
3846
## Unit format should be in the form of "50GiB"
3847
retentionSize: ""
3848
## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration
3849
## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb
3850
tsdb:
3851
outOfOrderTimeWindow: 0s
3852
## Enable compression of the write-ahead log using Snappy.
3853
##
3854
walCompression: true
3855
## If true, the Operator won't process any Prometheus configuration changes
3856
##
3857
paused: false
3858
## Number of replicas of each shard to deploy for a Prometheus deployment.
3859
## Number of replicas multiplied by shards is the total number of Pods created.
3860
##
3861
replicas: 1
3862
## EXPERIMENTAL: Number of shards to distribute targets onto.
3863
## Number of replicas multiplied by shards is the total number of Pods created.
3864
## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved.
3865
## Increasing shards will not reshard data either but it will continue to be available from the same instances.
3866
## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
3867
## Sharding is done on the content of the `__address__` target meta-label.
3868
##
3869
shards: 1
3870
## Log level for Prometheus be configured in
3871
##
3872
logLevel: info
3873
## Log format for Prometheus be configured in
3874
##
3875
logFormat: logfmt
3876
## Prefix used to register routes, overriding externalUrl route.
3877
## Useful for proxies that rewrite URLs.
3878
##
3879
routePrefix: /
3880
## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
3881
## Metadata Labels and Annotations gets propagated to the prometheus pods.
3882
##
3883
podMetadata: {}
3884
# labels:
3885
# app: prometheus
3886
# k8s-app: prometheus
3887
3888
## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
3889
## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
3890
## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
3891
## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
3892
podAntiAffinity: "soft"
3893
## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
3894
## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
3895
##
3896
podAntiAffinityTopologyKey: kubernetes.io/hostname
3897
## Assign custom affinity rules to the prometheus instance
3898
## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
3899
##
3900
affinity: {}
3901
# nodeAffinity:
3902
# requiredDuringSchedulingIgnoredDuringExecution:
3903
# nodeSelectorTerms:
3904
# - matchExpressions:
3905
# - key: kubernetes.io/e2e-az-name
3906
# operator: In
3907
# values:
3908
# - e2e-az1
3909
# - e2e-az2
3910
3911
## The remote_read spec configuration for Prometheus.
3912
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotereadspec
3913
remoteRead: []
3914
# - url: http://remote1/read
3915
## additionalRemoteRead is appended to remoteRead
3916
additionalRemoteRead: []
3917
## The remote_write spec configuration for Prometheus.
3918
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotewritespec
3919
remoteWrite: []
3920
# - url: http://remote1/push
3921
## additionalRemoteWrite is appended to remoteWrite
3922
additionalRemoteWrite: []
3923
## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
3924
remoteWriteDashboards: false
3925
## Resource limits & requests
3926
##
3927
resources: {}
3928
# requests:
3929
# memory: 400Mi
3930
3931
## Prometheus StorageSpec for persistent data
3932
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
3933
##
3934
storageSpec: {}
3935
## Using PersistentVolumeClaim
3936
##
3937
# volumeClaimTemplate:
3938
# spec:
3939
# storageClassName: gluster
3940
# accessModes: ["ReadWriteOnce"]
3941
# resources:
3942
# requests:
3943
# storage: 50Gi
3944
# selector: {}
3945
3946
## Using tmpfs volume
3947
##
3948
# emptyDir:
3949
# medium: Memory
3950
3951
# Additional volumes on the output StatefulSet definition.
3952
volumes: []
3953
# Additional VolumeMounts on the output StatefulSet definition.
3954
volumeMounts: []
3955
## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
3956
## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
3957
## as specified in the official Prometheus documentation:
3958
## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
3959
## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility
3960
## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
3961
## scrape configs are going to break Prometheus after the upgrade.
3962
## AdditionalScrapeConfigs can be defined as a list or as a templated string.
3963
##
3964
## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
3965
## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
3966
##
3967
additionalScrapeConfigs: []
3968
# - job_name: kube-etcd
3969
# kubernetes_sd_configs:
3970
# - role: node
3971
# scheme: https
3972
# tls_config:
3973
# ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
3974
# cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
3975
# key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
3976
# relabel_configs:
3977
# - action: labelmap
3978
# regex: __meta_kubernetes_node_label_(.+)
3979
# - source_labels: [__address__]
3980
# action: replace
3981
# target_label: __address__
3982
# regex: ([^:;]+):(\d+)
3983
# replacement: ${1}:2379
3984
# - source_labels: [__meta_kubernetes_node_name]
3985
# action: keep
3986
# regex: .*mst.*
3987
# - source_labels: [__meta_kubernetes_node_name]
3988
# action: replace
3989
# target_label: node
3990
# regex: (.*)
3991
# replacement: ${1}
3992
# metric_relabel_configs:
3993
# - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
3994
# action: labeldrop
3995
#
3996
## If scrape config contains a repetitive section, you may want to use a template.
3997
## In the following example, you can see how to define `gce_sd_configs` for multiple zones
3998
# additionalScrapeConfigs: |
3999
# - job_name: "node-exporter"
4000
# gce_sd_configs:
4001
# {{range $zone := .Values.gcp_zones}}
4002
# - project: "project1"
4003
# zone: "{{$zone}}"
4004
# port: 9100
4005
# {{end}}
4006
# relabel_configs:
4007
# ...
4008
4009
## If additional scrape configurations are already deployed in a single secret file you can use this section.
4010
## Expected values are the secret name and key
4011
## Cannot be used with additionalScrapeConfigs
4012
additionalScrapeConfigsSecret: {}
4013
# enabled: false
4014
# name:
4015
# key:
4016
4017
## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. This can be useful
4018
## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
4019
additionalPrometheusSecretsAnnotations: {}
4020
## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
4021
## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config.
4022
## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
4023
## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this
4024
## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
4025
## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
4026
##
4027
additionalAlertManagerConfigs: []
4028
# - consul_sd_configs:
4029
# - server: consul.dev.test:8500
4030
# scheme: http
4031
# datacenter: dev
4032
# tag_separator: ','
4033
# services:
4034
# - metrics-prometheus-alertmanager
4035
4036
## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
4037
## them separately from the helm deployment, you can use this section.
4038
## Expected values are the secret name and key
4039
## Cannot be used with additionalAlertManagerConfigs
4040
additionalAlertManagerConfigsSecret: {}
4041
# name:
4042
# key:
4043
# optional: false
4044
4045
## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
4046
## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
4047
## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
4048
## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the
4049
## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
4050
## configs are going to break Prometheus after the upgrade.
4051
##
4052
additionalAlertRelabelConfigs: []
4053
# - separator: ;
4054
# regex: prometheus_replica
4055
# replacement: $1
4056
# action: labeldrop
4057
4058
## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
4059
## them separately from the helm deployment, you can use this section.
4060
## Expected values are the secret name and key
4061
## Cannot be used with additionalAlertRelabelConfigs
4062
additionalAlertRelabelConfigsSecret: {}
4063
# name:
4064
# key:
4065
4066
## SecurityContext holds pod-level security attributes and common container settings.
4067
## This defaults to non root user with uid 1000 and gid 2000.
4068
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md
4069
##
4070
securityContext:
4071
runAsGroup: 2000
4072
runAsNonRoot: true
4073
runAsUser: 1000
4074
fsGroup: 2000
4075
seccompProfile:
4076
type: RuntimeDefault
4077
## DNS configuration for Prometheus.
4078
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig
4079
dnsConfig: {}
4080
## DNS policy for Prometheus.
4081
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias
4082
dnsPolicy: ""
4083
## Priority class assigned to the Pods
4084
##
4085
priorityClassName: ""
4086
## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
4087
## This section is experimental, it may change significantly without deprecation notice in any release.
4088
## This is experimental and may change significantly without backward compatibility in any release.
4089
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosspec
4090
##
4091
thanos: {}
4092
# image: quay.io/thanos/thanos
4093
# secretProviderClass:
4094
# provider: gcp
4095
# parameters:
4096
# secrets: |
4097
# - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
4098
# fileName: "objstore.yaml"
4099
## ObjectStorageConfig configures object storage in Thanos.
4100
# objectStorageConfig:
4101
# # use existing secret, if configured, objectStorageConfig.secret will not be used
4102
# existingSecret: {}
4103
# # name: ""
4104
# # key: ""
4105
# # will render objectStorageConfig secret data and configure it to be used by Thanos custom resource,
4106
# # ignored when prometheusspec.thanos.objectStorageConfig.existingSecret is set
4107
# # https://thanos.io/tip/thanos/storage.md/#s3
4108
# secret: {}
4109
# # type: S3
4110
# # config:
4111
# # bucket: ""
4112
# # endpoint: ""
4113
# # region: ""
4114
# # access_key: ""
4115
# # secret_key: ""
4116
4117
## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
4118
## if using proxy extraContainer update targetPort with proxy container port
4119
containers: []
4120
# containers:
4121
# - name: oauth-proxy
4122
# image: quay.io/oauth2-proxy/oauth2-proxy:v7.15.2
4123
# args:
4124
# - --upstream=http://127.0.0.1:9090
4125
# - --http-address=0.0.0.0:8081
4126
# - --metrics-address=0.0.0.0:8082
4127
# - ...
4128
# ports:
4129
# - containerPort: 8081
4130
# name: oauth-proxy
4131
# protocol: TCP
4132
# - containerPort: 8082
4133
# name: oauth-metrics
4134
# protocol: TCP
4135
# resources: {}
4136
4137
## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
4138
## (permissions, dir tree) on mounted volumes before starting prometheus
4139
initContainers: []
4140
## PortName to use for Prometheus.
4141
##
4142
portName: "http-web"
4143
## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
4144
## on the file system of the Prometheus container e.g. bearer token files.
4145
arbitraryFSAccessThroughSMs: false
4146
## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
4147
## or PodMonitor to true, this overrides honor_labels to false.
4148
overrideHonorLabels: false
4149
## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs.
4150
overrideHonorTimestamps: false
4151
## When ignoreNamespaceSelectors is set to true, namespaceSelector from all PodMonitor, ServiceMonitor and Probe objects will be ignored,
4152
## they will only discover targets within the namespace of the PodMonitor, ServiceMonitor and Probe object,
4153
## and servicemonitors will be installed in the default service namespace.
4154
## Defaults to false.
4155
ignoreNamespaceSelectors: false
4156
## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
4157
## The label value will always be the namespace of the object that is being created.
4158
## Disabled by default
4159
enforcedNamespaceLabel: ""
4160
## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
4161
## Works only if enforcedNamespaceLabel set to true. Make sure both ruleNamespace and ruleName are set for each pair
4162
## Deprecated, use `excludedFromEnforcement` instead
4163
prometheusRulesExcludedFromEnforce: []
4164
## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
4165
## to be excluded from enforcing a namespace label of origin.
4166
## Works only if enforcedNamespaceLabel set to true.
4167
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#objectreference
4168
excludedFromEnforcement: []
4169
## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
4170
## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
4171
## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
4172
## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
4173
queryLogFile: false
4174
# Use to set global sample_limit for Prometheus. This act as default SampleLimit for ServiceMonitor or/and PodMonitor.
4175
# Set to 'false' to disable global sample_limit. or set to a number to override the default value.
4176
sampleLimit: false
4177
# EnforcedKeepDroppedTargetsLimit defines on the number of targets dropped by relabeling that will be kept in memory.
4178
# The value overrides any spec.keepDroppedTargets set by ServiceMonitor, PodMonitor, Probe objects unless spec.keepDroppedTargets
4179
# is greater than zero and less than spec.enforcedKeepDroppedTargets. 0 means no limit.
4180
enforcedKeepDroppedTargets: 0
4181
## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit
4182
## set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep overall
4183
## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
4184
enforcedSampleLimit: false
4185
## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
4186
## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
4187
## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
4188
## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
4189
enforcedTargetLimit: false
4190
## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present
4191
## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
4192
## 2.27.0 and newer.
4193
enforcedLabelLimit: false
4194
## Per-scrape limit on length of labels name that will be accepted for a sample. If a label name is longer than this number
4195
## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
4196
## 2.27.0 and newer.
4197
enforcedLabelNameLengthLimit: false
4198
## Per-scrape limit on length of labels value that will be accepted for a sample. If a label value is longer than this
4199
## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
4200
## versions 2.27.0 and newer.
4201
enforcedLabelValueLengthLimit: false
4202
## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
4203
## in Prometheus so it may change in any upcoming release.
4204
allowOverlappingBlocks: false
4205
## Specifies the validation scheme for metric and label names.
4206
## Supported values are: Legacy, UTF8
4207
nameValidationScheme: ""
4208
## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
4209
## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
4210
minReadySeconds: 0
4211
## Duration in seconds the pod needs to terminate gracefully.
4212
## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
4213
terminationGracePeriodSeconds: ~
4214
# Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
4215
# because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
4216
# Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it.
4217
# When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically.
4218
hostNetwork: false
4219
## Use the host's user namespace for Prometheus pods.
4220
## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
4221
hostUsers: ~
4222
# HostAlias holds the mapping between IP and hostnames that will be injected
4223
# as an entry in the pod's hosts file.
4224
hostAliases: []
4225
# - ip: 10.10.0.100
4226
# hostnames:
4227
# - a1.app.local
4228
# - b1.app.local
4229
4230
## TracingConfig configures tracing in Prometheus.
4231
## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheustracingconfig
4232
tracingConfig: {}
4233
## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager endpoints.
4234
## If set, the value should be either "Endpoints" or "EndpointSlice". If unset, the operator assumes the "Endpoints" role.
4235
serviceDiscoveryRole: ""
4236
## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
4237
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
4238
podManagementPolicy: ""
4239
## Update strategy for the StatefulSet.
4240
## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
4241
updateStrategy: {}
4242
# type: RollingUpdate
4243
# rollingUpdate:
4244
# maxUnavailable: 1
4245
4246
## Additional configuration which is not covered by the properties above. (passed through tpl)
4247
additionalConfig: {}
4248
## Additional configuration which is not covered by the properties above.
4249
## Useful, if you need advanced templating inside alertmanagerSpec.
4250
## Otherwise, use prometheus.prometheusSpec.additionalConfig (passed through tpl)
4251
additionalConfigString: ""
4252
## Defines the maximum time that the `prometheus` container's startup probe
4253
## will wait before being considered failed. The startup probe will return
4254
## success after the WAL replay is complete. If set, the value should be
4255
## greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15
4256
## minutes).
4257
maximumStartupDurationSeconds: 0
4258
## Set default scrapeProtocols for Prometheus instances
4259
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#scrapeprotocolstring-alias
4260
scrapeProtocols: []
4261
additionalRulesForClusterRole: []
4262
# - apiGroups: [ "" ]
4263
# resources:
4264
# - nodes/proxy
4265
# verbs: [ "get", "list", "watch" ]
4266
4267
additionalServiceMonitors: []
4268
## Name of the ServiceMonitor to create
4269
##
4270
# - name: ""
4271
4272
## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from
4273
## the chart
4274
##
4275
# additionalLabels: {}
4276
4277
## Service label for use in assembling a job name of the form <label value>-<port>
4278
## If no label is specified, the service name is used.
4279
##
4280
# jobLabel: ""
4281
4282
## labels to transfer from the kubernetes service to the target
4283
##
4284
# targetLabels: []
4285
4286
## labels to transfer from the kubernetes pods to the target
4287
##
4288
# podTargetLabels: []
4289
4290
## Label selector for services to which this ServiceMonitor applies
4291
##
4292
# selector: {}
4293
## Example which selects all services to be monitored
4294
## with label "monitoredby" with values any of "example-service-1" or "example-service-2"
4295
# matchExpressions:
4296
# - key: "monitoredby"
4297
# operator: In
4298
# values:
4299
# - example-service-1
4300
# - example-service-2
4301
4302
## label selector for services
4303
##
4304
# matchLabels: {}
4305
4306
## Namespaces from which services are selected
4307
##
4308
# namespaceSelector:
4309
## Match any namespace
4310
##
4311
# any: false
4312
4313
## Explicit list of namespace names to select
4314
##
4315
# matchNames: []
4316
4317
## Endpoints of the selected service to be monitored
4318
##
4319
# endpoints: []
4320
## Name of the endpoint's service port
4321
## Mutually exclusive with targetPort
4322
# - port: ""
4323
4324
## Name or number of the endpoint's target port
4325
## Mutually exclusive with port
4326
# - targetPort: ""
4327
4328
## File containing bearer token to be used when scraping targets
4329
##
4330
# bearerTokenFile: ""
4331
4332
## Interval at which metrics should be scraped
4333
##
4334
# interval: 30s
4335
4336
## HTTP path to scrape for metrics
4337
##
4338
# path: /metrics
4339
4340
## HTTP scheme to use for scraping
4341
##
4342
# scheme: http
4343
4344
## TLS configuration to use when scraping the endpoint
4345
##
4346
# tlsConfig:
4347
4348
## Path to the CA file
4349
##
4350
# caFile: ""
4351
4352
## Path to client certificate file
4353
##
4354
# certFile: ""
4355
4356
## Skip certificate verification
4357
##
4358
# insecureSkipVerify: false
4359
4360
## Path to client key file
4361
##
4362
# keyFile: ""
4363
4364
## Server name used to verify host name
4365
##
4366
# serverName: ""
4367
4368
## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
4369
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4370
##
4371
# metricRelabelings: []
4372
# - action: keep
4373
# regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
4374
# sourceLabels: [__name__]
4375
4376
## RelabelConfigs to apply to samples before scraping
4377
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4378
##
4379
# relabelings: []
4380
# - sourceLabels: [__meta_kubernetes_pod_node_name]
4381
# separator: ;
4382
# regex: ^(.*)$
4383
# targetLabel: nodename
4384
# replacement: $1
4385
# action: replace
4386
4387
## Fallback scrape protocol used by Prometheus for scraping metrics
4388
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
4389
##
4390
# fallbackScrapeProtocol: ""
4391
4392
## Attaches node metadata to the discovered targets
4393
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata
4394
##
4395
# attachMetadata:
4396
# node: true
4397
additionalPodMonitors: []
4398
## Name of the PodMonitor to create
4399
##
4400
# - name: ""
4401
## Additional labels to set used for the PodMonitorSelector. Together with standard labels from
4402
## the chart
4403
##
4404
# additionalLabels: {}
4405
4406
## Pod label for use in assembling a job name of the form <label value>-<port>
4407
## If no label is specified, the pod endpoint name is used.
4408
##
4409
# jobLabel: ""
4410
4411
## Label selector for pods to which this PodMonitor applies
4412
##
4413
# selector: {}
4414
## Example which selects all Pods to be monitored
4415
## with label "monitoredby" with values any of "example-pod-1" or "example-pod-2"
4416
# matchExpressions:
4417
# - key: "monitoredby"
4418
# operator: In
4419
# values:
4420
# - example-pod-1
4421
# - example-pod-2
4422
4423
## label selector for pods
4424
##
4425
# matchLabels: {}
4426
4427
## PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
4428
##
4429
# podTargetLabels: {}
4430
4431
## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
4432
##
4433
# sampleLimit: 0
4434
4435
## Namespaces from which pods are selected
4436
##
4437
# namespaceSelector:
4438
## Match any namespace
4439
##
4440
# any: false
4441
4442
## Explicit list of namespace names to select
4443
##
4444
# matchNames: []
4445
4446
## Endpoints of the selected pods to be monitored
4447
## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmetricsendpoint
4448
##
4449
# podMetricsEndpoints: []
4450
4451
## Fallback scrape protocol used by Prometheus for scraping metrics
4452
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
4453
##
4454
# fallbackScrapeProtocol: ""
4455
4456
## Attaches node metadata to the discovered targets
4457
## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata
4458
##
4459
# attachMetadata:
4460
# node: true
4461
4462
## Configuration for thanosRuler
## ref: https://thanos.io/tip/components/rule.md/
##
thanosRuler:
  ## Deploy thanosRuler
  ##
  enabled: false
  ## Annotations for ThanosRuler
  ##
  annotations: {}
  ## Service account for ThanosRuler to use.
  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
  ##
  serviceAccount:
    create: true
    name: ""
    annotations: {}
  ## Configure pod disruption budgets for ThanosRuler
  ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
  ##
  podDisruptionBudget:
    enabled: false
    minAvailable: 1
    # maxUnavailable: ""
    unhealthyPodEvictionPolicy: AlwaysAllow
  ingress:
    enabled: false
    ingressClassName: ""
    annotations: {}
    labels: {}
    ## Hosts must be provided if Ingress is enabled.
    ##
    hosts: []
    # - thanosruler.domain.com

    ## Paths to use for ingress rules - one path should match the thanosruler.routePrefix
    ##
    paths: []
    # - /

    ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
    ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
    # pathType: ImplementationSpecific

    ## TLS configuration for ThanosRuler Ingress
    ## Secret must be manually created in the namespace
    ##
    tls: []
    # - secretName: thanosruler-general-tls
    #   hosts:
    #     - thanosruler.example.com
  # -- BETA: Configure the gateway routes for the chart here.
  # More routes can be added by adding a dictionary key like the 'main' route.
  # Be aware that this is an early beta of this feature,
  # kube-prometheus-stack does not guarantee this works and is subject to change.
  # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
  # [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
  route:
    main:
      # -- Enables or disables the route
      enabled: false
      # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
      apiVersion: gateway.networking.k8s.io/v1
      # -- Set the route kind
      # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
      kind: HTTPRoute
      annotations: {}
      labels: {}
      hostnames: []
      # - my-filter.example.com
      parentRefs: []
      # - name: acme-gw

      # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
      ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
      ## matches, filters and additionalRules will be ignored if this is set to true.
      httpsRedirect: false
      matches:
        - path:
            type: PathPrefix
            value: /
      ## Filters define the filters that are applied to requests that match this rule.
      filters: []
      ## Session persistence configuration for the route rule.
      sessionPersistence: {}
      # sessionName: route
      # type: Cookie
      # absoluteTimeout: 12h
      # cookieConfig:
      #   lifetimeType: Permanent

      ## Additional custom rules that can be added to the route
      additionalRules: []
  ## Configuration for ThanosRuler service
  ##
  service:
    enabled: true
    annotations: {}
    labels: {}
    clusterIP: ""
    ipDualStack:
      enabled: false
      ipFamilies: ["IPv6", "IPv4"]
      ipFamilyPolicy: "PreferDualStack"
    ## Port for ThanosRuler Service to listen on
    ##
    port: 10902
    ## To be used with a proxy extraContainer port
    ##
    targetPort: 10902
    ## Port to expose on each node
    ## Only used if service.type is 'NodePort'
    ##
    nodePort: 30905
    ## List of IP addresses at which the Prometheus server service is available
    ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
    ##

    ## Additional ports to open for ThanosRuler service
    additionalPorts: []
    externalIPs: []
    loadBalancerIP: ""
    loadBalancerSourceRanges: []
    ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
    ##
    externalTrafficPolicy: Cluster
    ## Service type
    ##
    type: ClusterIP
  ## Configuration for creating a ServiceMonitor for the ThanosRuler service
  ##
  serviceMonitor:
    ## If true, create a serviceMonitor for thanosRuler
    ##
    selfMonitor: true
    ## Scrape interval. If not set, the Prometheus default scrape interval is used.
    ##
    interval: ""
    ## Additional labels
    ##
    additionalLabels: {}
    ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
    ##
    sampleLimit: 0
    ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
    ##
    targetLimit: 0
    ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelLimit: 0
    ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelNameLengthLimit: 0
    ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
    ##
    labelValueLengthLimit: 0
    ## proxyUrl: URL of a proxy that should be used for scraping.
    ##
    proxyUrl: ""
    ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
    scheme: ""
    ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
    ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
    tlsConfig: {}
    bearerTokenFile:
    ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    metricRelabelings: []
    # - action: keep
    #   regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
    #   sourceLabels: [__name__]

    ## RelabelConfigs to apply to samples before scraping
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
    ##
    relabelings: []
    # - sourceLabels: [__meta_kubernetes_pod_node_name]
    #   separator: ;
    #   regex: ^(.*)$
    #   targetLabel: nodename
    #   replacement: $1
    #   action: replace

    ## Additional Endpoints
    ##
    additionalEndpoints: []
    # - port: oauth-metrics
    #   path: /metrics
  ## Settings affecting thanosRulerSpec
  ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosrulerspec
  ##
  thanosRulerSpec:
    ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
    ## Metadata Labels and Annotations gets propagated to the ThanosRuler pods.
    ##
    podMetadata: {}
    ##
    serviceName:
    ## Image of ThanosRuler
    ##
    image:
      registry: cgr.dev
      repository: chainguard-private/thanos
      tag: latest
      sha: sha256:91616ecf31235c2f626295c55d11389fcdcfb2b1f817099a9d7460e7765bd183
    ## Namespaces to be selected for PrometheusRules discovery.
    ## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery.
    ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#namespaceselector for usage
    ##
    ruleNamespaceSelector: {}
    ## If true, a nil or {} value for thanosRuler.thanosRulerSpec.ruleSelector will cause the
    ## prometheus resource to be created with selectors based on values in the helm deployment,
    ## which will also match the PrometheusRule resources created
    ##
    ruleSelectorNilUsesHelmValues: true
    ## PrometheusRules to be selected for target discovery.
    ## If {}, select all PrometheusRules
    ##
    ruleSelector: {}
    ## Example which select all PrometheusRules resources
    ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
    # ruleSelector:
    #   matchExpressions:
    #     - key: prometheus
    #       operator: In
    #       values:
    #         - example-rules
    #         - example-rules-2
    #
    ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
    # ruleSelector:
    #   matchLabels:
    #     role: example-rules

    ## Define Log Format
    # Use logfmt (default) or json logging
    logFormat: logfmt
    ## Log level for ThanosRuler to be configured with.
    ##
    logLevel: info
    ## Size is the expected size of the thanosRuler cluster. The controller will eventually make the size of the
    ## running cluster equal to the expected size.
    replicas: 1
    ## Time duration ThanosRuler shall retain data for. Default is '24h', and must match the regular expression
    ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
    ##
    retention: 24h
    ## Interval between consecutive evaluations.
    ##
    evaluationInterval: ""
    ## Storage is the definition of how storage will be used by the ThanosRuler instances.
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
    ##
    storage: {}
    # volumeClaimTemplate:
    #   spec:
    #     storageClassName: gluster
    #     accessModes: ["ReadWriteOnce"]
    #     resources:
    #       requests:
    #         storage: 50Gi
    #   selector: {}

    ## AlertmanagerConfig define configuration for connecting to alertmanager.
    ## Only available with Thanos v0.10.0 and higher. Maps to the alertmanagers.config Thanos Ruler arg.
    alertmanagersConfig:
      # use existing secret, if configured, alertmanagersConfig.secret will not be used
      existingSecret: {}
      # name: ""
      # key: ""
      # will render alertmanagersConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when alertmanagersConfig.existingSecret is set
      # https://thanos.io/tip/components/rule.md/#alertmanager
      secret: {}
      # alertmanagers:
      #   - api_version: v2
      #     http_config:
      #       basic_auth:
      #         username: some_user
      #         password: some_pass
      #     static_configs:
      #       - alertmanager.thanos.io
      #     scheme: http
      #     timeout: 10s
    ## DEPRECATED. Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, alertmanagersConfig should be used instead.
    ## Note: this field will be ignored if alertmanagersConfig is specified. Maps to the alertmanagers.url Thanos Ruler arg.
    # alertmanagersUrl:

    ## The external URL the Thanos Ruler instances will be available under. This is necessary to generate correct URLs. This is necessary if Thanos Ruler is not served from root of a DNS name. string false
    ##
    externalPrefix:
    ## If true, http://{{ template "kube-prometheus-stack.thanosRuler.name" . }}.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.thanosRuler.service.port }}
    ## will be used as value for externalPrefix
    externalPrefixNilUsesHelmValues: true
    ## The route prefix ThanosRuler registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
    ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
    ##
    routePrefix: /
    ## ObjectStorageConfig configures object storage in Thanos
    objectStorageConfig:
      # use existing secret, if configured, objectStorageConfig.secret will not be used
      existingSecret: {}
      # name: ""
      # key: ""
      # will render objectStorageConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when objectStorageConfig.existingSecret is set
      # https://thanos.io/tip/thanos/storage.md/#s3
      secret: {}
      # type: S3
      # config:
      #   bucket: ""
      #   endpoint: ""
      #   region: ""
      #   access_key: ""
      #   secret_key: ""
    ## Labels by name to drop before sending to alertmanager
    ## Maps to the --alert.label-drop flag of thanos ruler.
    alertDropLabels: []
    ## QueryEndpoints defines Thanos querier endpoints from which to query metrics.
    ## Maps to the --query flag of thanos ruler.
    queryEndpoints: []
    ## Define configuration for connecting to thanos query instances. If this is defined, the queryEndpoints field will be ignored.
    ## Maps to the query.config CLI argument. Only available with thanos v0.11.0 and higher.
    queryConfig:
      # use existing secret, if configured, queryConfig.secret will not be used
      existingSecret: {}
      # name: ""
      # key: ""
      # render queryConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when queryConfig.existingSecret is set
      # https://thanos.io/tip/components/rule.md/#query-api
      secret: {}
      # - http_config:
      #     basic_auth:
      #       username: some_user
      #       password: some_pass
      #   static_configs:
      #     - URL
      #   scheme: http
      #   timeout: 10s
    ## Labels configure the external label pairs to ThanosRuler. A default replica
    ## label `thanos_ruler_replica` will be always added as a label with the value
    ## of the pod's name and it will be dropped in the alerts.
    labels: {}
    ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
    ##
    paused: false
    ## Allows setting additional arguments for the ThanosRuler container
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosruler
    ##
    additionalArgs: []
    # - name: remote-write.config
    #   value: |-
    #     "remote_write":
    #       - "name": "receiver-0"
    #         "remote_timeout": "30s"
    #         "url": "http://thanos-receiver-0.thanos-receiver:8081/api/v1/receive"

    ## Define which Nodes the Pods are scheduled on.
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
    ##
    nodeSelector: {}
    ## Define resources requests and limits for single Pods.
    ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
    ##
    resources: {}
    # requests:
    #   memory: 400Mi

    ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
    ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
    ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
    ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
    ##
    podAntiAffinity: "soft"
    ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
    ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
    ##
    podAntiAffinityTopologyKey: kubernetes.io/hostname
    ## Assign custom affinity rules to the thanosRuler instance
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
    ##
    affinity: {}
    # nodeAffinity:
    #   requiredDuringSchedulingIgnoredDuringExecution:
    #     nodeSelectorTerms:
    #       - matchExpressions:
    #           - key: kubernetes.io/e2e-az-name
    #             operator: In
    #             values:
    #               - e2e-az1
    #               - e2e-az2

    ## If specified, the pod's tolerations.
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
    ##
    tolerations: []
    # - key: "key"
    #   operator: "Equal"
    #   value: "value"
    #   effect: "NoSchedule"

    ## If specified, the pod's topology spread constraints.
    ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
    ##
    topologySpreadConstraints: []
    # - maxSkew: 1
    #   topologyKey: topology.kubernetes.io/zone
    #   whenUnsatisfiable: DoNotSchedule
    #   labelSelector:
    #     matchLabels:
    #       app: thanos-ruler

    ## SecurityContext holds pod-level security attributes and common container settings.
    ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
    ##
    securityContext:
      runAsGroup: 2000
      runAsNonRoot: true
      runAsUser: 1000
      fsGroup: 2000
      seccompProfile:
        type: RuntimeDefault
    ## Use the host's user namespace for ThanosRuler pods.
    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
    hostUsers: ~
    ## ListenLocal makes the ThanosRuler server listen on loopback, so that it does not bind against the Pod IP.
    ## Note this is only for the ThanosRuler UI, not the gossip communication.
    ##
    listenLocal: false
    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a ThanosRuler pod.
    ##
    containers: []
    # Additional volumes on the output StatefulSet definition.
    volumes: []
    # Additional VolumeMounts on the output StatefulSet definition.
    volumeMounts: []
    ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
    ## (permissions, dir tree) on mounted volumes before starting prometheus
    initContainers: []
    ## Priority class assigned to the Pods
    ##
    priorityClassName: ""
    ## PortName to use for ThanosRuler.
    ##
    portName: "web"
    ## Duration in seconds the pod needs to terminate gracefully.
    ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
    terminationGracePeriodSeconds: ~
    ## WebTLSConfig defines the TLS parameters for HTTPS
    ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosrulerwebspec
    web: {}
    ## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
    ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
    podManagementPolicy: ""
    ## Update strategy for the StatefulSet.
    ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
    updateStrategy: {}
    # type: RollingUpdate
    # rollingUpdate:
    #   maxUnavailable: 1

    ## Additional configuration which is not covered by the properties above. (passed through tpl)
    additionalConfig: {}
    ## Additional configuration which is not covered by the properties above.
    ## Useful, if you need advanced templating
    additionalConfigString: ""
  ## ExtraSecret can be used to store various data in an extra secret
  ## (use it for example to store hashed basic auth credentials)
  extraSecret:
    ## if not set, name will be auto generated
    # name: ""
    annotations: {}
    data: {}
    # auth: |
    #   foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
    #   someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
4938
## Setting to true produces cleaner resource names, but requires a data migration because the name of the persistent volume changes. Therefore this should only be set once on initial installation.
4939
##
4940
cleanPrometheusOperatorObjectNames: false
4941
## Extra manifests to deploy. Can be of type dict or list.
4942
## If dict, keys are ignored and only values are used.
4943
## Items contained within extraObjects can be defined as dict or string and are passed through tpl.
4944
extraManifests: null
4945
# - apiVersion: v1
4946
# kind: ConfigMap
4947
# metadata:
4948
# labels:
4949
# name: prometheus-extra
4950
# data:
4951
# extra-data: "value"
4952
#
4953
# can also be defined as a string, useful for templating field names
4954
# - |
4955
# apiVersion: v1
4956
# kind: Secret
4957
# type: Opaque
4958
# metadata:
4959
# name: super-secret
4960
# labels:
4961
# {{- range $key, $value := .Values.commonLabels }}
4962
# {{ $key }}: {{ $value }}
4963
# {{- end }}
4964
# data:
4965
# plaintext: Zm9vYmFy
4966
# templated: '{{ print "foobar" | upper | b64enc }}'
4967

The trusted source for open source

Talk to an expert
PrivacyTerms

Product

Chainguard ContainersChainguard LibrariesChainguard VMsChainguard OS PackagesChainguard ActionsChainguard Agent SkillsIntegrationsPricing
© 2026 Chainguard, Inc. All Rights Reserved.
Chainguard® and the Chainguard logo are registered trademarks of Chainguard, Inc. in the United States and/or other countries.
The other respective trademarks mentioned on this page are owned by the respective companies and use of them does not imply any affiliation or endorsement.