1# Default values for kube-prometheus-stack.
2# This is a YAML-formatted file.
3# Declare variables to be passed into your templates.
5## Provide a name in place of kube-prometheus-stack for `app:` labels
8## Override the deployment namespace
11## Provide a k8s version to auto dashboard import script example: kubeTargetVersionOverride: "1.26.6"
13kubeTargetVersionOverride: ""
14## Allow kubeVersion to be overridden while creating the ingress
16kubeVersionOverride: ""
17## Provide a name to substitute for the full names of resources
20## Labels to apply to all resources
26## Install Prometheus Operator CRDs
30 ## The CRD upgrade job mitigates the limitation of helm not being able to upgrade CRDs.
31 ## The job will apply the CRDs to the cluster before the operator is deployed, using helm hooks.
32 ## It deploys a corresponding clusterrole, clusterrolebinding and serviceaccount to apply the CRDs.
33 ## This feature is in preview, off by default and may change in the future.
40 repository: chainguard-private/busybox
42 sha: sha256:a4df82542624593a943071c90310653381295bb95494ff58a4650101aefeafaf
43 pullPolicy: IfNotPresent
46 repository: chainguard-private/kubectl
48 sha: sha256:2ad180bbbcc8d809f3a9ab75202adeddec89ee5554a46aff8ed5d0429f18a151
49 pullPolicy: IfNotPresent
51 ## Define resources requests and limits for single Pods.
52 ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
58 ## Additional volume mounts
61 ## Define which Nodes the Pods are scheduled on.
62 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
65 ## Assign custom affinity rules to the upgrade-crd job
66 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
70 # requiredDuringSchedulingIgnoredDuringExecution:
73 # - key: kubernetes.io/e2e-az-name
79 ## If specified, the pod's tolerations.
80 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
86 # effect: "NoSchedule"
88 ## If specified, the pod's topology spread constraints.
89 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
91 topologySpreadConstraints: []
93 # topologyKey: topology.kubernetes.io/zone
94 # whenUnsatisfiable: DoNotSchedule
99 # ## Labels to add to the upgrade-crd job
102 ## Annotations to add to the upgrade-crd job
105 ## Labels to add to the upgrade-crd pod
108 ## Annotations to add to the upgrade-crd pod
111 ## Service account for upgrade crd job to use.
112 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
119 automountServiceAccountToken: true
120 ## Automounting API credentials for upgrade crd job pod.
122 automountServiceAccountToken: true
123 ## Container-specific security context configuration
124 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
126 containerSecurityContext:
127 allowPrivilegeEscalation: false
128 readOnlyRootFilesystem: true
132 ## SecurityContext holds pod-level security attributes and common container settings.
133 ## This defaults to non root user with uid 1000 and gid 2000 (type: *v1.PodSecurityContext, required: false)
134 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
143## Custom rules to override "for" and "severity" in defaultRules
146# AlertmanagerFailedReload:
148# AlertmanagerMembersInconsistent:
152## Create default rules for monitoring the cluster
159 configReloaders: true
161 k8sContainerCpuUsageSecondsTotal: true
162 k8sContainerMemoryCache: true
163 k8sContainerMemoryRss: true
164 k8sContainerMemorySwap: true
165 k8sContainerResource: true
166 k8sContainerMemoryWorkingSetBytes: true
168 kubeApiserverAvailability: true
169 kubeApiserverBurnrate: true
170 kubeApiserverHistogram: true
171 kubeApiserverSlos: true
172 kubeControllerManager: true
175 kubePrometheusGeneral: true
176 kubePrometheusNodeRecording: true
178 kubernetesResources: true
179 kubernetesStorage: true
180 kubernetesSystem: true
181 kubeSchedulerAlerting: true
182 kubeSchedulerRecording: true
183 kubeStateMetrics: true
186 nodeExporterAlerting: true
187 nodeExporterRecording: true
189 prometheusOperator: true
191 # Defines the operator for namespace selection in rules
192 # Use "=~" to include namespaces matching the pattern (default)
193 # Use "!~" to exclude namespaces matching the pattern
194 appNamespacesOperator: "=~"
195 ## Reduce app namespace alert scope
196 appNamespacesTarget: ".*"
197 ## Set keep_firing_for for all alerts
199 ## Labels for default rules
201 ## Annotations for default rules
203 ## Additional labels for PrometheusRule alerts
204 additionalRuleLabels: {}
205 ## Additional annotations for PrometheusRule alerts
206 additionalRuleAnnotations: {}
207 ## Additional labels for specific PrometheusRule alert groups
208 additionalRuleGroupLabels:
213 k8sContainerCpuUsageSecondsTotal: {}
214 k8sContainerMemoryCache: {}
215 k8sContainerMemoryRss: {}
216 k8sContainerMemorySwap: {}
217 k8sContainerResource: {}
219 kubeApiserverAvailability: {}
220 kubeApiserverBurnrate: {}
221 kubeApiserverHistogram: {}
222 kubeApiserverSlos: {}
223 kubeControllerManager: {}
226 kubePrometheusGeneral: {}
227 kubePrometheusNodeRecording: {}
229 kubernetesResources: {}
230 kubernetesStorage: {}
232 kubeSchedulerAlerting: {}
233 kubeSchedulerRecording: {}
237 nodeExporterAlerting: {}
238 nodeExporterRecording: {}
240 prometheusOperator: {}
241 ## Additional annotations for specific PrometheusRule alert groups
242 additionalRuleGroupAnnotations:
247 k8sContainerCpuUsageSecondsTotal: {}
248 k8sContainerMemoryCache: {}
249 k8sContainerMemoryRss: {}
250 k8sContainerMemorySwap: {}
251 k8sContainerResource: {}
253 kubeApiserverAvailability: {}
254 kubeApiserverBurnrate: {}
255 kubeApiserverHistogram: {}
256 kubeApiserverSlos: {}
257 kubeControllerManager: {}
260 kubePrometheusGeneral: {}
261 kubePrometheusNodeRecording: {}
263 kubernetesResources: {}
264 kubernetesStorage: {}
266 kubeSchedulerAlerting: {}
267 kubeSchedulerRecording: {}
271 nodeExporterAlerting: {}
272 nodeExporterRecording: {}
274 prometheusOperator: {}
275 additionalAggregationLabels: []
276 ## Prefix for runbook URLs. Use this to override the first part of the runbookURLs that is common to all rules.
277 runbookUrl: "https://runbooks.prometheus-operator.dev/runbooks"
279 fsSelector: 'fstype!=""'
280 # fsSelector: 'fstype=~"ext[234]|btrfs|xfs|zfs"'
281 ## Disabled PrometheusRule alerts
284 # NodeRAIDDegraded: true
285## Deprecated way to provide custom recording or alerting rules to be deployed into the cluster.
287# additionalPrometheusRules: []
288# - name: my-rule-file
293# expr: 100 * my_record
295## Provide custom recording or alerting rules to be deployed into the cluster.
297additionalPrometheusRulesMap: {}
303# expr: 100 * my_record
309 ## Create ClusterRoles that extend the existing view, edit and admin ClusterRoles to interact with prometheus-operator CRDs
310 ## Ref: https://kubernetes.io/docs/reference/access-authn-authz/rbac/#aggregated-clusterroles
311 createAggregateClusterRoles: false
312 ## Global image registry to use if it needs to be overridden for some specific use cases (e.g. local registries, custom images, ...)
315 ## Reference to one or more secrets to be used when pulling images
316 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
319 # - name: "image-pull-secret"
321 # - "image-pull-secret"
323 ## Deploys the windows-exporter and Windows-specific dashboards and rules (job name must be 'windows-exporter')
325## Configuration for prometheus-windows-exporter
326## ref: https://github.com/prometheus-community/helm-charts/tree/main/charts/prometheus-windows-exporter
328prometheus-windows-exporter:
329 ## Enable ServiceMonitor and set Kubernetes label to use as a job label
336 ## Set job label to 'windows-exporter' as required by the default Prometheus rules and Grafana dashboards
339 jobLabel: windows-exporter
340 ## Enable memory and container metrics as required by the default Prometheus rules and Grafana dashboards
344 enabled: '[defaults],memory,container'
345## Configuration for alertmanager
346## ref: https://prometheus.io/docs/alerting/alertmanager/
349 ## Deploy alertmanager
352 # Optional: Override the namespace where Alertmanager will be deployed.
353 namespaceOverride: ""
354 ## Annotations for Alertmanager
357 ## Additional labels for Alertmanager
360 ## API that Prometheus will use to communicate with alertmanager. Possible values are v1, v2
363 ## @param alertmanager.enableFeatures Enable access to Alertmanager disabled features.
366 ## Create dashboard configmap even if alertmanager deployment has been disabled
368 forceDeployDashboards: false
369 ## Network Policy configuration
372 # -- Enable network policy for Alertmanager
374 # -- Define policy types. If egress is enabled, both Ingress and Egress will be used
375 # Valid values are ["Ingress"] or ["Ingress", "Egress"]
379 # -- Gateway (formerly ingress controller) configuration
382 # -- Gateway namespace
385 # -- Gateway pod labels
388 # app.kubernetes.io/name: ingress-nginx
389 # -- Additional custom ingress rules
391 additionalIngress: []
393 # - namespaceSelector:
395 # name: another-namespace
402 # app.kubernetes.io/name: loki
407 # -- Configure egress rules
410 # -- Enable egress rules. When enabled, policyTypes will include Egress
413 # -- Custom egress rules
417 # - namespaceSelector: {}
424 # -- Enable rules for alertmanager cluster traffic
426 enableClusterRules: true
427 # -- Configure monitoring component rules
430 # -- Enable ingress from Prometheus
433 # -- Enable ingress for config reloader metrics
436 ## Service account for Alertmanager to use.
437 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
443 automountServiceAccountToken: true
444 ## Configure pod disruption budgets for Alertmanager
445 ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
451 unhealthyPodEvictionPolicy: AlwaysAllow
452 ## Enable vertical pod autoscaler support for Alertmanager
453 ## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
455 verticalPodAutoscaler:
457 # Recommender responsible for generating recommendation for the object.
458 # List should be empty (then the default recommender will generate the recommendation)
459 # or contain exactly one recommender.
461 # - name: custom-recommender-performance
463 # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
464 controlledResources: []
465 # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
466 # controlledValues: RequestsAndLimits
468 # Define the max allowed resources for the pod
472 # Define the min allowed resources for the pod
478 # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
479 # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
481 ## Alertmanager configuration directives
482 ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
483 ## https://prometheus.io/webtools/alerting/routing-tree-editor/
490 - 'severity = critical'
492 - 'severity =~ warning|info'
497 - 'severity = warning'
504 - 'alertname = InfoInhibitor'
510 - 'alertname = InfoInhibitor'
512 group_by: ['namespace']
520 - alertname = "Watchdog"
524 - '/etc/alertmanager/config/*.tmpl'
525 ## Alertmanager configuration directives (as string type, preferred over the config hash map)
526 ## stringConfig will be used only if tplConfig is true
527 ## ref: https://prometheus.io/docs/alerting/configuration/#configuration-file
528 ## https://prometheus.io/webtools/alerting/routing-tree-editor/
531 ## Pass the Alertmanager configuration directives through Helm's templating
532 ## engine. If the Alertmanager configuration contains Alertmanager templates,
533 ## they'll need to be properly escaped so that they are not interpreted by
535 ## ref: https://helm.sh/docs/developing_charts/#using-the-tpl-function
536 ## https://prometheus.io/docs/alerting/configuration/#tmpl_string
537 ## https://prometheus.io/docs/alerting/notifications/
538 ## https://prometheus.io/docs/alerting/notification_examples/
540 ## Alertmanager template files to format alerts
541 ## By default, templateFiles are placed in /etc/alertmanager/config/ and if
542 ## they have a .tmpl file suffix will be loaded. See config.templates above
543 ## to change, add other suffixes. If adding other suffixes, be sure to update
544 ## config.templates above to include those suffixes.
545 ## ref: https://prometheus.io/docs/alerting/notifications/
546 ## https://prometheus.io/docs/alerting/notification_examples/
550 ## An example template:
551 # template_1.tmpl: |-
552 # {{ define "cluster" }}{{ .ExternalURL | reReplaceAll ".*alertmanager\\.(.*)" "$1" }}{{ end }}
554 # {{ define "slack.myorg.text" }}
556 # {{ range .Alerts }}
557 # *Alert:* {{ .Annotations.summary }} - `{{ .Labels.severity }}`
558 # *Cluster:* {{ template "cluster" $root }}
559 # *Description:* {{ .Annotations.description }}
560 # *Graph:* <{{ .GeneratorURL }}|:chart_with_upwards_trend:>
561 # *Runbook:* <{{ .Annotations.runbook }}|:spiral_note_pad:>
563 # {{ range .Labels.SortedPairs }} - *{{ .Name }}:* `{{ .Value }}`
573 ## Override ingress to a different defined port on the service
575 ## Override ingress to a different service then the default, this is useful if you need to
576 ## point to a specific instance of the alertmanager (eg kube-prometheus-stack-alertmanager-0)
577 # serviceName: kube-prometheus-stack-alertmanager-0
579 ## Hosts must be provided if Ingress is enabled.
582 # - alertmanager.domain.com
584 ## Paths to use for ingress rules - one path should match the alertmanagerSpec.routePrefix
589 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
590 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
591 # pathType: ImplementationSpecific
593 ## TLS configuration for Alertmanager Ingress
594 ## Secret must be manually created in the namespace
597 # - secretName: alertmanager-general-tls
599 # - alertmanager.example.com
600 # -- BETA: Configure the gateway routes for the chart here.
601 # More routes can be added by adding a dictionary key like the 'main' route.
602 # Be aware that this is an early beta of this feature,
603 # kube-prometheus-stack does not guarantee this works and is subject to change.
604 # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
605 # [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
608 # -- Enables or disables the route
610 # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
611 apiVersion: gateway.networking.k8s.io/v1
612 # -- Set the route kind
613 # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
618 # - my-filter.example.com
622 # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
623 ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
624 ## matches, filters and additionalRules will be ignored if this is set to true. Be aware
630 ## Filters define the filters that are applied to requests that match this rule.
632 ## Session persistence configuration for the route rule.
633 sessionPersistence: {}
636 # absoluteTimeout: 12h
638 # lifetimeType: Permanent
640 ## Additional custom rules that can be added to the route
642 ## Configuration for Alertmanager secret
646 ## Configuration for creating an Ingress that will map to each Alertmanager replica service
647 ## alertmanager.servicePerReplica must be enabled
654 ## Final form of the hostname for each per replica ingress is
655 ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
657 ## Prefix for the per replica ingress that will have `-$replicaNumber`
658 ## appended to the end
660 ## Domain that will be used for the per replica ingress
662 ## Paths to use for ingress rules
667 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
668 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
669 # pathType: ImplementationSpecific
671 ## Secret name containing the TLS certificate for alertmanager per replica ingress
672 ## Secret must be manually created in the namespace
674 ## Separated secret for each per replica Ingress. Can be used together with cert-manager
678 ## Final form of the secret for each per replica ingress is
679 ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
681 prefix: "alertmanager"
682 ## Configuration for Alertmanager service
691 ipFamilies: ["IPv6", "IPv4"]
692 ipFamilyPolicy: "PreferDualStack"
693 ## Port for Alertmanager Service to listen on
696 ## Port for Alertmanager cluster communication
699 ## To be used with a proxy extraContainer port
702 ## Port to expose on each node
703 ## Only used if service.type is 'NodePort'
706 ## List of IP addresses at which the Prometheus server service is available
707 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
710 ## Additional ports to open for Alertmanager service
713 # - name: oauth-proxy
716 # - name: oauth-metrics
722 loadBalancerSourceRanges: []
723 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
725 externalTrafficPolicy: Cluster
726 ## If you want to make sure that connections from a particular client are passed to the same Pod each time
727 ## Accepts 'ClientIP' or 'None'
729 sessionAffinity: None
730 ## If you want to modify the ClientIP sessionAffinity timeout
731 ## The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP"
733 sessionAffinityConfig:
735 timeoutSeconds: 10800
739 ## Configuration for creating a separate Service for each statefulset Alertmanager replica
744 ## Port for Alertmanager Service per replica to listen on
747 ## To be used with a proxy extraContainer port
749 ## Port to expose on each node
750 ## Only used if servicePerReplica.type is 'NodePort'
753 ## Loadbalancer source IP ranges
754 ## Only used if servicePerReplica.type is "LoadBalancer"
755 loadBalancerSourceRanges: []
756 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
758 externalTrafficPolicy: Cluster
762 ## Configuration for creating a ServiceMonitor for AlertManager
765 ## If true, a ServiceMonitor will be created for the AlertManager service.
768 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
774 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
777 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
780 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
783 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
785 labelNameLengthLimit: 0
786 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
788 labelValueLengthLimit: 0
789 ## proxyUrl: URL of a proxy that should be used for scraping.
792 ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
794 ## enableHttp2: Whether to enable HTTP2.
795 ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#endpoint
797 ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
798 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
801 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
802 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
804 metricRelabelings: []
806 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
807 # sourceLabels: [__name__]
809 ## RelabelConfigs to apply to samples before scraping
810 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
813 # - sourceLabels: [__meta_kubernetes_pod_node_name]
816 # targetLabel: nodename
820 ## Additional Endpoints
822 additionalEndpoints: []
823 # - port: oauth-metrics
825 ## Settings affecting alertmanagerSpec
826 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerspec
829 ## Statefulset's persistent volume claim retention policy
830 ## whenDeleted and whenScaled determine whether
831 ## statefulset's PVCs are deleted (true) or retained (false)
832 ## on scaling down and deleting statefulset, respectively.
833 ## Requires Kubernetes version 1.27.0+.
834 ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
835 persistentVolumeClaimRetentionPolicy: {}
836 # whenDeleted: Retain
839 ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
840 ## Metadata Labels and Annotations gets propagated to the Alertmanager pods.
845 ## Image of Alertmanager
849 repository: chainguard-private/prometheus-alertmanager
851 sha: sha256:d7d7b666653d76212c1304266d09a8266407becda346bccc2f7a296842f497cd
852 pullPolicy: IfNotPresent
853 ## If true then the user will be responsible to provide a secret with alertmanager configuration
854 ## So when true the config part will be ignored (including templateFiles) and the one in the secret will be used
856 useExistingSecret: false
857 ## Secrets is a list of Secrets in the same namespace as the Alertmanager object, which shall be mounted into the
858 ## Alertmanager Pods. The Secrets are mounted into /etc/alertmanager/secrets/.
861 ## If false then the user will opt out of automounting API credentials.
863 automountServiceAccountToken: true
864 ## ConfigMaps is a list of ConfigMaps in the same namespace as the Alertmanager object, which shall be mounted into the Alertmanager Pods.
865 ## The ConfigMaps are mounted into /etc/alertmanager/configmaps/.
868 ## ConfigSecret is the name of a Kubernetes Secret in the same namespace as the Alertmanager object, which contains configuration for
869 ## this Alertmanager instance. Defaults to 'alertmanager-' The secret is mounted into /etc/alertmanager/config.
873 ## WebTLSConfig defines the TLS parameters for HTTPS
874 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerwebspec
876 ## AlertmanagerConfigs to be selected to merge and configure Alertmanager with.
878 alertmanagerConfigSelector: {}
879 ## Example which selects all alertmanagerConfig resources
880 ## with label "alertconfig" with values any of "example-config" or "example-config-2"
881 # alertmanagerConfigSelector:
889 ## Example which selects all alertmanagerConfig resources with label "role" set to "example-config"
890 # alertmanagerConfigSelector:
892 # role: example-config
894 ## Namespaces to be selected for AlertmanagerConfig discovery. If nil, only check own namespace.
896 alertmanagerConfigNamespaceSelector: {}
897 ## Example which selects all namespaces
898 ## with label "alertmanagerconfig" with values any of "example-namespace" or "example-namespace-2"
899 # alertmanagerConfigNamespaceSelector:
901 # - key: alertmanagerconfig
904 # - example-namespace
905 # - example-namespace-2
907 ## Example which selects all namespaces with label "alertmanagerconfig" set to "enabled"
908 # alertmanagerConfigNamespaceSelector:
910 # alertmanagerconfig: enabled
912 ## AlertmanagerConfig to be used as top level configuration
914 alertmanagerConfiguration: {}
915 ## Example with select a global alertmanagerconfig
916 # alertmanagerConfiguration:
917 # name: global-alertmanager-Configuration
919 ## Defines the strategy used by AlertmanagerConfig objects to match alerts. eg:
921 alertmanagerConfigMatcherStrategy: {}
922 ## Example with use OnNamespace strategy
923 # alertmanagerConfigMatcherStrategy:
926 ## Additional command line arguments to pass to Alertmanager (in addition to those generated by the chart)
929 # Use logfmt (default) or json logging
931 ## Log level for Alertmanager to be configured with.
934 ## Size is the expected size of the alertmanager cluster. The controller will eventually make the size of the
935 ## running cluster equal to the expected size.
937 ## Time duration Alertmanager shall retain data for. Default is '120h', and must match the regular expression
938 ## [0-9]+(ms|s|m|h) (milliseconds seconds minutes hours).
941 ## Storage is the definition of how storage will be used by the Alertmanager instances.
942 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
945 # volumeClaimTemplate:
947 # storageClassName: gluster
948 # accessModes: ["ReadWriteOnce"]
954 ## The external URL the Alertmanager instances will be available under. This is necessary to generate correct URLs. This is necessary if Alertmanager is not served from root of a DNS name (type: string, required: false)
957 ## The route prefix Alertmanager registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
958 ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
961 ## scheme: HTTP scheme to use. Can be used with `tlsConfig` for example if using istio mTLS.
963 ## tlsConfig: TLS configuration to use when connect to the endpoint. For example if using istio mTLS.
964 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
966 ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
969 ## Define which Nodes the Pods are scheduled on.
970 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
973 ## Define resources requests and limits for single Pods.
974 ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
980 ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
981 ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
982 ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
983 ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
985 podAntiAffinity: "soft"
986 ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
987 ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
989 podAntiAffinityTopologyKey: kubernetes.io/hostname
990 ## Assign custom affinity rules to the alertmanager instance
991 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
995 # requiredDuringSchedulingIgnoredDuringExecution:
997 # - matchExpressions:
998 # - key: kubernetes.io/e2e-az-name
1004 ## If specified, the pod's tolerations.
1005 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
1011 # effect: "NoSchedule"
1013 ## If specified, the pod's topology spread constraints.
1014 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
1016 topologySpreadConstraints: []
1018 # topologyKey: topology.kubernetes.io/zone
1019 # whenUnsatisfiable: DoNotSchedule
1024 ## SecurityContext holds pod-level security attributes and common container settings.
1025 ## This defaults to non root user with uid 1000 and gid 2000 (type: *v1.PodSecurityContext, required: false)
1026 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
1034 type: RuntimeDefault
1035 ## Use the host's user namespace for Alertmanager pods.
1036 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/
1038 ## DNS configuration for Alertmanager.
1039 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig
1041 ## DNS policy for Alertmanager.
1042 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias
1044 ## Enable hostNetwork for Alertmanager.
1046 ## ListenLocal makes the Alertmanager server listen on loopback, so that it does not bind against the Pod IP.
1047 ## Note this is only for the Alertmanager UI, not the gossip communication.
1050 ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to an Alertmanager pod.
1054 # - name: oauth-proxy
1055 # image: quay.io/oauth2-proxy/oauth2-proxy:v7.15.2
1057 # - --upstream=http://127.0.0.1:9093
1058 # - --http-address=0.0.0.0:8081
1059 # - --metrics-address=0.0.0.0:8082
1062 # - containerPort: 8081
1065 # - containerPort: 8082
1066 # name: oauth-metrics
1070 # Additional volumes on the output StatefulSet definition.
1072 # Additional VolumeMounts on the output StatefulSet definition.
1074 ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
1075 ## (permissions, dir tree) on mounted volumes before starting prometheus
1077 ## Priority class assigned to the Pods
1079 priorityClassName: ""
1080 ## AdditionalPeers allows injecting a set of additional Alertmanagers to peer with to form a highly available cluster.
1083 ## PortName to use for Alertmanager.
1085 portName: "http-web"
1086 ## ClusterAdvertiseAddress is the explicit address to advertise in cluster. Needs to be provided for non RFC1918 [1] (public) addresses. [1] RFC1918: https://tools.ietf.org/html/rfc1918
1088 clusterAdvertiseAddress: false
1089 ## clusterGossipInterval determines interval between gossip attempts.
1090 ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
1091 clusterGossipInterval: ""
1092 ## clusterPeerTimeout determines timeout for cluster peering.
1093 ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
1094 clusterPeerTimeout: ""
1095 ## clusterPushpullInterval determines interval between pushpull attempts.
1096 ## Needs to be specified as GoDuration, a time duration that can be parsed by Go's time.ParseDuration() (e.g. 45ms, 30s, 1m, 1h20m15s)
1097 clusterPushpullInterval: ""
1098 ## clusterLabel defines the identifier that uniquely identifies the Alertmanager cluster.
1100 ## ForceEnableClusterMode ensures Alertmanager does not deactivate the cluster mode when running with a single replica.
1101 ## Use case is e.g. spanning an Alertmanager cluster across Kubernetes clusters with a single replica in each.
1102 forceEnableClusterMode: false
1103 ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
1104 ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
1106 ## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
1107 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
1108 podManagementPolicy: ""
1109 ## Update strategy for the StatefulSet.
1110 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
1112 # type: RollingUpdate
1116 ## Duration in seconds the pod needs to terminate gracefully.
1117 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
1118 terminationGracePeriodSeconds: ~
1119 ## Additional configuration which is not covered by the properties above. (passed through tpl)
1120 additionalConfig: {}
1121 ## Additional configuration which is not covered by the properties above.
1122 ## Useful, if you need advanced templating inside alertmanagerSpec.
1123 ## Otherwise, use alertmanager.alertmanagerSpec.additionalConfig (passed through tpl)
1124 additionalConfigString: ""
1125 ## ExtraSecret can be used to store various data in an extra secret
1126 ## (use it for example to store hashed basic auth credentials)
1128 ## if not set, name will be auto generated
1133 # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
1134 # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
1135## Using default values from https://github.com/grafana-community/helm-charts/blob/main/charts/grafana/values.yaml
1139 namespaceOverride: ""
1140 ## ForceDeployDatasources Create datasource configmap even if grafana deployment has been disabled
1142 forceDeployDatasources: false
1143   ## ForceDeployDashboards Create dashboard configmap even if grafana deployment has been disabled
1145 forceDeployDashboards: false
1146 ## Deploy default dashboards
1148 defaultDashboardsEnabled: true
1149 ## Deploy GrafanaDashboard CRDs that reference dashboards from ConfigMaps when grafana-operator is used
1150 ## These settings control how dashboards are integrated with the Grafana Operator
1151   ## Note: End users still need to create their own kind: GrafanaDataSource for Prometheus
1153 ## apiVersion: grafana.integreatly.org/v1beta1
1154 ## kind: GrafanaDatasource
1159 ## allowCrossNamespaceImport: true
1160 ## instanceSelector:
1167 ## url: http://prometheus-operated.prometheus-stack.svc.cluster.local:9090
1170 ## "tlsSkipVerify": true
1171 ## "timeInterval": "5s"
1174 ## Enable references to ConfigMaps containing dashboards in GrafanaDashboard CRs
1175 ## Set to true to allow dashboards to be loaded from ConfigMap references
1176 dashboardsConfigMapRefEnabled: false
1177   ## Annotations for GrafanaDashboard CRs
1180   ## Labels that should match the kind: Grafana instance
1181 ## Example: { app: grafana, category: dashboard }
1184 ## How frequently the operator should resync resources (in duration format)
1185 ## Controls how often dashboards are reconciled by the operator
1188 ## Which folder contains all dashboards in Grafana
1189 ## This folder will be created on the Root level
1190 ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
1193 ## Which UID of the target folder contains all dashboards in Grafana
1194 ## This allows you to use subfolder hierarchy
1195 ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
1198 ## Which GrafanaFolder reference contains all dashboards in Grafana
1199 ## This allows you to use subfolder hierarchy.
1200 ## Only one of 'folder', 'folderUID' or 'folderRef' can be set
1203 ## Timezone for the default dashboards
1204 ## Other options are: browser or a specific timezone, i.e. Europe/Luxembourg
1206 defaultDashboardsTimezone: utc
1207 ## Editable flag for the default dashboards
1209 defaultDashboardsEditable: true
1210 ## Default interval for Grafana dashboards
1212 defaultDashboardsInterval: 1m
1213 # Administrator credentials when not using an existing secret (see below)
1215 # adminPassword: strongpassword
1217 # Use an existing secret for the admin user.
1219 ## Name of the secret. Can be templated.
1222 passwordKey: admin-password
1224 ## If true, Grafana PSPs will be created
1228 ## If true, Grafana Ingress will be created
1231 ## IngressClassName for Grafana Ingress.
1232   ## Should be provided if Ingress is enabled.
1234 # ingressClassName: nginx
1236 ## Annotations for Grafana Ingress
1239 # kubernetes.io/ingress.class: nginx
1240 # kubernetes.io/tls-acme: "true"
1242 ## Labels to be added to the Ingress
1246   ## Must be provided if Ingress is enabled.
1249 # - grafana.domain.com
1251 ## Path for grafana ingress
1253 ## TLS configuration for grafana Ingress
1254 ## Secret must be manually created in the namespace
1257 # - secretName: grafana-general-tls
1259 # - grafana.example.com
1260 # # To make Grafana persistent (Using Statefulset)
1265 # storageClassName: "storageClassName"
1270 # - kubernetes.io/pvc-protection
1277 label: grafana_dashboard
1279 # Allow discovery in all namespaces for dashboards
1280 searchNamespace: ALL
1281 # Support for new table panels, when enabled grafana auto migrates the old table panels to newer table panels
1282 enableNewTablePanelSyntax: false
1283 ## Annotations for Grafana dashboard configmaps
1292 allowUiUpdates: false
1295 defaultDatasourceEnabled: true
1296 isDefaultDatasource: true
1299 ## Extra jsonData properties to add to the datasource
1301 # prometheusType: Prometheus
1303 ## URL of prometheus datasource
1305 # url: http://prometheus-stack-prometheus:9090/
1307 ## Prometheus request timeout in seconds
1310 ## Query parameters to add, as a URL-encoded string,
1311 ## to query Prometheus
1312 # customQueryParameters: ""
1314 # If not defined, will use prometheus.prometheusSpec.scrapeInterval or its default
1315 # defaultDatasourceScrapeInterval: 15s
1317 ## Annotations for Grafana datasource configmaps
1320 ## Set method for HTTP to send query to datasource
1322 ## Create datasource for each Pod of Prometheus StatefulSet;
1323 ## this uses by default the headless service `prometheus-operated` which is
1324 ## created by Prometheus Operator. In case you deployed your own Service for your
1325 ## Prometheus instance, you can specify it with the field `prometheusServiceName`
1326 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/0fee93e12dc7c2ea1218f19ae25ec6b893460590/pkg/prometheus/statefulset.go#L255-L286
1327 createPrometheusReplicasDatasources: false
1328 prometheusServiceName: prometheus-operated
1329 label: grafana_datasource
1331 ## Field with internal link pointing to existing data source in Grafana.
1332 ## Can be provisioned via additionalDataSources
1333 exemplarTraceIdDestinations: {}
1334 # datasourceUid: Jaeger
1335 # traceIdLabelName: trace_id
1336 # urlDisplayLabel: View traces
1341 handleGrafanaManagedAlerts: false
1342 implementation: prometheus
1343 extraConfigmapMounts: []
1344 # - name: certs-configmap
1345 # mountPath: /etc/grafana/ssl/
1346 # configMap: certs-configmap
1349 deleteDatasources: []
1350 # - name: example-datasource
1353 ## Configure additional grafana datasources (passed through tpl)
1354 ## ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#datasources
1355 additionalDataSources: []
1356 # - name: prometheus-sample
1360 # basicAuthPassword: pass
1361 # basicAuthUser: daco
1364 # tlsSkipVerify: true
1367 # url: https://{{ printf "%s-prometheus.svc" .Release.Name }}:9090
1370 ## Configure additional grafana datasources as a templated string (passed through tpl)
1371 ## Useful when you need Helm flow control or templating inside the datasource definition
1372 additionalDataSourcesString: ""
1373 # Flag to mark provisioned data sources for deletion if they are no longer configured.
1374 # It takes no effect if data sources are already listed in the deleteDatasources section.
1375 # ref: https://grafana.com/docs/grafana/latest/administration/provisioning/#example-data-source-configuration-file
1377 ## Passed to grafana subchart and used by servicemonitor below
1384 # If true, a ServiceMonitor CRD is created for a prometheus operator
1385 # https://github.com/prometheus-operator/prometheus-operator
1388 # Path to use for scraping metrics. Might be different if server.root_url is set
1391 # namespace: monitoring (defaults to use the namespace this chart is deployed to)
1393 # labels for the ServiceMonitor
1395 # Scrape interval. If not set, the Prometheus default scrape interval is used.
1401 ## RelabelConfigs to apply to samples before scraping
1402 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1405 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1408 # targetLabel: nodename
1411## Flag to disable all the kubernetes component scrapers
1413kubernetesServiceMonitors:
1415## Component scraping the kube api server
1420 serverName: kubernetes
1421 insecureSkipVerify: false
1424 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1427 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1430 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1433 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1436 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1438 labelNameLengthLimit: 0
1439 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1441 labelValueLengthLimit: 0
1442 ## proxyUrl: URL of a proxy that should be used for scraping.
1448 component: apiserver
1449 provider: kubernetes
1450 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1451 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1454 # Drop excessively noisy apiserver buckets.
1456 regex: (etcd_request|apiserver_request_slo|apiserver_request_sli|apiserver_request)_duration_seconds_bucket;(0\.15|0\.2|0\.3|0\.35|0\.4|0\.45|0\.6|0\.7|0\.8|0\.9|1\.25|1\.5|1\.75|2|3|3\.5|4|4\.5|6|7|8|9|15|20|40|45|50)(\.0)?
1461 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1462 # sourceLabels: [__name__]
1464 ## RelabelConfigs to apply to samples before scraping
1465 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1469 # - __meta_kubernetes_namespace
1470 # - __meta_kubernetes_service_name
1471 # - __meta_kubernetes_endpoint_port_name
1473 # regex: default;kubernetes;https
1474 # - targetLabel: __address__
1475 # replacement: kubernetes.default.svc:443
1477 ## Additional labels
1479 additionalLabels: {}
1482 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1483 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1485 ## Override the job label used for the apiserver.
1486 ## This allows users who scrape apiserver metrics under a different job name (e.g. k3s-server via PushProx)
1487 ## to align the recording rules and alerts with their actual job label.
1489## Component scraping the kubelet and kubelet-hosted cAdvisor
1493 namespace: kube-system
1494 # Overrides the job selector in Grafana dashboards and Prometheus rules
1495 # For k3s clusters, change to k3s-server
1499 ## Enable scraping /metrics from kubelet's service
1501 ## Attach metadata to discovered targets. Requires Prometheus v2.45 for endpoints created by the operator.
1505 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1508     ## If true, Prometheus uses (respects) labels provided by exporter.
1511 ## If true, Prometheus ingests metrics with timestamp provided by exporter. If false, Prometheus ingests metrics with timestamp of scrape.
1513 honorTimestamps: true
1514 ## If true, defines whether Prometheus tracks staleness of the metrics that have an explicit timestamp present in scraped data. Has no effect if `honorTimestamps` is false.
1515 ## We recommend enabling this if you want the best possible accuracy for container_ metrics scraped from cadvisor.
1516 ## For more details see: https://github.com/prometheus-community/helm-charts/pull/5063#issuecomment-2545374849
1517 trackTimestampsStaleness: true
1518 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1521 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1524 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1527 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1529 labelNameLengthLimit: 0
1530 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1532 labelValueLengthLimit: 0
1533 ## proxyUrl: URL of a proxy that should be used for scraping.
1536 ## Enable scraping the kubelet over https. For requirements to enable this see
1537 ## https://github.com/prometheus-operator/prometheus-operator/issues/926
1540 ## Skip TLS certificate validation when scraping.
1541 ## This is enabled by default because kubelet serving certificate deployed by kubeadm is by default self-signed
1542 ## ref: https://kubernetes.io/docs/tasks/administer-cluster/kubeadm/kubeadm-certs/#kubelet-serving-certs
1544 insecureSkipVerify: true
1545 ## Enable scraping /metrics/probes from kubelet's service
1548 ## Enable scraping /metrics/resource from kubelet's service
1549 ## This is disabled by default because container metrics are already exposed by cAdvisor
1552 # From kubernetes 1.18, /metrics/resource/v1alpha1 renamed to /metrics/resource
1553 resourcePath: "/metrics/resource/v1alpha1"
1554 ## Configure the scrape interval for resource metrics. This is configured to the default Kubelet cAdvisor
1555 ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
1556 ## if kubelet.serviceMonitor.interval is not empty.
1557 resourceInterval: 10s
1558 ## Enable scraping /metrics/cadvisor from kubelet's service
1561 ## Configure the scrape interval for cAdvisor. This is configured to the default Kubelet cAdvisor
1562 ## minimum housekeeping interval in order to avoid missing samples. Note, this value is ignored
1563 ## if kubelet.serviceMonitor.interval is not empty.
1564 cAdvisorInterval: 10s
1565 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1566 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1568 cAdvisorMetricRelabelings:
1569 # Drop less useful container CPU metrics.
1570 - sourceLabels: [__name__]
1572 regex: 'container_cpu_(cfs_throttled_seconds_total|load_average_10s|system_seconds_total|user_seconds_total)'
1573 # Drop less useful container / always zero filesystem metrics.
1574 - sourceLabels: [__name__]
1576 regex: 'container_fs_(io_current|io_time_seconds_total|io_time_weighted_seconds_total|reads_merged_total|sector_reads_total|sector_writes_total|writes_merged_total)'
1577 # Drop less useful / always zero container memory metrics.
1578 - sourceLabels: [__name__]
1580 regex: 'container_memory_(mapped_file|swap)'
1581 # Drop less useful container process metrics.
1582 - sourceLabels: [__name__]
1584 regex: 'container_(file_descriptors|tasks_state|threads_max)'
1585 # Drop container_memory_failures_total{scope="hierarchy"} metrics,
1586 # we only need the container scope.
1587 - sourceLabels: [__name__, scope]
1589 regex: 'container_memory_failures_total;hierarchy'
1590 # Drop container_network_... metrics that match various interfaces that
1591 # correspond to CNI and similar interfaces. This avoids capturing network
1592 # metrics for host network containers.
1593 - sourceLabels: [__name__, interface]
1595 regex: 'container_network_.*;(cali|cilium|cni|lxc|nodelocaldns|tunl).*'
1596 # Drop container spec metrics that overlap with kube-state-metrics.
1597 - sourceLabels: [__name__]
1599 regex: 'container_spec.*'
1600 # Drop cgroup metrics with no pod.
1601 - sourceLabels: [id, pod]
1604 # - sourceLabels: [__name__, image]
1606 # regex: container_([a-z_]+);
1609 # - sourceLabels: [__name__]
1611 # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
1615 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1616 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1618 probesMetricRelabelings: []
1619 # - sourceLabels: [__name__, image]
1621 # regex: container_([a-z_]+);
1624 # - sourceLabels: [__name__]
1626 # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
1630 ## RelabelConfigs to apply to samples before scraping
1631 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1633 ## metrics_path is required to match upstream rules and charts
1634 cAdvisorRelabelings:
1636 sourceLabels: [__metrics_path__]
1637 targetLabel: metrics_path
1638 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1641 # targetLabel: nodename
1645 ## RelabelConfigs to apply to samples before scraping
1646 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1650 sourceLabels: [__metrics_path__]
1651 targetLabel: metrics_path
1652 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1655 # targetLabel: nodename
1659 ## RelabelConfigs to apply to samples before scraping
1660 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1662 resourceRelabelings:
1664 sourceLabels: [__metrics_path__]
1665 targetLabel: metrics_path
1666 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1669 # targetLabel: nodename
1673 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1674 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1677 # Reduce bucket cardinality of kubelet storage operations.
1679               sourceLabels: [__name__, le]
1680               regex: (csi_operations|storage_operation_duration)_seconds_bucket;(0\.25|2\.5|15|25|120|600)(\.0)?
1681 # - sourceLabels: [__name__, image]
1683 # regex: container_([a-z_]+);
1686 # - sourceLabels: [__name__]
1688 # regex: container_(network_tcp_usage_total|network_udp_usage_total|tasks_state|cpu_load_average_10s)
1692 ## RelabelConfigs to apply to samples before scraping
1693 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1695 ## metrics_path is required to match upstream rules and charts
1698 sourceLabels: [__metrics_path__]
1699 targetLabel: metrics_path
1700 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1703 # targetLabel: nodename
1707 ## Additional labels
1709 additionalLabels: {}
1712 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1713 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1715## Component scraping the kube controller manager
1717kubeControllerManager:
1719 # Overrides the job selector in Grafana dashboards and Prometheus rules
1720 # For k3s clusters, change to k3s-server
1722 ## If your kube controller manager is not deployed as a pod, specify IPs it can be found on
1729 ## If using kubeControllerManager.endpoints only the port and targetPort are used
1733 ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
1734 ## of default port in Kubernetes 1.22.
1740 ipFamilies: ["IPv6", "IPv4"]
1741 ipFamilyPolicy: "PreferDualStack"
1743 # component: kube-controller-manager
1746 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1749 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1752 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1755 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1758 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1760 labelNameLengthLimit: 0
1761 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1763 labelValueLengthLimit: 0
1764 ## proxyUrl: URL of a proxy that should be used for scraping.
1767 ## port: Name of the port the metrics will be scraped from
1773 # component: kube-controller-manager
1775 ## Enable scraping kube-controller-manager over https.
1776 ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
1777 ## If null or unset, the value is determined dynamically based on target Kubernetes version.
1780 # Skip TLS certificate validation when scraping
1781 insecureSkipVerify: null
1782 # Name of the server to use when validating TLS certificate
1784 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1785 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1787 metricRelabelings: []
1789 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1790 # sourceLabels: [__name__]
1792 ## RelabelConfigs to apply to samples before scraping
1793 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1796 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1799 # targetLabel: nodename
1803 ## Additional labels
1805 additionalLabels: {}
1808 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1809 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1811## Component scraping coreDns. Use either this or kubeDns
1821 ipFamilies: ["IPv6", "IPv4"]
1822 ipFamilyPolicy: "PreferDualStack"
1827 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1830 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1833 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1836 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1839 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1841 labelNameLengthLimit: 0
1842 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1844 labelValueLengthLimit: 0
1845 ## proxyUrl: URL of a proxy that should be used for scraping.
1848 ## port: Name of the port the metrics will be scraped from
1856 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1857 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1859 metricRelabelings: []
1861 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1862 # sourceLabels: [__name__]
1864 ## RelabelConfigs to apply to samples before scraping
1865 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1868 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1871 # targetLabel: nodename
1875 ## Additional labels
1877 additionalLabels: {}
1880 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1881 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1883 ## File containing bearer token to be used when scraping targets
1884     ## An empty value does not send any bearer token.
1886 bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
1887## Component scraping kubeDns. Use either this or coreDns
1900 ipFamilies: ["IPv6", "IPv4"]
1901 ipFamilyPolicy: "PreferDualStack"
1905 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
1908 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
1911 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
1914 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1917 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1919 labelNameLengthLimit: 0
1920 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
1922 labelValueLengthLimit: 0
1923 ## proxyUrl: URL of a proxy that should be used for scraping.
1931 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1932 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1934 metricRelabelings: []
1936 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1937 # sourceLabels: [__name__]
1939 ## RelabelConfigs to apply to samples before scraping
1940 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1943 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1946 # targetLabel: nodename
1950 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
1951 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1953 dnsmasqMetricRelabelings: []
1955 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
1956 # sourceLabels: [__name__]
1958 ## RelabelConfigs to apply to samples before scraping
1959 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
1961 dnsmasqRelabelings: []
1962 # - sourceLabels: [__meta_kubernetes_pod_node_name]
1965 # targetLabel: nodename
1969 ## Additional labels
1971 additionalLabels: {}
1974 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
1975 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
1977 ## File containing bearer token to be used when scraping targets
1978     ## An empty value does not send any bearer token.
1980 bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
1981## Component scraping etcd
1985 ## If your etcd is not deployed as a pod, specify IPs it can be found on
1992 ## Etcd service. If using kubeEtcd.endpoints only the port and targetPort are used
2000 ipFamilies: ["IPv6", "IPv4"]
2001 ipFamilyPolicy: "PreferDualStack"
2004 ## Configure secure access to the etcd cluster by loading a secret into prometheus and
2005 ## specifying security configuration below. For example, with a secret named etcd-client-cert
2009 ## insecureSkipVerify: false
2010 ## serverName: localhost
2011 ## caFile: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
2012 ## certFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client
2013 ## keyFile: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
2017 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2020 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2023 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2026 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2029 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2031 labelNameLengthLimit: 0
2032 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2034 labelValueLengthLimit: 0
2035 ## proxyUrl: URL of a proxy that should be used for scraping.
2039 insecureSkipVerify: false
2044 ## port: Name of the port the metrics will be scraped from
2052 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
2053 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2055 metricRelabelings: []
2057 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2058 # sourceLabels: [__name__]
2060 ## RelabelConfigs to apply to samples before scraping
2061 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2064 # - sourceLabels: [__meta_kubernetes_pod_node_name]
2067 # targetLabel: nodename
2071 ## Additional labels
2073 additionalLabels: {}
2076 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
2077 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
2079 ## File containing bearer token to be used when scraping targets
2080 ## Empty value do not send any bearer token.
2082 bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
2083## Component scraping kube scheduler
2087 # Overrides the job selector in Grafana dashboards and Prometheus rules
2088 # For k3s clusters, change to k3s-server
2090 ## If your kube scheduler is not deployed as a pod, specify IPs it can be found on
2097 ## If using kubeScheduler.endpoints only the port and targetPort are used
2101 ## If null or unset, the value is determined dynamically based on target Kubernetes version due to change
2102 ## of default port in Kubernetes 1.23.
2108 ipFamilies: ["IPv6", "IPv4"]
2109 ipFamilyPolicy: "PreferDualStack"
2111 # component: kube-scheduler
2114 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2117 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2120 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2123 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2126 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2128 labelNameLengthLimit: 0
2129 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2131 labelValueLengthLimit: 0
2132 ## proxyUrl: URL of a proxy that should be used for scraping.
2135 ## Enable scraping kube-scheduler over https.
2136 ## Requires proper certs (not self-signed) and delegated authentication/authorization checks.
2137 ## If null or unset, the value is determined dynamically based on target Kubernetes version.
2140 ## port: Name of the port the metrics will be scraped from
2146 # component: kube-scheduler
2148 ## Skip TLS certificate validation when scraping
2149 insecureSkipVerify: null
2150 ## Name of the server to use when validating TLS certificate
2152 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
2153 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2155 metricRelabelings: []
2157 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2158 # sourceLabels: [__name__]
2160 ## RelabelConfigs to apply to samples before scraping
2161 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2164 # - sourceLabels: [__meta_kubernetes_pod_node_name]
2167 # targetLabel: nodename
2171 ## Additional labels
2173 additionalLabels: {}
2176 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
2177 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
2179## Component scraping kube proxy
2183 # Overrides the job selector in Grafana dashboards and Prometheus rules
2184 # For k3s clusters, change to k3s-server
2186 ## If your kube proxy is not deployed as a pod, specify IPs it can be found on
2199 ipFamilies: ["IPv6", "IPv4"]
2200 ipFamilyPolicy: "PreferDualStack"
2202 # k8s-app: kube-proxy
2205 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2208 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2211 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2214 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2217 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2219 labelNameLengthLimit: 0
2220 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2222 labelValueLengthLimit: 0
2223 ## proxyUrl: URL of a proxy that should be used for scraping.
2226 ## port: Name of the port the metrics will be scraped from
2232 # k8s-app: kube-proxy
2234 ## Enable scraping kube-proxy over https.
2235 ## Requires proper certs (not self-signed) and delegated authentication/authorization checks
2238 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
2239 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2241 metricRelabelings: []
2243 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2244 # sourceLabels: [__name__]
2246 ## RelabelConfigs to apply to samples before scraping
2247 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2251 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2252 # sourceLabels: [__name__]
2254 ## Additional labels
2256 additionalLabels: {}
2259 ## defines the labels which are transferred from the associated Kubernetes Service object onto the ingested metrics.
2260 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#servicemonitor
2262 ## File containing bearer token to be used when scraping targets
2263 ## An empty value does not send any bearer token.
2265 bearerTokenFile: /var/run/secrets/kubernetes.io/serviceaccount/token
2266## Component scraping kube state metrics
2270## Configuration for kube-state-metrics subchart
2273 ## set to true to add the release label so scraping of the servicemonitor with kube-prometheus-stack works out of the box
2275 ## Enable scraping via kubernetes-service-endpoints
2276 ## Disabled by default as the service monitor is enabled below
2278 prometheusScrape: false
2281 ## Enable scraping via service monitor
2282 ## Disable to prevent duplication if you enable prometheusScrape above
2284 ## kube-state-metrics endpoint
2286 ## Keep labels from scraped data, overriding server-side labels
2288 ## selfMonitor endpoint
2290 ## Keep labels from scraped data, overriding server-side labels
2292## Deploy node exporter as a daemonset to all nodes
2303 ## ForceDeployDashboard Create dashboard configmap even if nodeExporter deployment has been disabled
2305 forceDeployDashboards: false
2306## Configuration for prometheus-node-exporter subchart
2308prometheus-node-exporter:
2309 namespaceOverride: ""
2311 ## Add the 'node-exporter' label to be used by serviceMonitor and podMonitor to match standard common usage in rules and grafana dashboards
2313 jobLabel: node-exporter
2316 - --collector.filesystem.mount-points-exclude=^/(dev|proc|sys|run/containerd/.+|var/lib/docker/.+|var/lib/kubelet/.+)($|/)
2317 - --collector.filesystem.fs-types-exclude=^(autofs|binfmt_misc|bpf|cgroup2?|configfs|debugfs|devpts|devtmpfs|fusectl|hugetlbfs|iso9660|mqueue|nsfs|overlay|proc|procfs|pstore|rpc_pipefs|securityfs|selinuxfs|squashfs|sysfs|tracefs|erofs)$
2319 portName: http-metrics
2322 ipFamilies: ["IPv6", "IPv4"]
2323 ipFamilyPolicy: "PreferDualStack"
2325 jobLabel: node-exporter
2330 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2333 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2336 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2339 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2342 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2344 labelNameLengthLimit: 0
2345 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2347 labelValueLengthLimit: 0
2348 ## How long until a scrape request times out. If not set, the Prometheus default scrape timeout is used.
2351 ## proxyUrl: URL of a proxy that should be used for scraping.
2354 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
2355 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2357 metricRelabelings: []
2358 # - sourceLabels: [__name__]
2360 # regex: ^node_mountstats_nfs_(event|operations|transport)_.+
2364 ## RelabelConfigs to apply to samples before scraping
2365 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
2368 # - sourceLabels: [__meta_kubernetes_pod_node_name]
2371 # targetLabel: nodename
2374 ## Attach node metadata to discovered targets. Requires Prometheus v2.35.0 and above.
2383 ## If true, create PSPs for node-exporter
2386## Manages Prometheus and Alertmanager components
2390 ## Use '{{ template "kube-prometheus-stack.fullname" . }}-operator' by default
2391 fullnameOverride: ""
2392 ## Number of old replicasets to retain ##
2393 ## The default value is 10, 0 will garbage-collect old replicasets ##
2394 revisionHistoryLimit: 10
2395 ## Strategy of the deployment
2398 ## Prometheus-Operator v0.39.0 and later support TLS natively.
2402 # Value must match version names from https://pkg.go.dev/crypto/tls#pkg-constants
2403 tlsMinVersion: VersionTLS13
2404 # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
2406 ## Liveness probe for the prometheusOperator deployment
2411 initialDelaySeconds: 0
2415 ## Readiness probe for the prometheusOperator deployment
2420 initialDelaySeconds: 0
2424 ## Admission webhook support for PrometheusRules resources added in Prometheus Operator 0.30 can be enabled to prevent incorrectly formatted
2425 ## rules from making their way into prometheus and potentially preventing the container from starting
2427 ## Valid values: Fail, Ignore, IgnoreOnInstallOnly
2428 ## IgnoreOnInstallOnly - If Release.IsInstall returns "true", set "Ignore" otherwise "Fail"
2430 ## The default timeoutSeconds is 10 and the maximum value is 30.
2433 ## A PEM encoded CA bundle which will be used to validate the webhook's server certificate.
2434 ## If unspecified, system trust roots on the apiserver are used.
2436 ## If enabled, generate a self-signed certificate, then patch the webhook configurations with the generated data.
2437 ## On chart upgrades (or if the secret exists) the cert will not be re-generated. You can use this to provide your own
2438 ## certs ahead of time if you wish.
2441 # argocd.argoproj.io/hook: PreSync
2442 # argocd.argoproj.io/hook-delete-policy: HookSucceeded
2444 namespaceSelector: {}
2447 mutatingWebhookConfiguration:
2449 # argocd.argoproj.io/hook: PreSync
2450 validatingWebhookConfiguration:
2452 # argocd.argoproj.io/hook: PreSync
2455 ## Number of replicas
2458 ## Strategy of the deployment
2461 # Ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/
2462 podDisruptionBudget:
2465 # maxUnavailable: ""
2466 unhealthyPodEvictionPolicy: AlwaysAllow
2467 ## Number of old replicasets to retain ##
2468 ## The default value is 10, 0 will garbage-collect old replicasets ##
2469 revisionHistoryLimit: 10
2470 ## Prometheus-Operator v0.39.0 and later support TLS natively.
2474 # Value must match version names from https://pkg.go.dev/crypto/tls#pkg-constants
2475 tlsMinVersion: VersionTLS13
2476 # The default webhook port is 10250 in order to work out-of-the-box in GKE private clusters and avoid adding firewall rules.
2478 ## Service account for Prometheus Operator Webhook to use.
2479 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
2483 automountServiceAccountToken: false
2486 ## Configuration for Prometheus operator Webhook service
2494 ipFamilies: ["IPv6", "IPv4"]
2495 ipFamilyPolicy: "PreferDualStack"
2496 ## Port to expose on each node
2497 ## Only used if service.type is 'NodePort'
2501 ## Additional ports to open for Prometheus operator Webhook service
2502 ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
2506 ## Only use if service.type is "LoadBalancer"
2509 loadBalancerSourceRanges: []
2510 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
2512 externalTrafficPolicy: Cluster
2514 ## NodePort, ClusterIP, LoadBalancer
2517 ## List of IP addresses at which the Prometheus server service is available
2518 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
2521 # ## Labels to add to the operator webhook deployment
2524 ## Annotations to add to the operator webhook deployment
2527 ## Labels to add to the operator webhook pod
2530 ## Annotations to add to the operator webhook pod
2533 ## Assign a PriorityClassName to pods if set
2534 # priorityClassName: ""
2536 ## Define Log Format
2537 # Use logfmt (default) or json logging
2540 ## Decrease log verbosity to errors only
2543 ## Prometheus-operator webhook image
2547 repository: chainguard-private/prometheus-admission-webhook
2548 # if not set appVersion field from Chart.yaml is used
2550 sha: sha256:4d060b4fb915cf04db68d87682d21d7759d00eb866ce9ea12afe913bcdb04133
2551 pullPolicy: IfNotPresent
2552 ## Define Log Format
2553 # Use logfmt (default) or json logging
2556 ## Decrease log verbosity to errors only
2564 initialDelaySeconds: 30
2573 initialDelaySeconds: 5
2577 ## Resource limits & requests
2587 # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
2588 # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
2591 ## Define which Nodes the Pods are scheduled on.
2592 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
2595 ## Tolerations for use with node taints
2596 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
2602 # effect: "NoSchedule"
2604 ## Assign custom affinity rules to the prometheus operator
2605 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
2609 # requiredDuringSchedulingIgnoredDuringExecution:
2610 # nodeSelectorTerms:
2611 # - matchExpressions:
2612 # - key: kubernetes.io/e2e-az-name
2621 # - ns1.svc.cluster-domain.example
2622 # - my.dns.search.suffix
2633 type: RuntimeDefault
2634 ## Container-specific security context configuration
2635 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
2637 containerSecurityContext:
2638 allowPrivilegeEscalation: false
2639 readOnlyRootFilesystem: true
2643 ## If false then the user will opt out of automounting API credentials.
2645 automountServiceAccountToken: true
2650 repository: chainguard-private/kube-webhook-certgen
2652 sha: sha256:1f2157ebf63c7ebfc135640afd44383e43898fb372c2a38c1509d47cf7dd08c0
2653 pullPolicy: IfNotPresent
2655 ## Provide a priority class name to the webhook patching job
2657 priorityClassName: ""
2658 ttlSecondsAfterFinished: 60
2660 # argocd.argoproj.io/hook: PreSync
2661 # argocd.argoproj.io/hook-delete-policy: HookSucceeded
2666 ## SecurityContext holds pod-level security attributes and common container settings.
2667 ## This defaults to non root user with uid 2000 and gid 2000. *v1.PodSecurityContext false
2668 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
2675 type: RuntimeDefault
2676 ## Service account for Prometheus Operator Webhook Job Patch to use.
2677 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
2682 automountServiceAccountToken: true
2683 # Security context for create job container
2686 allowPrivilegeEscalation: false
2687 readOnlyRootFilesystem: true
2691 # Security context for patch job container
2694 allowPrivilegeEscalation: false
2695 readOnlyRootFilesystem: true
2699 # Use certmanager to generate webhook certs
2702 # self-signed root certificate
2704 duration: "" # default to be 5y
2705 # -- Set the revisionHistoryLimit on the Certificate. See
2706 # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
2708 revisionHistoryLimit:
2710 duration: "" # default to be 1y
2711 # -- Set the revisionHistoryLimit on the Certificate. See
2712 # https://cert-manager.io/docs/reference/api-docs/#cert-manager.io/v1.CertificateSpec
2714 revisionHistoryLimit:
2717 # kind: "ClusterIssuer"
2718 ## Namespaces to scope the interaction of the Prometheus Operator and the apiserver (allow list).
2719 ## This is mutually exclusive with denyNamespaces. Setting this to an empty object will disable the configuration
2722 # releaseNamespace: true
2726 ## Namespaces not to scope the interaction of the Prometheus Operator (deny list).
2729 ## Filter namespaces to look for prometheus-operator custom resources
2731 alertmanagerInstanceNamespaces: []
2732 alertmanagerConfigNamespaces: []
2733 prometheusInstanceNamespaces: []
2734 thanosRulerInstanceNamespaces: []
2735 ## The clusterDomain value will be added to the cluster.peer option of the alertmanager.
2736 ## Without this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated:9094 (default value)
2737 ## With this specified option cluster.peer will have value alertmanager-monitoring-alertmanager-0.alertmanager-operated.namespace.svc.cluster-domain:9094
2739 # clusterDomain: "cluster.local"
2741 ## Enable creation of NetworkPolicy resources.
2744 ## Flavor of the network policy to use.
2746 # * kubernetes for networking.k8s.io/v1/NetworkPolicy
2747 # * cilium for cilium.io/v2/CiliumNetworkPolicy
2752 ## match labels used in selector
2754 ## Service account for Prometheus Operator to use.
2755 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
2760 automountServiceAccountToken: true
2762 # -- terminationGracePeriodSeconds for container lifecycle hook
2763 terminationGracePeriodSeconds: 30
2764 # -- Specify lifecycle hooks for the controller
2766 ## Configuration for Prometheus operator service
2774 ipFamilies: ["IPv6", "IPv4"]
2775 ipFamilyPolicy: "PreferDualStack"
2776 ## Port to expose on each node
2777 ## Only used if service.type is 'NodePort'
2781 ## Additional ports to open for Prometheus operator service
2782 ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#multi-port-services
2786 ## Only use if service.type is "LoadBalancer"
2789 loadBalancerSourceRanges: []
2790 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
2792 externalTrafficPolicy: Cluster
2794 ## NodePort, ClusterIP, LoadBalancer
2797 ## List of IP addresses at which the Prometheus server service is available
2798 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
2801 # ## Labels to add to the operator deployment
2804 ## Annotations to add to the operator deployment
2807 ## Labels to add to the operator pod
2810 ## Annotations to add to the operator pod
2813 ## Assign a podDisruptionBudget to the operator
2815 podDisruptionBudget:
2818 # maxUnavailable: ""
2819 unhealthyPodEvictionPolicy: AlwaysAllow
2820 ## Assign a PriorityClassName to pods if set
2821 # priorityClassName: ""
2823 ## Define Log Format
2824 # Use logfmt (default) or json logging
2827 ## Decrease log verbosity to errors only
2830 ## If true, the operator will create and maintain a service for scraping kubelets
2831 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/helm/prometheus-operator/README.md
2834 namespace: kube-system
2836 ## Use '{{ template "kube-prometheus-stack.fullname" . }}-kubelet' by default
2838 ## Create Endpoints objects for kubelet targets.
2839 kubeletEndpointsEnabled: true
2840 ## Create EndpointSlice objects for kubelet targets.
2841 kubeletEndpointSliceEnabled: false
2842 ## Extra arguments to pass to prometheusOperator
2843 # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/operator.md
2845 # - --labels="cluster=talos-cluster"
2847 ## Create a servicemonitor for the operator
2850 ## If true, create a serviceMonitor for prometheus operator
2853 ## Labels for ServiceMonitor
2854 additionalLabels: {}
2855 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
2858 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
2861 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
2864 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2867 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2869 labelNameLengthLimit: 0
2870 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
2872 labelValueLengthLimit: 0
2873 ## Scrape timeout. If not set, the Prometheus default scrape timeout is used.
2875 ## Metric relabel configs to apply to samples before ingestion.
2877 metricRelabelings: []
2879 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
2880 # sourceLabels: [__name__]
2882 # relabel configs to apply to samples before ingestion.
2885 # - sourceLabels: [__meta_kubernetes_pod_node_name]
2888 # targetLabel: nodename
2891 ## Resource limits & requests
2901 ## Operator Environment
2906 # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
2907 # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
2910 ## Define which Nodes the Pods are scheduled on.
2911 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
2914 ## Tolerations for use with node taints
2915 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
2921 # effect: "NoSchedule"
2923 ## Assign custom affinity rules to the prometheus operator
2924 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
2928 # requiredDuringSchedulingIgnoredDuringExecution:
2929 # nodeSelectorTerms:
2930 # - matchExpressions:
2931 # - key: kubernetes.io/e2e-az-name
2940 # - ns1.svc.cluster-domain.example
2941 # - my.dns.search.suffix
2952 type: RuntimeDefault
2953 ## Setup hostUsers for prometheus-operator
2954 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/user-namespaces/
2956 ## Container-specific security context configuration
2957 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
2959 containerSecurityContext:
2960 allowPrivilegeEscalation: false
2961 readOnlyRootFilesystem: true
2965 # Enable vertical pod autoscaler support for prometheus-operator
2966 verticalPodAutoscaler:
2968 # Recommender responsible for generating recommendation for the object.
2969 # List should be empty (then the default recommender will generate the recommendation)
2970 # or contain exactly one recommender.
2972 # - name: custom-recommender-performance
2974 # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
2975 controlledResources: []
2976 # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
2977 # controlledValues: RequestsAndLimits
2979 # Define the max allowed resources for the pod
2983 # Define the min allowed resources for the pod
2989 # Specifies minimal number of replicas which need to be alive for VPA Updater to attempt pod eviction
2991 # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
2992 # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
2993 updateMode: Recreate
2994 ## Prometheus-operator image
2998 repository: chainguard-private/prometheus-operator
2999 # if not set appVersion field from Chart.yaml is used
3001 sha: sha256:69dce7b86620326df1d4593b520b6ab81b6db3daaddb365cb094b35957b8f474
3002 pullPolicy: IfNotPresent
3003 ## Prometheus image to use for prometheuses managed by the operator
3005 # prometheusDefaultBaseImage: prometheus/prometheus
3007 ## Prometheus image registry to use for prometheuses managed by the operator
3009 # prometheusDefaultBaseImageRegistry: quay.io
3011 ## Alertmanager image to use for alertmanagers managed by the operator
3013 # alertmanagerDefaultBaseImage: prometheus/alertmanager
3015 ## Alertmanager image registry to use for alertmanagers managed by the operator
3017 # alertmanagerDefaultBaseImageRegistry: quay.io
3019 ## Prometheus-config-reloader
3021 prometheusConfigReloader:
3024 repository: chainguard-private/prometheus-config-reloader
3025 # if not set appVersion field from Chart.yaml is used
3027 sha: sha256:0bb4e4a27053c655527f5f585df9eb9b81782b81a4213622dba9c2494bcde757
3028 # add prometheus config reloader liveness and readiness probe. Default: false
3030 # resource config for prometheusConfigReloader
3038 ## Thanos side-car image when configured
3042 repository: chainguard-private/thanos
3044 sha: sha256:91616ecf31235c2f626295c55d11389fcdcfb2b1f817099a9d7460e7765bd183
3045 ## Set a Label Selector to filter watched prometheus and prometheusAgent
3047 prometheusInstanceSelector: ""
3048 ## Set a Label Selector to filter watched alertmanager
3050 alertmanagerInstanceSelector: ""
3051 ## Set a Label Selector to filter watched thanosRuler
3052 thanosRulerInstanceSelector: ""
3053 ## Set a Field Selector to filter watched secrets
3055 secretFieldSelector: "type!=kubernetes.io/dockercfg,type!=kubernetes.io/service-account-token,type!=helm.sh/release.v1"
3056 ## If false then the user will opt out of automounting API credentials.
3058 automountServiceAccountToken: true
3059 ## Additional volumes
3062 ## Additional volume mounts
3064 extraVolumeMounts: []
3065## Deploy a Prometheus instance
3069 ## Toggle prometheus into agent mode
3070 ## Note many of features described below (e.g. rules, query, alerting, remote read, thanos) will not work in agent mode.
3071 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/prometheus-agent.md
3074 ## Annotations for Prometheus
3077 ## Additional labels for Prometheus
3079 additionalLabels: {}
3080 ## Configure network policy for the prometheus
3083 ## Flavor of the network policy to use.
3085 # * kubernetes for networking.k8s.io/v1/NetworkPolicy
3086 # * cilium for cilium.io/v2/CiliumNetworkPolicy
3101 ## Service account for Prometheuses to use.
3102 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
3108 automountServiceAccountToken: true
3109 # Service for thanos service discovery on sidecar
3110 # Enabling this lets Thanos Query use
3111 # `--store=dnssrv+_grpc._tcp.${kube-prometheus-stack.fullname}-thanos-discovery.${namespace}.svc.cluster.local` to discovery
3112 # Thanos sidecar on prometheus nodes
3113 # (Please remember to change ${kube-prometheus-stack.fullname} and ${namespace}. Not just copy and paste!)
3118 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
3120 externalTrafficPolicy: Cluster
3124 ## Service dual stack
3128 ipFamilies: ["IPv6", "IPv4"]
3129 ipFamilyPolicy: "PreferDualStack"
3134 ## HTTP port config (for metrics)
3137 targetHttpPort: "http"
3138 ## ClusterIP to assign
3139 # Default is to make this a headless service ("None")
3141 ## Port to expose on each node, if service type is NodePort
3145 # ServiceMonitor to scrape Sidecar metrics
3146 # Needs thanosService to be enabled as well
3147 thanosServiceMonitor:
3150 ## Additional labels
3152 additionalLabels: {}
3153 ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
3155 ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
3156 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
3159 ## Metric relabel configs to apply to samples before ingestion.
3160 metricRelabelings: []
3161 ## relabel configs to apply to samples before ingestion.
3163 # Service for external access to sidecar
3164 # Enabling this creates a service to expose thanos-sidecar outside the cluster.
3165 thanosServiceExternal:
3170 loadBalancerSourceRanges: []
3175 ## HTTP port config (for metrics)
3178 targetHttpPort: "http"
3179 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
3181 externalTrafficPolicy: Cluster
3185 ## Port to expose on each node
3189 ## Configuration for Prometheus service
3198 ipFamilies: ["IPv6", "IPv4"]
3199 ipFamilyPolicy: "PreferDualStack"
3200 ## Port for Prometheus Service to listen on
3203 ## To be used with a proxy extraContainer port
3205 ## Port for Prometheus Reloader to listen on
3207 reloaderWebPort: 8080
3208 ## Port to expose for Prometheus Reloader
3209 ## Only used if service.type is 'NodePort'
3211 reloaderWebNodePort: null
3212 ## List of IP addresses at which the Prometheus server service is available
3213 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
3216 ## Port to expose on each node
3217 ## Only used if service.type is 'NodePort'
3221 ## Only use if service.type is "LoadBalancer"
3223 loadBalancerSourceRanges: []
3224 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
3226 externalTrafficPolicy: Cluster
3230 ## Additional ports to open for Prometheus service
3234 # - name: oauth-proxy
3237 # - name: oauth-metrics
3241 ## Consider that all endpoints are considered "ready" even if the Pods themselves are not
3242 ## Ref: https://kubernetes.io/docs/reference/kubernetes-api/service-resources/service-v1/#ServiceSpec
3243 publishNotReadyAddresses: false
3244 ## If you want to make sure that connections from a particular client are passed to the same Pod each time
3245 ## Accepts 'ClientIP' or 'None'
3247 sessionAffinity: None
3248 ## If you want to modify the ClientIP sessionAffinity timeout
3249 ## The value must be >0 && <=86400 (for 1 day) if ServiceAffinity == "ClientIP"
3251 sessionAffinityConfig:
3253 timeoutSeconds: 10800
3254 ## Configuration for creating a separate Service for each statefulset Prometheus replica
3259 ## Port for Prometheus Service per replica to listen on
3262 ## To be used with a proxy extraContainer port
3264 ## Port to expose on each node
3265 ## Only used if servicePerReplica.type is 'NodePort'
3268 ## Loadbalancer source IP ranges
3269 ## Only used if servicePerReplica.type is "LoadBalancer"
3270 loadBalancerSourceRanges: []
3271 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
3273 externalTrafficPolicy: Cluster
3277 ## Service dual stack
3281 ipFamilies: ["IPv6", "IPv4"]
3282 ipFamilyPolicy: "PreferDualStack"
3283 ## Configure pod disruption budgets for Prometheus
3284 ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
3286 podDisruptionBudget:
3289 # maxUnavailable: ""
3290 unhealthyPodEvictionPolicy: AlwaysAllow
3291 ## Enable vertical pod autoscaler support for Prometheus
3292 ## ref: https://github.com/kubernetes/autoscaler/tree/master/vertical-pod-autoscaler
3294 verticalPodAutoscaler:
3296 # Recommender responsible for generating recommendation for the object.
3297 # List should be empty (then the default recommender will generate the recommendation)
3298 # or contain exactly one recommender.
3300 # - name: custom-recommender-performance
3302 # List of resources that the vertical pod autoscaler can control. Defaults to cpu and memory
3303 controlledResources: []
3304 # Specifies which resource values should be controlled: RequestsOnly or RequestsAndLimits.
3305 # controlledValues: RequestsAndLimits
3307 # Define the max allowed resources for the pod
3311 # Define the min allowed resources for the pod
3317 # Specifies whether recommended updates are applied when a Pod is started and whether recommended updates
3318 # are applied during the life of a Pod. Possible values are "Off", "Initial", "Recreate", and "InPlaceOrRecreate".
3319 updateMode: Recreate
3320 # Ingress exposes thanos sidecar outside the cluster
3323 ingressClassName: ""
3327 ## Port to expose on each node
3328 ## Only used if service.type is 'NodePort'
3331 ## Hosts must be provided if Ingress is enabled.
3334 # - thanos-gateway.domain.com
3336 ## Paths to use for ingress rules
3341 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
3342 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
3343 # pathType: ImplementationSpecific
3345 ## TLS configuration for Thanos Ingress
3346 ## Secret must be manually created in the namespace
3349 # - secretName: thanos-gateway-tls
3351 # - thanos-gateway.domain.com
3353 ## ExtraSecret can be used to store various data in an extra secret
3354 ## (use it for example to store hashed basic auth credentials)
3356 ## if not set, name will be auto generated
3361 # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
3362 # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
3366 ingressClassName: ""
3369 ## Redirect ingress to an additional defined port on the service
3373 ## Must be provided if Ingress is enabled.
3376 # - prometheus.domain.com
3378 ## Paths to use for ingress rules - one path should match the prometheusSpec.routePrefix
3383 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
3384 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
3385 # pathType: ImplementationSpecific
3387 ## TLS configuration for Prometheus Ingress
3388 ## Secret must be manually created in the namespace
3391 # - secretName: prometheus-general-tls
3393 # - prometheus.example.com
3394 # -- BETA: Configure the gateway routes for the chart here.
3395 # More routes can be added by adding a dictionary key like the 'main' route.
3396 # Be aware that this is an early beta of this feature,
3397 # kube-prometheus-stack does not guarantee this works and is subject to change.
3398 # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
3399 # [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
3402 # -- Enables or disables the route
3404 # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
3405 apiVersion: gateway.networking.k8s.io/v1
3406 # -- Set the route kind
3407 # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
3412 # - my-filter.example.com
3416 # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
3417 ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
3418 ## matches, filters and additionalRules will be ignored if this is set to true.
3419 httpsRedirect: false
3424 ## Filters define the filters that are applied to requests that match this rule.
3426 ## Session persistence configuration for the route rule.
3427 sessionPersistence: {}
3428 # sessionName: route
3430 # absoluteTimeout: 12h
3432 # lifetimeType: Permanent
3434 ## Additional custom rules that can be added to the route
3436 ## Configuration for creating an Ingress that will map to each Prometheus replica service
3437 ## prometheus.servicePerReplica must be enabled
3441 ingressClassName: ""
3444 ## Final form of the hostname for each per replica ingress is
3445 ## {{ ingressPerReplica.hostPrefix }}-{{ $replicaNumber }}.{{ ingressPerReplica.hostDomain }}
3447 ## Prefix for the per replica ingress that will have `-$replicaNumber`
3448 ## appended to the end
3450 ## Domain that will be used for the per replica ingress
3452 ## Paths to use for ingress rules
3457 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
3458 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
3459 # pathType: ImplementationSpecific
3461 ## Secret name containing the TLS certificate for Prometheus per replica ingress
3462 ## Secret must be manually created in the namespace
3464 ## Separated secret for each per replica Ingress. Can be used together with cert-manager
3466 tlsSecretPerReplica:
3468 ## Final form of the secret for each per replica ingress is
3469 ## {{ tlsSecretPerReplica.prefix }}-{{ $replicaNumber }}
3471 prefix: "prometheus"
3473 ## If true, create a serviceMonitor for prometheus
3476 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
3479 ## Additional labels
3481 additionalLabels: {}
3482 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
3485 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
3488 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
3491 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
3493 labelNameLengthLimit: 0
3494 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
3496 labelValueLengthLimit: 0
3497 ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
3499 ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
3500 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
3503 ## Metric relabel configs to apply to samples before ingestion.
3505 metricRelabelings: []
3507 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
3508 # sourceLabels: [__name__]
3510 # relabel configs to apply to samples before ingestion.
3513 # - sourceLabels: [__meta_kubernetes_pod_node_name]
3516 # targetLabel: nodename
3520 ## Additional Endpoints
3522 additionalEndpoints: []
3523 # - port: oauth-metrics
3525 ## Settings affecting prometheusSpec
3526 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheusspec
3529 ## Statefulset's persistent volume claim retention policy
3530 ## whenDeleted and whenScaled determine whether
3531 ## statefulset's PVCs are deleted (true) or retained (false)
3532 ## on scaling down and deleting statefulset, respectively.
3533 ## Requires Kubernetes version 1.27.0+.
3534 ## Ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#persistentvolumeclaim-retention
3535 persistentVolumeClaimRetentionPolicy: {}
3536 # whenDeleted: Retain
3537 # whenScaled: Retain
3539 ## If true, pass --storage.tsdb.max-block-duration=2h to prometheus. This is already done if using Thanos
3541 disableCompaction: false
3542 ## AutomountServiceAccountToken indicates whether a service account token should be automatically mounted in the pod,
3543 ## If the field isn't set, the operator mounts the service account token by default.
3544 ## Warning: be aware that by default, Prometheus requires the service account token for Kubernetes service discovery,
3545 ## It is possible to use strategic merge patch to project the service account token into the 'prometheus' container.
3546 automountServiceAccountToken: true
3548 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#apiserverconfig
3551 ## Allows setting additional arguments for the Prometheus container
3552 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.Prometheus
3554 ## Convert all classic histograms to native histograms with custom buckets.
3555 ## This corresponds to the 'convert_classic_histograms_to_nhcb' field in Prometheus configuration.
3557 convertClassicHistogramsToNHCB: false
3558 ## Enable scraping of classic histograms that are also exposed as native histograms.
3559 ## This corresponds to the 'always_scrape_classic_histograms' field in Prometheus configuration.
3561 scrapeClassicHistograms: false
3562 ## Enable scraping of native histograms.
3563 ## This corresponds to the 'scrape_native_histograms' field in Prometheus configuration.
3565 scrapeNativeHistograms: false
3566 ## File to which scrape failures are logged.
3567 ## Reloading the configuration will reopen the file.
3568 ## Defaults to empty (disabled)
3569 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.Prometheus
3571 scrapeFailureLogFile: ""
3572 ## Interval between consecutive scrapes.
3574 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/release-0.44/pkg/prometheus/promcfg.go#L180-L183
3577 ## Number of seconds to wait for target to respond before erroring
3580 ## List of scrape classes to expose to scraping objects such as
3581 ## PodMonitors, ServiceMonitors, Probes and ScrapeConfigs.
3584 # - name: istio-mtls
3587 # caFile: /etc/prometheus/secrets/istio.default/root-cert.pem
3588 # certFile: /etc/prometheus/secrets/istio.default/cert-chain.pem
3590 ## PodTargetLabels are appended to the `spec.podTargetLabels` field of all PodMonitor and ServiceMonitor objects.
3595 ## Interval between consecutive evaluations.
3597 evaluationInterval: ""
3598 ## ListenLocal makes the Prometheus server listen on loopback, so that it does not bind against the Pod IP.
3601 ## enableOTLPReceiver enables the OTLP receiver for Prometheus.
3602 enableOTLPReceiver: false
3603 ## EnableAdminAPI enables the Prometheus administrative HTTP API which includes functionality such as deleting time series.
3604 ## This is disabled by default.
3605 ## ref: https://prometheus.io/docs/prometheus/latest/querying/api/#tsdb-admin-apis
3607 enableAdminAPI: false
3608 ## Sets version of Prometheus overriding the Prometheus version as derived
3609 ## from the image tag. Useful in cases where the tag does not follow semver v2.
3611 ## WebTLSConfig defines the TLS parameters for HTTPS
3612 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#webtlsconfig
3614 ## Exemplars related settings that are runtime reloadable.
3615 ## It requires to enable the exemplar storage feature to be effective.
3617 ## Maximum number of exemplars stored in memory for all series.
3618 ## If not set, Prometheus uses its default value.
3619 ## A value of zero or less than zero disables the storage.
3622 # EnableFeatures API enables access to Prometheus disabled features.
3623 # ref: https://prometheus.io/docs/prometheus/latest/feature_flags/
3625 # - exemplar-storage
3627 ## https://prometheus.io/docs/guides/opentelemetry
3630 # promoteResourceAttributes: []
3631 # keepIdentifyingResourceAttributes: false
3632 # translationStrategy: NoUTF8EscapingWithSuffixes
3633 # convertHistogramsToNHCB: false
3637 ## Image of Prometheus.
3641 repository: chainguard-private/prometheus
3643 sha: sha256:1306477d5bcf41caf21e06401b90933497a0ae84cb181376eee8ffaebe058b2b
3644 pullPolicy: IfNotPresent
3645 ## Tolerations for use with node taints
3646 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
3652 # effect: "NoSchedule"
3654 ## If specified, the pod's topology spread constraints.
3655 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
3657 topologySpreadConstraints: []
3659 # topologyKey: topology.kubernetes.io/zone
3660 # whenUnsatisfiable: DoNotSchedule
3667 disableAlerting: false
3668 ## Alertmanagers to which alerts will be sent
3669 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#alertmanagerendpoints
3671 ## Default configuration will connect to the alertmanager deployed as part of this release
3673 alertingEndpoints: []
3680 # bearerTokenFile: ""
3683 ## External labels to add to any time series or alerts when communicating with external systems
3686 ## enable --web.enable-remote-write-receiver flag on prometheus-server
3688 enableRemoteWriteReceiver: false
3689 ## Name of the external label used to denote replica name
3691 replicaExternalLabelName: ""
3692 ## If true, the Operator won't add the external label used to denote replica name
3694 replicaExternalLabelNameClear: false
3695 ## Name of the external label used to denote Prometheus instance name
3697 prometheusExternalLabelName: ""
3698 ## If true, the Operator won't add the external label used to denote Prometheus instance name
3700 prometheusExternalLabelNameClear: false
3701 ## External URL at which Prometheus will be reachable.
3704 ## Define which Nodes the Pods are scheduled on.
3705 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
3708 ## Secrets is a list of Secrets in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
3709 ## The Secrets are mounted into /etc/prometheus/secrets/. Secrets changes after initial creation of a Prometheus object are not
3710 ## reflected in the running Pods. To change the secrets mounted into the Prometheus Pods, the object must be deleted and recreated
3711 ## with the new list of secrets.
3714 ## ConfigMaps is a list of ConfigMaps in the same namespace as the Prometheus object, which shall be mounted into the Prometheus Pods.
3715 ## The ConfigMaps are mounted into /etc/prometheus/configmaps/.
3718 ## QuerySpec defines the query command line flags when starting Prometheus.
3719 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#queryspec
3722 ## If nil, select own namespace. Namespaces to be selected for PrometheusRules discovery.
3723 ruleNamespaceSelector: {}
3724 ## Example which selects PrometheusRules in namespaces with label "prometheus" set to "somelabel"
3725 # ruleNamespaceSelector:
3727 # prometheus: somelabel
3729 ## If true, a nil or {} value for prometheus.prometheusSpec.ruleSelector will cause the
3730 ## prometheus resource to be created with selectors based on values in the helm deployment,
3731 ## which will also match the PrometheusRule resources created
3733 ruleSelectorNilUsesHelmValues: true
3734 ## PrometheusRules to be selected for target discovery.
3735 ## If {}, select all PrometheusRules
3738 ## Example which select all PrometheusRules resources
3739 ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
3748 ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
3751 # role: example-rules
3753 ## If true, a nil or {} value for prometheus.prometheusSpec.serviceMonitorSelector will cause the
3754 ## prometheus resource to be created with selectors based on values in the helm deployment,
3755 ## which will also match the servicemonitors created
3757 serviceMonitorSelectorNilUsesHelmValues: true
3758 ## ServiceMonitors to be selected for target discovery.
3759 ## If {}, select all ServiceMonitors
3761 serviceMonitorSelector: {}
3762 ## Example which selects ServiceMonitors with label "prometheus" set to "somelabel"
3763 # serviceMonitorSelector:
3765 # prometheus: somelabel
3767 ## Namespaces to be selected for ServiceMonitor discovery.
3769 serviceMonitorNamespaceSelector: {}
3770 ## Example which selects ServiceMonitors in namespaces with label "prometheus" set to "somelabel"
3771 # serviceMonitorNamespaceSelector:
3773 # prometheus: somelabel
3775 ## If true, a nil or {} value for prometheus.prometheusSpec.podMonitorSelector will cause the
3776 ## prometheus resource to be created with selectors based on values in the helm deployment,
3777 ## which will also match the podmonitors created
3779 podMonitorSelectorNilUsesHelmValues: true
3780 ## PodMonitors to be selected for target discovery.
3781 ## If {}, select all PodMonitors
3783 podMonitorSelector: {}
3784 ## Example which selects PodMonitors with label "prometheus" set to "somelabel"
3785 # podMonitorSelector:
3787 # prometheus: somelabel
3789 ## If nil, select own namespace. Namespaces to be selected for PodMonitor discovery.
3790 podMonitorNamespaceSelector: {}
3791 ## Example which selects PodMonitor in namespaces with label "prometheus" set to "somelabel"
3792 # podMonitorNamespaceSelector:
3794 # prometheus: somelabel
3796 ## If true, a nil or {} value for prometheus.prometheusSpec.probeSelector will cause the
3797 ## prometheus resource to be created with selectors based on values in the helm deployment,
3798 ## which will also match the probes created
3800 probeSelectorNilUsesHelmValues: true
3801 ## Probes to be selected for target discovery.
3802 ## If {}, select all Probes
3805 ## Example which selects Probes with label "prometheus" set to "somelabel"
3808 # prometheus: somelabel
3810 ## If nil, select own namespace. Namespaces to be selected for Probe discovery.
3811 probeNamespaceSelector: {}
3812 ## Example which selects Probe in namespaces with label "prometheus" set to "somelabel"
3813 # probeNamespaceSelector:
3815 # prometheus: somelabel
3817 ## If true, a nil or {} value for prometheus.prometheusSpec.scrapeConfigSelector will cause the
3818 ## prometheus resource to be created with selectors based on values in the helm deployment,
3819 ## which will also match the scrapeConfigs created
3821 ## If null and scrapeConfigSelector is also null, exclude field from the prometheusSpec
3822 ## (keeping downward compatibility with older versions of CRD)
3824 scrapeConfigSelectorNilUsesHelmValues: true
3825 ## scrapeConfigs to be selected for target discovery.
3826 ## If {}, select all scrapeConfigs
3828 scrapeConfigSelector: {}
3829 ## Example which selects scrapeConfigs with label "prometheus" set to "somelabel"
3830 # scrapeConfigSelector:
3832 # prometheus: somelabel
3834 ## If nil, select own namespace. Namespaces to be selected for scrapeConfig discovery.
3835 ## If null, exclude the field from the prometheusSpec (keeping downward compatibility with older versions of CRD)
3836 scrapeConfigNamespaceSelector: {}
3837 ## Example which selects scrapeConfig in namespaces with label "prometheus" set to "somelabel"
3838 # scrapeConfigNamespaceSelector:
3840 # prometheus: somelabel
3842 ## How long to retain metrics
3845 ## Maximum size of metrics
3846 ## Unit format should be in the form of "50GiB"
3848 ## Allow out-of-order/out-of-bounds samples ingested into Prometheus for a specified duration
3849 ## See https://prometheus.io/docs/prometheus/latest/configuration/configuration/#tsdb
3851 outOfOrderTimeWindow: 0s
3852 ## Enable compression of the write-ahead log using Snappy.
3854 walCompression: true
3855 ## If true, the Operator won't process any Prometheus configuration changes
3858 ## Number of replicas of each shard to deploy for a Prometheus deployment.
3859 ## Number of replicas multiplied by shards is the total number of Pods created.
3862 ## EXPERIMENTAL: Number of shards to distribute targets onto.
3863 ## Number of replicas multiplied by shards is the total number of Pods created.
3864 ## Note that scaling down shards will not reshard data onto remaining instances, it must be manually moved.
3865 ## Increasing shards will not reshard data either but it will continue to be available from the same instances.
3866 ## To query globally use Thanos sidecar and Thanos querier or remote write data to a central location.
3867 ## Sharding is done on the content of the `__address__` target meta-label.
3870 ## Log level for Prometheus can be configured here
3873 ## Log format for Prometheus can be configured here
3876 ## Prefix used to register routes, overriding externalUrl route.
3877 ## Useful for proxies that rewrite URLs.
3880 ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
3881 ## Metadata Labels and Annotations gets propagated to the prometheus pods.
3886 # k8s-app: prometheus
3888 ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
3889 ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
3890 ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
3891 ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
3892 podAntiAffinity: "soft"
3893 ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
3894 ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
3896 podAntiAffinityTopologyKey: kubernetes.io/hostname
3897 ## Assign custom affinity rules to the prometheus instance
3898 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
3902 # requiredDuringSchedulingIgnoredDuringExecution:
3903 # nodeSelectorTerms:
3904 # - matchExpressions:
3905 # - key: kubernetes.io/e2e-az-name
3911 ## The remote_read spec configuration for Prometheus.
3912 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotereadspec
3914 # - url: http://remote1/read
3915 ## additionalRemoteRead is appended to remoteRead
3916 additionalRemoteRead: []
3917 ## The remote_write spec configuration for Prometheus.
3918 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#remotewritespec
3920 # - url: http://remote1/push
3921 ## additionalRemoteWrite is appended to remoteWrite
3922 additionalRemoteWrite: []
3923 ## Enable/Disable Grafana dashboards provisioning for prometheus remote write feature
3924 remoteWriteDashboards: false
3925 ## Resource limits & requests
3931 ## Prometheus StorageSpec for persistent data
3932 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
3935 ## Using PersistentVolumeClaim
3937 # volumeClaimTemplate:
3939 # storageClassName: gluster
3940 # accessModes: ["ReadWriteOnce"]
3946 ## Using tmpfs volume
3951 # Additional volumes on the output StatefulSet definition.
3953 # Additional VolumeMounts on the output StatefulSet definition.
3955 ## AdditionalScrapeConfigs allows specifying additional Prometheus scrape configurations. Scrape configurations
3956 ## are appended to the configurations generated by the Prometheus Operator. Job configurations must have the form
3957 ## as specified in the official Prometheus documentation:
3958 ## https://prometheus.io/docs/prometheus/latest/configuration/configuration/#scrape_config. As scrape configs are
3959 ## appended, the user is responsible to make sure it is valid. Note that using this feature may expose the possibility
3960 ## to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible
3961 ## scrape configs are going to break Prometheus after the upgrade.
3962 ## AdditionalScrapeConfigs can be defined as a list or as a templated string.
3964 ## The scrape configuration example below will find master nodes, provided they have the name .*mst.*, relabel the
3965 ## port to 2379 and allow etcd scraping provided it is running on all Kubernetes master nodes
3967 additionalScrapeConfigs: []
3968 # - job_name: kube-etcd
3969 # kubernetes_sd_configs:
3973 # ca_file: /etc/prometheus/secrets/etcd-client-cert/etcd-ca
3974 # cert_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client
3975 # key_file: /etc/prometheus/secrets/etcd-client-cert/etcd-client-key
3977 # - action: labelmap
3978 # regex: __meta_kubernetes_node_label_(.+)
3979 # - source_labels: [__address__]
3981 # target_label: __address__
3982 # regex: ([^:;]+):(\d+)
3983 # replacement: ${1}:2379
3984 # - source_labels: [__meta_kubernetes_node_name]
3987 # - source_labels: [__meta_kubernetes_node_name]
3989 # target_label: node
3992 # metric_relabel_configs:
3993 # - regex: (kubernetes_io_hostname|failure_domain_beta_kubernetes_io_region|beta_kubernetes_io_os|beta_kubernetes_io_arch|beta_kubernetes_io_instance_type|failure_domain_beta_kubernetes_io_zone)
3996 ## If scrape config contains a repetitive section, you may want to use a template.
3997 ## In the following example, you can see how to define `gce_sd_configs` for multiple zones
3998 # additionalScrapeConfigs: |
3999 # - job_name: "node-exporter"
4001 # {{range $zone := .Values.gcp_zones}}
4002 # - project: "project1"
4009 ## If additional scrape configurations are already deployed in a single secret file you can use this section.
4010 ## Expected values are the secret name and key
4011 ## Cannot be used with additionalScrapeConfigs
4012 additionalScrapeConfigsSecret: {}
4017 ## additionalPrometheusSecretsAnnotations allows to add annotations to the kubernetes secret. This can be useful
4018 ## when deploying via spinnaker to disable versioning on the secret, strategy.spinnaker.io/versioned: 'false'
4019 additionalPrometheusSecretsAnnotations: {}
4020 ## AdditionalAlertManagerConfigs allows for manual configuration of alertmanager jobs in the form as specified
4021 ## in the official Prometheus documentation https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alertmanager_config.
4022 ## AlertManager configurations specified are appended to the configurations generated by the Prometheus Operator.
4023 ## As AlertManager configs are appended, the user is responsible to make sure it is valid. Note that using this
4024 ## feature may expose the possibility to break upgrades of Prometheus. It is advised to review Prometheus release
4025 ## notes to ensure that no incompatible AlertManager configs are going to break Prometheus after the upgrade.
4027 additionalAlertManagerConfigs: []
4028 # - consul_sd_configs:
4029 # - server: consul.dev.test:8500
4032 # tag_separator: ','
4034 # - metrics-prometheus-alertmanager
4036 ## If additional alertmanager configurations are already deployed in a single secret, or you want to manage
4037 ## them separately from the helm deployment, you can use this section.
4038 ## Expected values are the secret name and key
4039 ## Cannot be used with additionalAlertManagerConfigs
4040 additionalAlertManagerConfigsSecret: {}
4045 ## AdditionalAlertRelabelConfigs allows specifying Prometheus alert relabel configurations. Alert relabel configurations specified are appended
4046 ## to the configurations generated by the Prometheus Operator. Alert relabel configurations specified must have the form as specified in the
4047 ## official Prometheus documentation: https://prometheus.io/docs/prometheus/latest/configuration/configuration/#alert_relabel_configs.
4048 ## As alert relabel configs are appended, the user is responsible to make sure it is valid. Note that using this feature may expose the
4049 ## possibility to break upgrades of Prometheus. It is advised to review Prometheus release notes to ensure that no incompatible alert relabel
4050 ## configs are going to break Prometheus after the upgrade.
4052 additionalAlertRelabelConfigs: []
4054 # regex: prometheus_replica
4058 ## If additional alert relabel configurations are already deployed in a single secret, or you want to manage
4059 ## them separately from the helm deployment, you can use this section.
4060 ## Expected values are the secret name and key
4061 ## Cannot be used with additionalAlertRelabelConfigs
4062 additionalAlertRelabelConfigsSecret: {}
4066 ## SecurityContext holds pod-level security attributes and common container settings.
4067 ## This defaults to non root user with uid 1000 and gid 2000.
4068 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md
4076 type: RuntimeDefault
4077 ## DNS configuration for Prometheus.
4078 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.PodDNSConfig
4080 ## DNS policy for Prometheus.
4081 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#dnspolicystring-alias
4083 ## Priority class assigned to the Pods
4085 priorityClassName: ""
4086 ## Thanos configuration allows configuring various aspects of a Prometheus server in a Thanos environment.
4087 ## This section is experimental, it may change significantly without deprecation notice in any release.
4088 ## This is experimental and may change significantly without backward compatibility in any release.
4089 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosspec
4092 # image: quay.io/thanos/thanos
4093 # secretProviderClass:
4097 # - resourceName: "projects/$PROJECT_ID/secrets/testsecret/versions/latest"
4098 # fileName: "objstore.yaml"
4099 ## ObjectStorageConfig configures object storage in Thanos.
4100 # objectStorageConfig:
4101 # # use existing secret, if configured, objectStorageConfig.secret will not be used
4102 # existingSecret: {}
4105 # # will render objectStorageConfig secret data and configure it to be used by Thanos custom resource,
4106 # # ignored when prometheusspec.thanos.objectStorageConfig.existingSecret is set
4107 # # https://thanos.io/tip/thanos/storage.md/#s3
4117 ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a Prometheus pod.
4118 ## if using proxy extraContainer update targetPort with proxy container port
4121 # - name: oauth-proxy
4122 # image: quay.io/oauth2-proxy/oauth2-proxy:v7.15.2
4124 # - --upstream=http://127.0.0.1:9090
4125 # - --http-address=0.0.0.0:8081
4126 # - --metrics-address=0.0.0.0:8082
4129 # - containerPort: 8081
4132 # - containerPort: 8082
4133 # name: oauth-metrics
4137 ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
4138 ## (permissions, dir tree) on mounted volumes before starting prometheus
4140 ## PortName to use for Prometheus.
4142 portName: "http-web"
4143 ## ArbitraryFSAccessThroughSMs configures whether configuration based on a service monitor can access arbitrary files
4144 ## on the file system of the Prometheus container e.g. bearer token files.
4145 arbitraryFSAccessThroughSMs: false
4146 ## OverrideHonorLabels if set to true overrides all user configured honor_labels. If HonorLabels is set in ServiceMonitor
4147 ## or PodMonitor to true, this overrides honor_labels to false.
4148 overrideHonorLabels: false
4149 ## OverrideHonorTimestamps allows to globally enforce honoring timestamps in all scrape configs.
4150 overrideHonorTimestamps: false
4151 ## When ignoreNamespaceSelectors is set to true, namespaceSelector from all PodMonitor, ServiceMonitor and Probe objects will be ignored,
4152 ## they will only discover targets within the namespace of the PodMonitor, ServiceMonitor and Probe object,
4153 ## and servicemonitors will be installed in the default service namespace.
4154 ## Defaults to false.
4155 ignoreNamespaceSelectors: false
4156 ## EnforcedNamespaceLabel enforces adding a namespace label of origin for each alert and metric that is user created.
4157 ## The label value will always be the namespace of the object that is being created.
4158 ## Disabled by default
4159 enforcedNamespaceLabel: ""
4160 ## PrometheusRulesExcludedFromEnforce - list of prometheus rules to be excluded from enforcing of adding namespace labels.
4161 ## Works only if enforcedNamespaceLabel is set to true. Make sure both ruleNamespace and ruleName are set for each pair
4162 ## Deprecated, use `excludedFromEnforcement` instead
4163 prometheusRulesExcludedFromEnforce: []
4164 ## ExcludedFromEnforcement - list of object references to PodMonitor, ServiceMonitor, Probe and PrometheusRule objects
4165 ## to be excluded from enforcing a namespace label of origin.
4166 ## Works only if enforcedNamespaceLabel is set to true.
4167 ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#objectreference
4168 excludedFromEnforcement: []
4169 ## QueryLogFile specifies the file to which PromQL queries are logged. Note that this location must be writable,
4170 ## and can be persisted using an attached volume. Alternatively, the location can be set to a stdout location such
4171 ## as /dev/stdout to log query information to the default Prometheus log stream. This is only available in versions
4172 ## of Prometheus >= 2.16.0. For more details, see the Prometheus docs (https://prometheus.io/docs/guides/query-log/)
4174 # Use to set global sample_limit for Prometheus. This act as default SampleLimit for ServiceMonitor or/and PodMonitor.
4175 # Set to 'false' to disable global sample_limit, or set to a number to override the default value.
4177 # EnforcedKeepDroppedTargetsLimit defines a limit on the number of targets dropped by relabeling that will be kept in memory.
4178 # The value overrides any spec.keepDroppedTargets set by ServiceMonitor, PodMonitor, Probe objects unless spec.keepDroppedTargets
4179 # is greater than zero and less than spec.enforcedKeepDroppedTargets. 0 means no limit.
4180 enforcedKeepDroppedTargets: 0
4181 ## EnforcedSampleLimit defines global limit on number of scraped samples that will be accepted. This overrides any SampleLimit
4182 ## set per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the SampleLimit to keep overall
4183  ## number of samples/series under the desired limit. Note that if SampleLimit is lower, that value will be taken instead.
4184 enforcedSampleLimit: false
4185 ## EnforcedTargetLimit defines a global limit on the number of scraped targets. This overrides any TargetLimit set
4186 ## per ServiceMonitor or/and PodMonitor. It is meant to be used by admins to enforce the TargetLimit to keep the overall
4187 ## number of targets under the desired limit. Note that if TargetLimit is lower, that value will be taken instead, except
4188 ## if either value is zero, in which case the non-zero value will be used. If both values are zero, no limit is enforced.
4189 enforcedTargetLimit: false
4190 ## Per-scrape limit on number of labels that will be accepted for a sample. If more than this number of labels are present
4191 ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
4192 ## 2.27.0 and newer.
4193 enforcedLabelLimit: false
4194 ## Per-scrape limit on length of labels name that will be accepted for a sample. If a label name is longer than this number
4195 ## post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus versions
4196 ## 2.27.0 and newer.
4197 enforcedLabelNameLengthLimit: false
4198 ## Per-scrape limit on length of labels value that will be accepted for a sample. If a label value is longer than this
4199 ## number post metric-relabeling, the entire scrape will be treated as failed. 0 means no limit. Only valid in Prometheus
4200 ## versions 2.27.0 and newer.
4201 enforcedLabelValueLengthLimit: false
4202 ## AllowOverlappingBlocks enables vertical compaction and vertical query merge in Prometheus. This is still experimental
4203 ## in Prometheus so it may change in any upcoming release.
4204 allowOverlappingBlocks: false
4205 ## Specifies the validation scheme for metric and label names.
4206 ## Supported values are: Legacy, UTF8
4207 nameValidationScheme: ""
4208 ## Minimum number of seconds for which a newly created pod should be ready without any of its container crashing for it to
4209 ## be considered available. Defaults to 0 (pod will be considered available as soon as it is ready).
4211 ## Duration in seconds the pod needs to terminate gracefully.
4212 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
4213 terminationGracePeriodSeconds: ~
4214 # Required for use in managed kubernetes clusters (such as AWS EKS) with custom CNI (such as calico),
4215 # because control-plane managed by AWS cannot communicate with pods' IP CIDR and admission webhooks are not working
4216 # Use the host's network namespace if true. Make sure to understand the security implications if you want to enable it.
4217 # When hostNetwork is enabled, this will set dnsPolicy to ClusterFirstWithHostNet automatically.
4219 ## Use the host's user namespace for Prometheus pods.
4220 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
4222 # HostAlias holds the mapping between IP and hostnames that will be injected
4223 # as an entry in the pod's hosts file.
4230 ## TracingConfig configures tracing in Prometheus.
4231 ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#prometheustracingconfig
4233 ## Defines the service discovery role used to discover targets from ServiceMonitor objects and Alertmanager endpoints.
4234 ## If set, the value should be either "Endpoints" or "EndpointSlice". If unset, the operator assumes the "Endpoints" role.
4235 serviceDiscoveryRole: ""
4236 ## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
4237 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
4238 podManagementPolicy: ""
4239 ## Update strategy for the StatefulSet.
4240 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
4242 # type: RollingUpdate
4246 ## Additional configuration which is not covered by the properties above. (passed through tpl)
4247 additionalConfig: {}
4248 ## Additional configuration which is not covered by the properties above.
4249 ## Useful, if you need advanced templating inside alertmanagerSpec.
4250 ## Otherwise, use prometheus.prometheusSpec.additionalConfig (passed through tpl)
4251 additionalConfigString: ""
4252 ## Defines the maximum time that the `prometheus` container's startup probe
4253 ## will wait before being considered failed. The startup probe will return
4254 ## success after the WAL replay is complete. If set, the value should be
4255 ## greater than 60 (seconds). Otherwise it will be equal to 900 seconds (15
4257 maximumStartupDurationSeconds: 0
4258 ## Set default scrapeProtocols for Prometheus instances
4259 ## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#scrapeprotocolstring-alias
4261 additionalRulesForClusterRole: []
4262 # - apiGroups: [ "" ]
4265 # verbs: [ "get", "list", "watch" ]
4267 additionalServiceMonitors: []
4268 ## Name of the ServiceMonitor to create
4272 ## Additional labels to set used for the ServiceMonitorSelector. Together with standard labels from
4275 # additionalLabels: {}
4277 ## Service label for use in assembling a job name of the form <label value>-<port>
4278 ## If no label is specified, the service name is used.
4282 ## labels to transfer from the kubernetes service to the target
4286 ## labels to transfer from the kubernetes pods to the target
4288 # podTargetLabels: []
4290 ## Label selector for services to which this ServiceMonitor applies
4293 ## Example which selects all services to be monitored
4294 ## with label "monitoredby" with values any of "example-service-1" or "example-service-2"
4296 # - key: "monitoredby"
4299 # - example-service-1
4300 # - example-service-2
4302 ## label selector for services
4306 ## Namespaces from which services are selected
4308 # namespaceSelector:
4309 ## Match any namespace
4313 ## Explicit list of namespace names to select
4317 ## Endpoints of the selected service to be monitored
4320 ## Name of the endpoint's service port
4321 ## Mutually exclusive with targetPort
4324 ## Name or number of the endpoint's target port
4325 ## Mutually exclusive with port
4328 ## File containing bearer token to be used when scraping targets
4330 # bearerTokenFile: ""
4332 ## Interval at which metrics should be scraped
4336 ## HTTP path to scrape for metrics
4340 ## HTTP scheme to use for scraping
4344 ## TLS configuration to use when scraping the endpoint
4348 ## Path to the CA file
4352 ## Path to client certificate file
4356 ## Skip certificate verification
4358 # insecureSkipVerify: false
4360 ## Path to client key file
4364 ## Server name used to verify host name
4368 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
4369 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4371 # metricRelabelings: []
4373 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
4374 # sourceLabels: [__name__]
4376 ## RelabelConfigs to apply to samples before scraping
4377 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4380 # - sourceLabels: [__meta_kubernetes_pod_node_name]
4383 # targetLabel: nodename
4387 ## Fallback scrape protocol used by Prometheus for scraping metrics
4388 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
4390 # fallbackScrapeProtocol: ""
4392 ## Attaches node metadata to the discovered targets
4393 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata
4397 additionalPodMonitors: []
4398 ## Name of the PodMonitor to create
4401## Additional labels to set used for the PodMonitorSelector. Together with standard labels from
4404# additionalLabels: {}
4406## Pod label for use in assembling a job name of the form <label value>-<port>
4407## If no label is specified, the pod endpoint name is used.
4411## Label selector for pods to which this PodMonitor applies
4414## Example which selects all Pods to be monitored
4415## with label "monitoredby" with values any of "example-pod-1" or "example-pod-2"
4417# - key: "monitoredby"
4423## label selector for pods
4427## PodTargetLabels transfers labels on the Kubernetes Pod onto the target.
4429# podTargetLabels: {}
4431## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
4435## Namespaces from which pods are selected
4438## Match any namespace
4442## Explicit list of namespace names to select
4446## Endpoints of the selected pods to be monitored
4447## https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#podmetricsendpoint
4449# podMetricsEndpoints: []
4451## Fallback scrape protocol used by Prometheus for scraping metrics
4452## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.ScrapeProtocol
4454# fallbackScrapeProtocol: ""
4456## Attaches node metadata to the discovered targets
4457## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#monitoring.coreos.com/v1.AttachMetadata
4462## Configuration for thanosRuler
4463## ref: https://thanos.io/tip/components/rule.md/
4466 ## Deploy thanosRuler
4469 ## Annotations for ThanosRuler
4472 ## Service account for ThanosRuler to use.
4473 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
4479 ## Configure pod disruption budgets for ThanosRuler
4480 ## ref: https://kubernetes.io/docs/tasks/run-application/configure-pdb/#specifying-a-poddisruptionbudget
4482 podDisruptionBudget:
4485 # maxUnavailable: ""
4486 unhealthyPodEvictionPolicy: AlwaysAllow
4489 ingressClassName: ""
4492 ## Hosts must be provided if Ingress is enabled.
4495 # - thanosruler.domain.com
4497 ## Paths to use for ingress rules - one path should match the thanosruler.routePrefix
4502 ## For Kubernetes >= 1.18 you should specify the pathType (determines how Ingress paths should be matched)
4503 ## See https://kubernetes.io/blog/2020/04/02/improvements-to-the-ingress-api-in-kubernetes-1.18/#better-path-matching-with-path-types
4504 # pathType: ImplementationSpecific
4506 ## TLS configuration for ThanosRuler Ingress
4507 ## Secret must be manually created in the namespace
4510 # - secretName: thanosruler-general-tls
4512 # - thanosruler.example.com
4513 # -- BETA: Configure the gateway routes for the chart here.
4514 # More routes can be added by adding a dictionary key like the 'main' route.
4515 # Be aware that this is an early beta of this feature,
4516 # kube-prometheus-stack does not guarantee this works and is subject to change.
4517 # Being BETA this can/will change in the future without notice, do not use unless you want to take that risk
4518 # [[ref]](https://gateway-api.sigs.k8s.io/reference/spec/#gateway.networking.k8s.io%2fv1alpha2)
4521 # -- Enables or disables the route
4523 # -- Set the route apiVersion, e.g. gateway.networking.k8s.io/v1 or gateway.networking.k8s.io/v1alpha2
4524 apiVersion: gateway.networking.k8s.io/v1
4525 # -- Set the route kind
4526 # Valid options are GRPCRoute, HTTPRoute, TCPRoute, TLSRoute, UDPRoute
4531 # - my-filter.example.com
4535 # -- create http route for redirect (https://gateway-api.sigs.k8s.io/guides/http-redirect-rewrite/#http-to-https-redirects)
4536 ## Take care that you only enable this on the http listener of the gateway to avoid an infinite redirect.
4537      ## matches, filters and additionalRules will be ignored if this is set to true. Be aware
4538 httpsRedirect: false
4543 ## Filters define the filters that are applied to requests that match this rule.
4545 ## Session persistence configuration for the route rule.
4546 sessionPersistence: {}
4547 # sessionName: route
4549 # absoluteTimeout: 12h
4551 # lifetimeType: Permanent
4553 ## Additional custom rules that can be added to the route
4555 ## Configuration for ThanosRuler service
4564 ipFamilies: ["IPv6", "IPv4"]
4565 ipFamilyPolicy: "PreferDualStack"
4566 ## Port for ThanosRuler Service to listen on
4569 ## To be used with a proxy extraContainer port
4572 ## Port to expose on each node
4573 ## Only used if service.type is 'NodePort'
4576 ## List of IP addresses at which the Prometheus server service is available
4577 ## Ref: https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
4580 ## Additional ports to open for ThanosRuler service
4584 loadBalancerSourceRanges: []
4585 ## Denotes if this Service desires to route external traffic to node-local or cluster-wide endpoints
4587 externalTrafficPolicy: Cluster
4591 ## Configuration for creating a ServiceMonitor for the ThanosRuler service
4594 ## If true, create a serviceMonitor for thanosRuler
4597 ## Scrape interval. If not set, the Prometheus default scrape interval is used.
4600 ## Additional labels
4602 additionalLabels: {}
4603 ## SampleLimit defines per-scrape limit on number of scraped samples that will be accepted.
4606 ## TargetLimit defines a limit on the number of scraped targets that will be accepted.
4609 ## Per-scrape limit on number of labels that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4612 ## Per-scrape limit on length of labels name that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4614 labelNameLengthLimit: 0
4615 ## Per-scrape limit on length of labels value that will be accepted for a sample. Only valid in Prometheus versions 2.27.0 and newer.
4617 labelValueLengthLimit: 0
4618 ## proxyUrl: URL of a proxy that should be used for scraping.
4621 ## scheme: HTTP scheme to use for scraping. Can be used with `tlsConfig` for example if using istio mTLS.
4623 ## tlsConfig: TLS configuration to use when scraping the endpoint. For example if using istio mTLS.
4624 ## Of type: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#tlsconfig
4627 ## MetricRelabelConfigs to apply to samples after scraping, but before ingestion.
4628 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4630 metricRelabelings: []
4632 # regex: 'kube_(daemonset|deployment|pod|namespace|node|statefulset).+'
4633 # sourceLabels: [__name__]
4635 ## RelabelConfigs to apply to samples before scraping
4636 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#relabelconfig
4639 # - sourceLabels: [__meta_kubernetes_pod_node_name]
4642 # targetLabel: nodename
4646 ## Additional Endpoints
4648 additionalEndpoints: []
4649 # - port: oauth-metrics
4651  ## Settings affecting thanosRulerSpec
4652 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosrulerspec
4655 ## Standard object's metadata. More info: https://github.com/kubernetes/community/blob/master/contributors/devel/sig-architecture/api-conventions.md#metadata
4656 ## Metadata Labels and Annotations gets propagated to the ThanosRuler pods.
4661 ## Image of ThanosRuler
4665 repository: chainguard-private/thanos
4667 sha: sha256:91616ecf31235c2f626295c55d11389fcdcfb2b1f817099a9d7460e7765bd183
4668 ## Namespaces to be selected for PrometheusRules discovery.
4669 ## If nil, select own namespace. Namespaces to be selected for ServiceMonitor discovery.
4670 ## See https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#namespaceselector for usage
4672 ruleNamespaceSelector: {}
4673 ## If true, a nil or {} value for thanosRuler.thanosRulerSpec.ruleSelector will cause the
4674 ## prometheus resource to be created with selectors based on values in the helm deployment,
4675 ## which will also match the PrometheusRule resources created
4677 ruleSelectorNilUsesHelmValues: true
4678 ## PrometheusRules to be selected for target discovery.
4679 ## If {}, select all PrometheusRules
4682 ## Example which select all PrometheusRules resources
4683 ## with label "prometheus" with values any of "example-rules" or "example-rules-2"
4692 ## Example which select all PrometheusRules resources with label "role" set to "example-rules"
4695 # role: example-rules
4697 ## Define Log Format
4698 # Use logfmt (default) or json logging
4700 ## Log level for ThanosRuler to be configured with.
4703 ## Size is the expected size of the thanosRuler cluster. The controller will eventually make the size of the
4704 ## running cluster equal to the expected size.
4706 ## Time duration ThanosRuler shall retain data for. Default is '24h', and must match the regular expression
4707    ## [0-9]+(ms|s|m|h) (milliseconds, seconds, minutes, hours).
4710 ## Interval between consecutive evaluations.
4712 evaluationInterval: ""
4713 ## Storage is the definition of how storage will be used by the ThanosRuler instances.
4714 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/platform/storage.md
4717 # volumeClaimTemplate:
4719 # storageClassName: gluster
4720 # accessModes: ["ReadWriteOnce"]
4726 ## AlertmanagerConfig define configuration for connecting to alertmanager.
4727 ## Only available with Thanos v0.10.0 and higher. Maps to the alertmanagers.config Thanos Ruler arg.
4728 alertmanagersConfig:
4729 # use existing secret, if configured, alertmanagersConfig.secret will not be used
4733 # will render alertmanagersConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when alertmanagersConfig.existingSecret is set
4734 # https://thanos.io/tip/components/rule.md/#alertmanager
4740 # username: some_user
4741 # password: some_pass
4743 # - alertmanager.thanos.io
4746 ## DEPRECATED. Define URLs to send alerts to Alertmanager. For Thanos v0.10.0 and higher, alertmanagersConfig should be used instead.
4747 ## Note: this field will be ignored if alertmanagersConfig is specified. Maps to the alertmanagers.url Thanos Ruler arg.
4750 ## The external URL the Thanos Ruler instances will be available under. This is necessary to generate correct URLs. This is necessary if Thanos Ruler is not served from root of a DNS name. string false
4753 ## If true, http://{{ template "kube-prometheus-stack.thanosRuler.name" . }}.{{ template "kube-prometheus-stack.namespace" . }}:{{ .Values.thanosRuler.service.port }}
4754 ## will be used as value for externalPrefix
4755 externalPrefixNilUsesHelmValues: true
4756 ## The route prefix ThanosRuler registers HTTP handlers for. This is useful, if using ExternalURL and a proxy is rewriting HTTP routes of a request, and the actual ExternalURL is still true,
4757 ## but the server serves requests under a different route prefix. For example for use with kubectl proxy.
4760 ## ObjectStorageConfig configures object storage in Thanos
4761 objectStorageConfig:
4762 # use existing secret, if configured, objectStorageConfig.secret will not be used
4766 # will render objectStorageConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when objectStorageConfig.existingSecret is set
4767 # https://thanos.io/tip/thanos/storage.md/#s3
4776 ## Labels by name to drop before sending to alertmanager
4777 ## Maps to the --alert.label-drop flag of thanos ruler.
4779 ## QueryEndpoints defines Thanos querier endpoints from which to query metrics.
4780 ## Maps to the --query flag of thanos ruler.
4782 ## Define configuration for connecting to thanos query instances. If this is defined, the queryEndpoints field will be ignored.
4783 ## Maps to the query.config CLI argument. Only available with thanos v0.11.0 and higher.
4785 # use existing secret, if configured, queryConfig.secret will not be used
4789 # render queryConfig secret data and configure it to be used by Thanos Ruler custom resource, ignored when queryConfig.existingSecret is set
4790 # https://thanos.io/tip/components/rule.md/#query-api
4794 # username: some_user
4795 # password: some_pass
4800 ## Labels configure the external label pairs to ThanosRuler. A default replica
4801 ## label `thanos_ruler_replica` will be always added as a label with the value
4802 ## of the pod's name and it will be dropped in the alerts.
4804 ## If set to true all actions on the underlying managed objects are not going to be performed, except for delete actions.
4807 ## Allows setting additional arguments for the ThanosRuler container
4808 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosruler
4811 # - name: remote-write.config
4814 # - "name": "receiver-0"
4815 # "remote_timeout": "30s"
4816 # "url": "http://thanos-receiver-0.thanos-receiver:8081/api/v1/receive"
4818 ## Define which Nodes the Pods are scheduled on.
4819 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/#nodeselector
4822 ## Define resources requests and limits for single Pods.
4823 ## ref: https://kubernetes.io/docs/concepts/configuration/manage-resources-containers/
4829 ## Pod anti-affinity can prevent the scheduler from placing Prometheus replicas on the same node.
4830 ## The default value "soft" means that the scheduler should *prefer* to not schedule two replica pods onto the same node but no guarantee is provided.
4831 ## The value "hard" means that the scheduler is *required* to not schedule two replica pods onto the same node.
4832 ## The value "" will disable pod anti-affinity so that no anti-affinity rules will be configured.
4834 podAntiAffinity: "soft"
4835 ## If anti-affinity is enabled sets the topologyKey to use for anti-affinity.
4836 ## This can be changed to, for example, failure-domain.beta.kubernetes.io/zone
4838 podAntiAffinityTopologyKey: kubernetes.io/hostname
4839 ## Assign custom affinity rules to the thanosRuler instance
4840 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/assign-pod-node/
4844 # requiredDuringSchedulingIgnoredDuringExecution:
4845 # nodeSelectorTerms:
4846 # - matchExpressions:
4847 # - key: kubernetes.io/e2e-az-name
4853 ## If specified, the pod's tolerations.
4854 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/taint-and-toleration/
4860 # effect: "NoSchedule"
4862 ## If specified, the pod's topology spread constraints.
4863 ## ref: https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
4865 topologySpreadConstraints: []
4867 # topologyKey: topology.kubernetes.io/zone
4868 # whenUnsatisfiable: DoNotSchedule
4873 ## SecurityContext holds pod-level security attributes and common container settings.
4874 ## This defaults to non root user with uid 1000 and gid 2000. *v1.PodSecurityContext false
4875 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
4883 type: RuntimeDefault
4884 ## Use the host's user namespace for ThanosRuler pods.
4885 ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/
4887 ## ListenLocal makes the ThanosRuler server listen on loopback, so that it does not bind against the Pod IP.
4888 ## Note this is only for the ThanosRuler UI, not the gossip communication.
4891    ## Containers allows injecting additional containers. This is meant to allow adding an authentication proxy to a ThanosRuler pod.
4894 # Additional volumes on the output StatefulSet definition.
4896 # Additional VolumeMounts on the output StatefulSet definition.
4898 ## InitContainers allows injecting additional initContainers. This is meant to allow doing some changes
4899 ## (permissions, dir tree) on mounted volumes before starting prometheus
4901 ## Priority class assigned to the Pods
4903 priorityClassName: ""
4904 ## PortName to use for ThanosRuler.
4907 ## Duration in seconds the pod needs to terminate gracefully.
4908 ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#pod-termination
4909 terminationGracePeriodSeconds: ~
4910 ## WebTLSConfig defines the TLS parameters for HTTPS
4911 ## ref: https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api-reference/api.md#thanosrulerwebspec
4913 ## Pod management policy. Kubernetes default is OrderedReady but prometheus-operator default is Parallel.
4914 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#pod-management-policies
4915 podManagementPolicy: ""
4916 ## Update strategy for the StatefulSet.
4917 ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
4919 # type: RollingUpdate
4923 ## Additional configuration which is not covered by the properties above. (passed through tpl)
4924 additionalConfig: {}
4925 ## Additional configuration which is not covered by the properties above.
4926 ## Useful, if you need advanced templating
4927 additionalConfigString: ""
4928 ## ExtraSecret can be used to store various data in an extra secret
4929 ## (use it for example to store hashed basic auth credentials)
4931 ## if not set, name will be auto generated
4936 # foo:$apr1$OFG3Xybp$ckL0FHDAkoXYIlH9.cysT0
4937 # someoneelse:$apr1$DMZX2Z4q$6SbQIfyuLQd.xmo/P0m2c.
4938## Setting to true produces cleaner resource names, but requires a data migration because the name of the persistent volume changes. Therefore this should only be set once on initial installation.
4940cleanPrometheusOperatorObjectNames: false
4941## Extra manifests to deploy. Can be of type dict or list.
4942## If dict, keys are ignored and only values are used.
4943## Items contained within extraObjects can be defined as dict or string and are passed through tpl.
4949# name: prometheus-extra
4951# extra-data: "value"
4953# can also be defined as a string, useful for templating field names
4961# {{- range $key, $value := .Values.commonLabels }}
4962# {{ $key }}: {{ $value }}
4965# plaintext: Zm9vYmFy
4966# templated: '{{ print "foobar" | upper | b64enc }}'