1# Default values for opentelemetry-collector.
2# This is a YAML-formatted file.
3# Declare variables to be passed into your templates.
7# Valid values are "daemonset", "deployment", and "statefulset".
9# Override the default apiVersion for custom controllers or for testing new API versions.
11# Specify which namespace should be used to deploy the resources into
13# Handles basic configuration of components that
14# also require k8s modifications to work correctly.
15# .Values.config can be used to modify/add to a preset
16# component configuration, but CANNOT be used to remove
17# preset configuration. If you require removal of any
18# sections of a preset configuration, you cannot use
19# the preset. Instead, configure the component manually in
20# .Values.config and use the other fields supplied in the
21# values.yaml to configure k8s as necessary.
23 # Configures the collector to collect logs.
24 # Adds the filelog receiver to the logs pipeline
25 # and adds the necessary volumes and volume mounts.
26 # Best used with mode = daemonset.
27 # See https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver for details on the receiver.
30 includeCollectorLogs: false
31 # Enabling this writes checkpoints in the /var/lib/otelcol/ host directory.
32 # Note this changes the collector's user to root, so that it can write to the host directory.
33 storeCheckpoints: false
34 # The maximum bytes size of the recombined field.
35 # Once the size exceeds the limit, all received entries of the source will be combined and flushed.
36 maxRecombineLogSize: 102400
37 # Configures the collector to collect host metrics.
38 # Adds the hostmetrics receiver to the metrics pipeline
39 # and adds the necessary volumes and volume mounts.
40 # Best used with mode = daemonset.
41 # See https://opentelemetry.io/docs/kubernetes/collector/components/#host-metrics-receiver for details on the receiver.
44 # Configures the Kubernetes Processor to add Kubernetes metadata.
45 # Adds the k8sattributes processor to all the pipelines
46 # and adds a preset of minimum required RBAC rules to ClusterRole.
47 # Best used with mode = daemonset.
48 # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor for details on the processor.
51 # When enabled the processor will extract all labels for an associated pod and add them as resource attributes.
52 # The label's exact name will be the key.
53 extractAllPodLabels: false
54 # When enabled the processor will extract all annotations for an associated pod and add them as resource attributes.
55 # The annotation's exact name will be the key.
56 extractAllPodAnnotations: false
57 # Configures the collector to collect node, pod, and container metrics from the API server on a kubelet.
58 # Adds the kubeletstats receiver to the metrics pipeline
59 # and adds the necessary rules to ClusterRole.
60 # Best used with mode = daemonset.
61 # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubeletstats-receiver for details on the receiver.
64 # Configures the collector to collect kubernetes events.
65 # Adds the k8sobjects receiver to the logs pipeline
66 # and collects kubernetes events by default.
67 # Best used with mode = deployment or statefulset.
68 # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-objects-receiver for details on the receiver.
71 # Configures the Kubernetes Cluster Receiver to collect cluster-level metrics.
72 # Adds the k8s_cluster receiver to the metrics pipeline
73 # and adds the necessary rules to ClusterRole.
74 # Can be used with mode = deployment, statefulset, or daemonset.
75 # When used as a daemonset, a leader election is set up to prevent duplication.
76 # See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-cluster-receiver for details on the receiver.
79 # When enabled with mode = daemonset, leader election is set up to prevent telemetry duplication.
80 # disableLeaderElection: false
81 # Configures the collector to collect logs and metrics from pods with specific annotations.
82 # This preset cannot be used together with the `logsCollection` preset.
83 # Adds the receiver_creator receiver to the logs and metrics pipelines
84 # and adds the necessary rules to ClusterRole.
85 # Best used with mode = daemonset.
86 # See https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/receivercreator/README.md#generate-receiver-configurations-from-provided-hints for details on the receiver.
92 # Configures the collector to collect profiling data.
93 # Adds profiles pipeline with the profiling receiver,
94 # and adds the necessary volumes, security context and host PID access.
96 # Warning: The profiling receiver requires privileged access and hostPID,
97 # so it should be used with a dedicated collector distribution (e.g. otelcol-ebpf-profiler)
98 # rather than the general-purpose k8s distribution. This avoids granting elevated privileges
99 # to the same collector that handles metrics, traces, and logs.
100 # See https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-ebpf-profiler for more details.
104 # Specifies whether a configMap should be created (true by default)
106 # Specifies an existing ConfigMap to be mounted to the pod
107 # The ConfigMap MUST include the collector configuration via a key named 'relay' or the collector will not start.
108 # This also supports template content, which will eventually be converted to yaml.
110 # Specifies the relative path to custom ConfigMap template file. This option SHOULD be used when bundling a custom
111 # ConfigMap template, as it enables pod restart via a template checksum annotation.
113# When enabled, the chart will configure the collector to emit its traces, metrics, and logs via OTLP over HTTP using the OTel Go SDK.
114# If internalTelemetryViaOTLP.metrics.enabled the chart will remove the default prometheus receiver (which was configured to scrape the Collector's metrics)
115# and the service.telemetry.metrics.address value.
116# Learn more about the Collector telemetry at https://opentelemetry.io/docs/collector/internal-telemetry/.
118# THIS OPTION IS EXPERIMENTAL AND SUBJECT TO BREAKING CHANGES
119internalTelemetryViaOTLP:
120 # The endpoint where the telemetry will be exported
122 # Optional headers to configure the exporters
124 # - name: "x-dest-auth"
125 # value: "some auth key"
128 # overrides internalTelemetryViaOTLP.endpoint for traces
130 # overrides internalTelemetryViaOTLP.headers for traces
134 # overrides internalTelemetryViaOTLP.endpoint for metrics
136 # overrides internalTelemetryViaOTLP.headers for metrics
140 # overrides internalTelemetryViaOTLP.endpoint for logs
142 # overrides internalTelemetryViaOTLP.headers for logs
144# Base collector configuration.
145# Supports templating. To escape existing instances of {{ }}, use {{` <original content> `}}.
146# For example, {{ .Chart.Name }} becomes {{` {{ .Chart.Name }} `}}.
151 # The health_check extension is mandatory for this chart.
152 # Without the health_check extension the collector will fail the readiness and liveness probes.
153 # The health_check extension can be modified, but should never be removed.
155 endpoint: ${env:MY_POD_IP}:13133
158 # Default memory limiter configuration for the collector based on k8s resource limits.
160 # check_interval is the time between measurements of memory usage.
162 # By default limit_mib is set to 80% of ".Values.resources.limits.memory"
164 # By default spike_limit_mib is set to 25% of ".Values.resources.limits.memory"
165 spike_limit_percentage: 25
170 endpoint: ${env:MY_POD_IP}:14250
172 endpoint: ${env:MY_POD_IP}:14268
174 endpoint: ${env:MY_POD_IP}:6831
178 endpoint: ${env:MY_POD_IP}:4317
180 endpoint: ${env:MY_POD_IP}:4318
181 # if internalTelemetryViaOTLP.metrics.enabled = true, prometheus receiver will be removed
185 - job_name: opentelemetry-collector
189 - ${env:MY_POD_IP}:8888
191 endpoint: ${env:MY_POD_IP}:9411
195 k8s.namespace.name: "${env:OTEL_K8S_NAMESPACE}"
196 k8s.node.name: "${env:OTEL_K8S_NODE_NAME}"
197 k8s.node.ip: "${env:OTEL_K8S_NODE_IP}"
198 k8s.pod.name: "${env:OTEL_K8S_POD_NAME}"
199 k8s.pod.ip: "${env:OTEL_K8S_POD_IP}"
200 host.name: "${env:OTEL_K8S_NODE_NAME}"
206 host: ${env:MY_POD_IP}
227 # if internalTelemetryViaOTLP.metrics.enabled = true, prometheus receiver will be removed
239# Helm currently has an issue (https://github.com/helm/helm/pull/12879) when using null to remove
240# default configuration from a subchart. The result is that you cannot remove default configuration
241# from `config`, such as a specific receiver or a specific pipeline, when the chart is used as a
244# Until the helm bug is fixed, this field is provided as an alternative when using this chart as a subchart.
245# It is not recommended to use this field when installing the chart directly.
247# When not empty, `alternateConfig` will be used to set the collector's configuration. It has NO default
248# values and IS NOT MERGED with config. Any configuration provided via `config` will be ignored when
249# `alternateConfig` is set. You MUST provide your own collector configuration.
251# Reminder that the healthcheck extension (or something else that provides the same functionality) is required.
253# Components configured by presets will be injected in the same way they are for `config`.
256 # If you want to use the core image `otel/opentelemetry-collector`, you also need to change `command.name` value to `otelcol`.
257 repository: cgr.dev/chainguard-private/opentelemetry-collector-contrib
258 pullPolicy: IfNotPresent
259 # Overrides the image tag whose default is the chart appVersion.
261 # When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value).
262 digest: sha256:d394ea5cc4cbfc8f112cde0209fae0fc06b1421b6d1b4208e0cee95a37983354
264# OpenTelemetry Collector executable
269 # Specifies whether a service account should be created
271 # Annotations to add to the service account
273 # The name of the service account to use.
274 # If not set and create is true, a name is generated using the fullname template
276 # Automatically mount a ServiceAccount's API credentials?
277 automountServiceAccountToken: true
279 # Specifies whether a clusterRole should be created
280 # Some presets also trigger the creation of a cluster role and cluster role binding.
281 # If using one of those presets, this field is no-op.
283 # Annotations to add to the clusterRole
284 # Can be used in combination with presets that create a cluster role.
286 # The name of the clusterRole to use.
287 # If not set a name is generated using the fullname template
288 # Can be used in combination with presets that create a cluster role.
290 # A set of rules as documented here: https://kubernetes.io/docs/reference/access-authn-authz/rbac/
291 # Can be used in combination with presets that create a cluster role to add additional rules.
304 # Annotations to add to the clusterRoleBinding
305 # Can be used in combination with presets that create a cluster role binding.
307 # The name of the clusterRoleBinding to use.
308 # If not set a name is generated using the fullname template
309 # Can be used in combination with presets that create a cluster role binding.
311podSecurityContext: {}
316topologySpreadConstraints: []
317# Allows for pod scheduler prioritisation
319# Allows for pod to use a specific runtime class, e.g. gvisor, kata-containers
320# Also useful for the pod security admissions plugins that rely on runtimeClassName
322terminationGracePeriodSeconds: 30
325# This also supports template content, which will eventually be converted to yaml.
327# This also supports template content, which will eventually be converted to yaml.
329# This also supports template content, which will eventually be converted to yaml.
331# Configuration for ports
332# nodePort is also allowed
373 # The metrics port is disabled by default. However you need to enable the port
374 # in order to use the ServiceMonitor (serviceMonitor.enabled) or PodMonitor (podMonitor.enabled).
379# When enabled, the chart will set the GOMEMLIMIT env var to 80% of the configured resources.limits.memory.
380# If no resources.limits.memory are defined then enabling does nothing.
381 # It is HIGHLY recommended to enable this setting and set a value for resources.limits.memory.
383# Resource limits & requests.
384# It is HIGHLY recommended to set resource limits.
391enableConfigChecksumAnnotation: true
394# Common labels to add to all otel-collector resources. Evaluated as a template.
396# app.kubernetes.io/part-of: my-app
398# Host networking requested for this pod. Use the host's network namespace.
400# Enable sharing the host's PID namespace with the pod.
401# WARNING: This grants visibility into all host processes and should only be enabled when required.
403# Adding entries to Pod /etc/hosts with HostAliases
404# https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
410 # Pod DNS policy: ClusterFirst, ClusterFirstWithHostNet, Default, or None
412# Custom DNS config. Required when DNS policy is None.
414# Custom kube scheduler name.
416# only used with deployment mode
418revisionHistoryLimit: 10
420# List of extra sidecars to add.
421# This also supports template content, which will eventually be converted to yaml.
430# image: busybox:latest
435# List of init container specs, e.g. for copying a binary to be executed as a lifecycle hook.
436# This also supports template content, which will eventually be converted to yaml.
437# Another usage of init containers is e.g. initializing filesystem permissions to the OTLP Collector user `10001` in case you are using persistence and the volume is producing a permission denied error for the OTLP Collector container.
441# image: busybox:latest
451# image: busybox:latest
455# - 'chown -R 10001: /var/lib/storage/otc' # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath`
457# - name: opentelemetry-collector-data # use the name of the volume used for persistence
458# mountPath: /var/lib/storage/otc # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath`
460# Pod lifecycle policies.
469# liveness probe configuration
470# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
473 # Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
474 # initialDelaySeconds: 1
475 # How often in seconds to perform the probe.
477 # Number of seconds after which the probe times out.
479 # Minimum consecutive failures for the probe to be considered failed after having succeeded.
480 # failureThreshold: 1
481 # Duration in seconds the pod needs to terminate gracefully upon probe failure.
482 # terminationGracePeriodSeconds: 10
486# readiness probe configuration
487# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
490 # Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
491 # initialDelaySeconds: 1
492 # How often (in seconds) to perform the probe.
494 # Number of seconds after which the probe times out.
496 # Minimum consecutive successes for the probe to be considered successful after having failed.
497 # successThreshold: 1
498 # Minimum consecutive failures for the probe to be considered failed after having succeeded.
499 # failureThreshold: 1
503# startup probe configuration
504# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
507# Number of seconds after the container has started before startup probes are initiated.
508# initialDelaySeconds: 1
509# How often in seconds to perform the probe.
511# Number of seconds after which the probe times out.
513# Minimum consecutive failures for the probe to be considered failed after having succeeded.
515# Duration in seconds the pod needs to terminate gracefully upon probe failure.
516# terminationGracePeriodSeconds: 10
522 # Enable the creation of a Service.
523 # By default, it's enabled on mode != daemonset.
524 # However, to enable it on mode = daemonset, its creation must be explicitly enabled
527 # Supported values: PreferClose (deprecated in K8s 1.33+), PreferSameZone, PreferSameNode
528 # trafficDistribution: PreferClose
530 # loadBalancerIP: 1.2.3.4
531 # loadBalancerSourceRanges: []
533 # By default, Service of type 'LoadBalancer' will be created setting 'externalTrafficPolicy: Cluster'
534 # unless another value is explicitly set.
535 # Possible values are Cluster or Local (https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip)
536 # externalTrafficPolicy: Cluster
538 # By default, Service will be created setting 'internalTrafficPolicy: Local' on mode = daemonset
539 # unless another value is explicitly set.
540 # Setting 'internalTrafficPolicy: Cluster' on a daemonset is not recommended
541 # internalTrafficPolicy: Cluster
545 # ingressClassName: nginx
547 # - host: collector.example.com
553 # - secretName: collector-tls
555 # - collector.example.com
557 # Additional ingresses - only created if ingress.enabled is true
558 # Useful for when differently annotated ingress services are required
559 # Each additional ingress needs key "name" set to something unique
560 additionalIngresses: []
562 # ingressClassName: nginx
565 # - host: collector.example.com
571 # - secretName: collector-tls
573 # - collector.example.com
575 # The pod monitor by default scrapes the metrics port.
576 # The metrics port needs to be enabled as well.
581 # additional labels for the PodMonitor
583 # release: kube-prometheus-stack
585 # The service monitor by default scrapes the metrics port.
586 # The metrics port needs to be enabled as well.
591 # additional labels for the ServiceMonitor
593 # release: kube-prometheus-stack
594 # Used to set relabeling and metricRelabeling configs on the ServiceMonitor
595 # https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
597 metricRelabelings: []
598# PodDisruptionBudget is used only if mode is "deployment" or "statefulset"
604# autoscaling is used only if mode is "deployment" or "statefulset"
610 targetCPUUtilizationPercentage: 80
611 # targetMemoryUtilizationPercentage: 80
612 # Supply an array of custom metrics to be used for autoscaling. It includes externalMetrics, objectMetrics, and podsMetrics.
613 additionalMetrics: []
616 # When 'mode: daemonset', maxSurge cannot be used when hostPort is set for any of the ports
619 strategy: RollingUpdate
623 # Create default rules for monitoring the collector
626 ## Additional labels for PrometheusRule alerts
627 additionalRuleLabels: {}
628 ## Additional annotations for PrometheusRule alerts
629 additionalRuleAnnotations: {}
630 # additional labels for the PrometheusRule
633 # volumeClaimTemplates for a statefulset
634 volumeClaimTemplates: []
635 podManagementPolicy: "Parallel"
636 # Controls if and how PVCs created by the StatefulSet are deleted. Available in Kubernetes 1.23+.
637 persistentVolumeClaimRetentionPolicy:
643 # Annotations to add to the NetworkPolicy
645 # Configure the 'from' clause of the NetworkPolicy.
646 # By default this will restrict traffic to ports enabled for the Collector. If
647 # you wish to further restrict traffic to other hosts or specific namespaces,
648 # see the standard NetworkPolicy 'spec.ingress.from' definition for more info:
649 # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
651 # # Allow traffic from any pod in any namespace, but not external hosts
652 # - namespaceSelector: {}
653 # # Allow external access from a specific cidr block
655 # cidr: 192.168.1.64/32
656 # # Allow access from pods in specific namespaces
657 # - namespaceSelector:
659 # - key: kubernetes.io/metadata.name
665 # Add additional ingress rules to specific ports
666 # Useful to allow external hosts/services to access specific ports
667 # An example is allowing an external prometheus server to scrape metrics
669 # See the standard NetworkPolicy 'spec.ingress' definition for more info:
670 # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
671 extraIngressRules: []
677 # cidr: 192.168.1.64/32
679 # Restrict egress traffic from the OpenTelemetry collector pod
680 # See the standard NetworkPolicy 'spec.egress' definition for more info:
681 # https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
684 # - namespaceSelector: {}
686 # cidr: 192.168.10.10/24
690# Allow containers to share processes across pod namespace
691shareProcessNamespace: false