Directory · Security Advisories · Pricing
Sign in
Directory
opentelemetry-collector logoHELM

opentelemetry-collector

Helm chart
Last changed
Request a free trial

Contact our team to test out this Helm chart and related images for free. Please also indicate any other images you would like to evaluate.

Overview
Chart versions
Default values
Chart metadata
Images

Tag:

1
# Default values for opentelemetry-collector.
2
# This is a YAML-formatted file.
3
# Declare variables to be passed into your templates.
4
5
nameOverride: ""
6
fullnameOverride: ""
7
# Valid values are "daemonset", "deployment", and "statefulset".
8
mode: ""
9
# Override the default apiVersion for custom controllers or for testing new API versions.
10
apiVersion: "apps/v1"
11
# Specify which namespace should be used to deploy the resources into
12
namespaceOverride: ""
13
# Handles basic configuration of components that
14
# also require k8s modifications to work correctly.
15
# .Values.config can be used to modify/add to a preset
16
# component configuration, but CANNOT be used to remove
17
# preset configuration. If you require removal of any
18
# sections of a preset configuration, you cannot use
19
# the preset. Instead, configure the component manually in
20
# .Values.config and use the other fields supplied in the
21
# values.yaml to configure k8s as necessary.
22
presets:
23
# Configures the collector to collect logs.
24
# Adds the filelog receiver to the logs pipeline
25
# and adds the necessary volumes and volume mounts.
26
# Best used with mode = daemonset.
27
# See https://opentelemetry.io/docs/kubernetes/collector/components/#filelog-receiver for details on the receiver.
28
logsCollection:
29
enabled: false
30
includeCollectorLogs: false
31
# Enabling this writes checkpoints in /var/lib/otelcol/ host directory.
32
# Note this changes collector's user to root, so that it can write to host directory.
33
storeCheckpoints: false
34
# The maximum bytes size of the recombined field.
35
# Once the size exceeds the limit, all received entries of the source will be combined and flushed.
36
maxRecombineLogSize: 102400
37
# Configures the collector to collect host metrics.
38
# Adds the hostmetrics receiver to the metrics pipeline
39
# and adds the necessary volumes and volume mounts.
40
# Best used with mode = daemonset.
41
# See https://opentelemetry.io/docs/kubernetes/collector/components/#host-metrics-receiver for details on the receiver.
42
hostMetrics:
43
enabled: false
44
# Configures the Kubernetes Processor to add Kubernetes metadata.
45
# Adds the k8sattributes processor to all the pipelines
46
# and adds a preset of minimum required RBAC rules to ClusterRole.
47
# Best used with mode = daemonset.
48
# See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-attributes-processor for details on the processor.
49
kubernetesAttributes:
50
enabled: false
51
# When enabled the processor will extract all labels for an associated pod and add them as resource attributes.
52
# The label's exact name will be the key.
53
extractAllPodLabels: false
54
# When enabled the processor will extract all annotations for an associated pod and add them as resource attributes.
55
# The annotation's exact name will be the key.
56
extractAllPodAnnotations: false
57
# Configures the collector to collect node, pod, and container metrics from the API server on a kubelet.
58
# Adds the kubeletstats receiver to the metrics pipeline
59
# and adds the necessary rules to ClusterRole.
60
# Best used with mode = daemonset.
61
# See https://opentelemetry.io/docs/kubernetes/collector/components/#kubeletstats-receiver for details on the receiver.
62
kubeletMetrics:
63
enabled: false
64
# Configures the collector to collect kubernetes events.
65
# Adds the k8sobjects receiver to the logs pipeline
66
# and collects kubernetes events by default.
67
# Best used with mode = deployment or statefulset.
68
# See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-objects-receiver for details on the receiver.
69
kubernetesEvents:
70
enabled: false
71
# Configures the Kubernetes Cluster Receiver to collect cluster-level metrics.
72
# Adds the k8s_cluster receiver to the metrics pipeline
73
# and adds the necessary rules to ClusterRole.
74
# Can be used with mode = deployment, statefulset, or daemonset.
75
# When used as a daemonset, leader election is set up to prevent duplication.
76
# See https://opentelemetry.io/docs/kubernetes/collector/components/#kubernetes-cluster-receiver for details on the receiver.
77
clusterMetrics:
78
enabled: false
79
# When enabled with mode = daemonset, leader election is set up to prevent telemetry duplication.
80
# disableLeaderElection: false
81
# Configures the collector to collect logs and metrics from pods with specific annotations.
82
# This preset cannot be used together with the `logsCollection` preset.
83
# Adds the receiver_creator receiver to the logs and metrics pipelines
84
# and adds the necessary rules to ClusterRole.
85
# Best used with mode = daemonset.
86
# See https://github.com/open-telemetry/opentelemetry-collector-contrib/blob/main/receiver/receivercreator/README.md#generate-receiver-configurations-from-provided-hints for details on the receiver.
87
annotationDiscovery:
88
logs:
89
enabled: false
90
metrics:
91
enabled: false
92
# Configures the collector to collect profiling data.
93
# Adds profiles pipeline with the profiling receiver,
94
# and adds the necessary volumes, security context and host PID access.
95
#
96
# Warning: The profiling receiver requires privileged access and hostPID,
97
# so it should be used with a dedicated collector distribution (e.g. otelcol-ebpf-profiler)
98
# rather than the general-purpose k8s distribution. This avoids granting elevated privileges
99
# to the same collector that handles metrics, traces, and logs.
100
# See https://github.com/open-telemetry/opentelemetry-collector-releases/tree/main/distributions/otelcol-ebpf-profiler for more details.
101
profiling:
102
enabled: false
103
configMap:
104
# Specifies whether a configMap should be created (true by default)
105
create: true
106
# Specifies an existing ConfigMap to be mounted to the pod
107
# The ConfigMap MUST include the collector configuration via a key named 'relay' or the collector will not start.
108
# This also supports template content, which will eventually be converted to yaml.
109
existingName: ""
110
# Specifies the relative path to custom ConfigMap template file. This option SHOULD be used when bundling a custom
111
# ConfigMap template, as it enables pod restart via a template checksum annotation.
112
# existingPath: ""
113
# When enabled, the chart will configure the collector to emit its traces, metrics, and logs over HTTP via OTLP using the OTel Go SDK.
114
# If internalTelemetryViaOTLP.metrics.enabled the chart will remove the default prometheus receiver (which was configured to scrape the Collector's metrics)
115
# and the service.telemetry.metrics.address value.
116
# Learn more about the Collector telemetry at https://opentelemetry.io/docs/collector/internal-telemetry/.
117
#
118
# THIS OPTION IS EXPERIMENTAL AND SUBJECT TO BREAKING CHANGES
119
internalTelemetryViaOTLP:
120
# The endpoint where the telemetry will be exported
121
endpoint: ""
122
# Optional headers to configure the exporters
123
headers: []
124
# - name: "x-dest-auth"
125
# value: "some auth key"
126
traces:
127
enabled: false
128
# overrides internalTelemetryViaOTLP.endpoint for traces
129
endpoint: ""
130
# overrides internalTelemetryViaOTLP.headers for traces
131
headers: []
132
metrics:
133
enabled: false
134
# overrides internalTelemetryViaOTLP.endpoint for metrics
135
endpoint: ""
136
# overrides internalTelemetryViaOTLP.headers for metrics
137
headers: []
138
logs:
139
enabled: false
140
# overrides internalTelemetryViaOTLP.endpoint for logs
141
endpoint: ""
142
# overrides internalTelemetryViaOTLP.headers for logs
143
headers: []
144
# Base collector configuration.
145
# Supports templating. To escape existing instances of {{ }}, use {{` <original content> `}}.
146
# For example, {{ .Chart.Name }} becomes {{` {{ .Chart.Name }} `}}.
147
config:
148
exporters:
149
debug: {}
150
extensions:
151
# The health_check extension is mandatory for this chart.
152
# Without the health_check extension the collector will fail the readiness and liveness probes.
153
# The health_check extension can be modified, but should never be removed.
154
health_check:
155
endpoint: ${env:MY_POD_IP}:13133
156
processors:
157
batch: {}
158
# Default memory limiter configuration for the collector based on k8s resource limits.
159
memory_limiter:
160
# check_interval is the time between measurements of memory usage.
161
check_interval: 5s
162
# By default limit_mib is set to 80% of ".Values.resources.limits.memory"
163
limit_percentage: 80
164
# By default spike_limit_mib is set to 25% of ".Values.resources.limits.memory"
165
spike_limit_percentage: 25
166
receivers:
167
jaeger:
168
protocols:
169
grpc:
170
endpoint: ${env:MY_POD_IP}:14250
171
thrift_http:
172
endpoint: ${env:MY_POD_IP}:14268
173
thrift_compact:
174
endpoint: ${env:MY_POD_IP}:6831
175
otlp:
176
protocols:
177
grpc:
178
endpoint: ${env:MY_POD_IP}:4317
179
http:
180
endpoint: ${env:MY_POD_IP}:4318
181
# if internalTelemetryViaOTLP.metrics.enabled = true, prometheus receiver will be removed
182
prometheus:
183
config:
184
scrape_configs:
185
- job_name: opentelemetry-collector
186
scrape_interval: 10s
187
static_configs:
188
- targets:
189
- ${env:MY_POD_IP}:8888
190
zipkin:
191
endpoint: ${env:MY_POD_IP}:9411
192
service:
193
telemetry:
194
resource:
195
k8s.namespace.name: "${env:OTEL_K8S_NAMESPACE}"
196
k8s.node.name: "${env:OTEL_K8S_NODE_NAME}"
197
k8s.node.ip: "${env:OTEL_K8S_NODE_IP}"
198
k8s.pod.name: "${env:OTEL_K8S_POD_NAME}"
199
k8s.pod.ip: "${env:OTEL_K8S_POD_IP}"
200
host.name: "${env:OTEL_K8S_NODE_NAME}"
201
metrics:
202
readers:
203
- pull:
204
exporter:
205
prometheus:
206
host: ${env:MY_POD_IP}
207
port: 8888
208
extensions:
209
- health_check
210
pipelines:
211
logs:
212
exporters:
213
- debug
214
processors:
215
- memory_limiter
216
- batch
217
receivers:
218
- otlp
219
metrics:
220
exporters:
221
- debug
222
processors:
223
- memory_limiter
224
- batch
225
receivers:
226
- otlp
227
# if internalTelemetryViaOTLP.metrics.enabled = true, prometheus receiver will be removed
228
- prometheus
229
traces:
230
exporters:
231
- debug
232
processors:
233
- memory_limiter
234
- batch
235
receivers:
236
- otlp
237
- jaeger
238
- zipkin
239
# Helm currently has an issue (https://github.com/helm/helm/pull/12879) when using null to remove
240
# default configuration from a subchart. The result is that you cannot remove default configuration
241
# from `config`, such as a specific receiver or a specific pipeline, when the chart is used as a
242
# subchart.
243
#
244
# Until the helm bug is fixed, this field is provided as an alternative when using this chart as a subchart.
245
# It is not recommended to use this field when installing the chart directly.
246
#
247
# When not empty, `alternateConfig` will be used to set the collector's configuration. It has NO default
248
# values and IS NOT MERGED with config. Any configuration provided via `config` will be ignored when
249
# `alternateConfig` is set. You MUST provide your own collector configuration.
250
#
251
# Reminder that the healthcheck extension (or something else that provides the same functionality) is required.
252
#
253
# Components configured by presets will be injected in the same way they are for `config`.
254
alternateConfig: {}
255
image:
256
# If you want to use the core image `otel/opentelemetry-collector`, you also need to change `command.name` value to `otelcol`.
257
repository: cgr.dev/chainguard-private/opentelemetry-collector-contrib
258
pullPolicy: IfNotPresent
259
# Overrides the image tag whose default is the chart appVersion.
260
tag: latest
261
# When digest is set to a non-empty value, images will be pulled by digest (regardless of tag value).
262
digest: sha256:d394ea5cc4cbfc8f112cde0209fae0fc06b1421b6d1b4208e0cee95a37983354
263
imagePullSecrets: []
264
# OpenTelemetry Collector executable
265
command:
266
name: ""
267
extraArgs: []
268
serviceAccount:
269
# Specifies whether a service account should be created
270
create: true
271
# Annotations to add to the service account
272
annotations: {}
273
# The name of the service account to use.
274
# If not set and create is true, a name is generated using the fullname template
275
name: ""
276
# Automatically mount a ServiceAccount's API credentials?
277
automountServiceAccountToken: true
278
clusterRole:
279
# Specifies whether a clusterRole should be created
280
# Some presets also trigger the creation of a cluster role and cluster role binding.
281
# If using one of those presets, this field is no-op.
282
create: false
283
# Annotations to add to the clusterRole
284
# Can be used in combination with presets that create a cluster role.
285
annotations: {}
286
# The name of the clusterRole to use.
287
# If not set a name is generated using the fullname template
288
# Can be used in combination with presets that create a cluster role.
289
name: ""
290
# A set of rules as documented here : https://kubernetes.io/docs/reference/access-authn-authz/rbac/
291
# Can be used in combination with presets that create a cluster role to add additional rules.
292
rules: []
293
# - apiGroups:
294
# - ''
295
# resources:
296
# - 'pods'
297
# - 'nodes'
298
# verbs:
299
# - 'get'
300
# - 'list'
301
# - 'watch'
302
303
clusterRoleBinding:
304
# Annotations to add to the clusterRoleBinding
305
# Can be used in combination with presets that create a cluster role binding.
306
annotations: {}
307
# The name of the clusterRoleBinding to use.
308
# If not set a name is generated using the fullname template
309
# Can be used in combination with presets that create a cluster role binding.
310
name: ""
311
podSecurityContext: {}
312
securityContext: {}
313
nodeSelector: {}
314
tolerations: []
315
affinity: {}
316
topologySpreadConstraints: []
317
# Allows for pod scheduler prioritisation
318
priorityClassName: ""
319
# Allows for pod to use a specific runtime class, e.g. gvisor, kata-containers
320
# Also useful for the pod security admissions plugins that rely on runtimeClassName
321
runtimeClassName: ""
322
terminationGracePeriodSeconds: 30
323
extraEnvs: []
324
extraEnvsFrom: []
325
# This also supports template content, which will eventually be converted to yaml.
326
extraVolumes: []
327
# This also supports template content, which will eventually be converted to yaml.
328
extraVolumeMounts: []
329
# This also supports template content, which will eventually be converted to yaml.
330
extraManifests: []
331
# Configuration for ports
332
# nodePort is also allowed
333
ports:
334
otlp:
335
enabled: true
336
containerPort: 4317
337
servicePort: 4317
338
hostPort: 4317
339
protocol: TCP
340
# nodePort: 30317
341
appProtocol: grpc
342
otlp-http:
343
enabled: true
344
containerPort: 4318
345
servicePort: 4318
346
hostPort: 4318
347
protocol: TCP
348
jaeger-compact:
349
enabled: true
350
containerPort: 6831
351
servicePort: 6831
352
hostPort: 6831
353
protocol: UDP
354
jaeger-thrift:
355
enabled: true
356
containerPort: 14268
357
servicePort: 14268
358
hostPort: 14268
359
protocol: TCP
360
jaeger-grpc:
361
enabled: true
362
containerPort: 14250
363
servicePort: 14250
364
hostPort: 14250
365
protocol: TCP
366
zipkin:
367
enabled: true
368
containerPort: 9411
369
servicePort: 9411
370
hostPort: 9411
371
protocol: TCP
372
metrics:
373
# The metrics port is disabled by default. However you need to enable the port
374
# in order to use the ServiceMonitor (serviceMonitor.enabled) or PodMonitor (podMonitor.enabled).
375
enabled: false
376
containerPort: 8888
377
servicePort: 8888
378
protocol: TCP
379
# When enabled, the chart will set the GOMEMLIMIT env var to 80% of the configured resources.limits.memory.
380
# If no resources.limits.memory are defined then enabling does nothing.
381
# It is HIGHLY recommended to enable this setting and set a value for resources.limits.memory.
382
useGOMEMLIMIT: true
383
# Resource limits & requests.
384
# It is HIGHLY recommended to set resource limits.
385
resources: {}
386
# resources:
387
# limits:
388
# cpu: 250m
389
# memory: 512Mi
390
391
enableConfigChecksumAnnotation: true
392
podAnnotations: {}
393
podLabels: {}
394
# Common labels to add to all otel-collector resources. Evaluated as a template.
395
additionalLabels: {}
396
# app.kubernetes.io/part-of: my-app
397
398
# Host networking requested for this pod. Use the host's network namespace.
399
hostNetwork: false
400
# Enable sharing the host's PID namespace with the pod.
401
# WARNING: This grants visibility into all host processes and should only be enabled when required.
402
hostPID: false
403
# Adding entries to Pod /etc/hosts with HostAliases
404
# https://kubernetes.io/docs/tasks/network/customize-hosts-file-for-pods/
405
hostAliases: []
406
# - ip: "1.2.3.4"
407
# hostnames:
408
# - "my.host.com"
409
410
# Pod DNS policy: ClusterFirst, ClusterFirstWithHostNet, Default, or None
411
dnsPolicy: ""
412
# Custom DNS config. Required when DNS policy is None.
413
dnsConfig: {}
414
# Custom kube scheduler name.
415
schedulerName: ""
416
# only used with deployment mode
417
replicaCount: 1
418
revisionHistoryLimit: 10
419
annotations: {}
420
# List of extra sidecars to add.
421
# This also supports template content, which will eventually be converted to yaml.
422
extraContainers: []
423
# extraContainers:
424
# - name: test
425
# command:
426
# - cp
427
# args:
428
# - /bin/sleep
429
# - /test/sleep
430
# image: busybox:latest
431
# volumeMounts:
432
# - name: test
433
# mountPath: /test
434
435
# List of init container specs, e.g. for copying a binary to be executed as a lifecycle hook.
436
# This also supports template content, which will eventually be converted to yaml.
437
# Another usage of init containers is e.g. initializing filesystem permissions to the OTLP Collector user `10001` in case you are using persistence and the volume is producing a permission denied error for the OTLP Collector container.
438
initContainers: []
439
# initContainers:
440
# - name: test
441
# image: busybox:latest
442
# command:
443
# - cp
444
# args:
445
# - /bin/sleep
446
# - /test/sleep
447
# volumeMounts:
448
# - name: test
449
# mountPath: /test
450
# - name: init-fs
451
# image: busybox:latest
452
# command:
453
# - sh
454
# - '-c'
455
# - 'chown -R 10001: /var/lib/storage/otc' # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath`
456
# volumeMounts:
457
# - name: opentelemetry-collector-data # use the name of the volume used for persistence
458
# mountPath: /var/lib/storage/otc # use the path given as per `extensions.file_storage.directory` & `extraVolumeMounts[x].mountPath`
459
460
# Pod lifecycle policies.
461
lifecycleHooks: {}
462
# lifecycleHooks:
463
# preStop:
464
# exec:
465
# command:
466
# - /test/sleep
467
# - "5"
468
469
# liveness probe configuration
470
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
471
##
472
livenessProbe:
473
# Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
474
# initialDelaySeconds: 1
475
# How often in seconds to perform the probe.
476
# periodSeconds: 10
477
# Number of seconds after which the probe times out.
478
# timeoutSeconds: 1
479
# Minimum consecutive failures for the probe to be considered failed after having succeeded.
480
# failureThreshold: 1
481
# Duration in seconds the pod needs to terminate gracefully upon probe failure.
482
# terminationGracePeriodSeconds: 10
483
httpGet:
484
port: 13133
485
path: /
486
# readiness probe configuration
487
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
488
##
489
readinessProbe:
490
# Number of seconds after the container has started before startup, liveness or readiness probes are initiated.
491
# initialDelaySeconds: 1
492
# How often (in seconds) to perform the probe.
493
# periodSeconds: 10
494
# Number of seconds after which the probe times out.
495
# timeoutSeconds: 1
496
# Minimum consecutive successes for the probe to be considered successful after having failed.
497
# successThreshold: 1
498
# Minimum consecutive failures for the probe to be considered failed after having succeeded.
499
# failureThreshold: 1
500
httpGet:
501
port: 13133
502
path: /
503
# startup probe configuration
504
# Ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-startup-probes/
505
##
506
startupProbe: {}
507
# Number of seconds after the container has started before startup probes are initiated.
508
# initialDelaySeconds: 1
509
# How often in seconds to perform the probe.
510
# periodSeconds: 10
511
# Number of seconds after which the probe times out.
512
# timeoutSeconds: 1
513
# Minimum consecutive failures for the probe to be considered failed after having succeeded.
514
# failureThreshold: 1
515
# Duration in seconds the pod needs to terminate gracefully upon probe failure.
516
# terminationGracePeriodSeconds: 10
517
# httpGet:
518
# port: 13133
519
# path: /
520
521
service:
522
# Enable the creation of a Service.
523
# By default, it's enabled on mode != daemonset.
524
# However, to enable it on mode = daemonset, its creation must be explicitly enabled
525
# enabled: true
526
type: ClusterIP
527
# Supported values: PreferClose (deprecated in K8s 1.33+), PreferSameZone, PreferSameNode
528
# trafficDistribution: PreferClose
529
# type: LoadBalancer
530
# loadBalancerIP: 1.2.3.4
531
# loadBalancerSourceRanges: []
532
533
# By default, Service of type 'LoadBalancer' will be created setting 'externalTrafficPolicy: Cluster'
534
# unless other value is explicitly set.
535
# Possible values are Cluster or Local (https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip)
536
# externalTrafficPolicy: Cluster
537
annotations: {}
538
# By default, Service will be created setting 'internalTrafficPolicy: Local' on mode = daemonset
539
# unless other value is explicitly set.
540
# Setting 'internalTrafficPolicy: Cluster' on a daemonset is not recommended
541
# internalTrafficPolicy: Cluster
542
ingress:
543
enabled: false
544
# annotations: {}
545
# ingressClassName: nginx
546
# hosts:
547
# - host: collector.example.com
548
# paths:
549
# - path: /
550
# pathType: Prefix
551
# port: 4318
552
# tls:
553
# - secretName: collector-tls
554
# hosts:
555
# - collector.example.com
556
557
# Additional ingresses - only created if ingress.enabled is true
558
# Useful for when differently annotated ingress services are required
559
# Each additional ingress needs key "name" set to something unique
560
additionalIngresses: []
561
# - name: cloudwatch
562
# ingressClassName: nginx
563
# annotations: {}
564
# hosts:
565
# - host: collector.example.com
566
# paths:
567
# - path: /
568
# pathType: Prefix
569
# port: 4318
570
# tls:
571
# - secretName: collector-tls
572
# hosts:
573
# - collector.example.com
574
podMonitor:
575
# The pod monitor by default scrapes the metrics port.
576
# The metrics port needs to be enabled as well.
577
enabled: false
578
metricsEndpoints:
579
- port: metrics
580
# interval: 15s
581
# additional labels for the PodMonitor
582
extraLabels: {}
583
# release: kube-prometheus-stack
584
serviceMonitor:
585
# The service monitor by default scrapes the metrics port.
586
# The metrics port needs to be enabled as well.
587
enabled: false
588
metricsEndpoints:
589
- port: metrics
590
# interval: 15s
591
# additional labels for the ServiceMonitor
592
extraLabels: {}
593
# release: kube-prometheus-stack
594
# Used to set relabeling and metricRelabeling configs on the ServiceMonitor
595
# https://prometheus.io/docs/prometheus/latest/configuration/configuration/#relabel_config
596
relabelings: []
597
metricRelabelings: []
598
# PodDisruptionBudget is used only if mode is "deployment" or "statefulset"
599
podDisruptionBudget:
600
enabled: false
601
# minAvailable: 2
602
# maxUnavailable: 1
603
604
# autoscaling is used only if mode is "deployment" or "statefulset"
605
autoscaling:
606
enabled: false
607
minReplicas: 1
608
maxReplicas: 10
609
behavior: {}
610
targetCPUUtilizationPercentage: 80
611
# targetMemoryUtilizationPercentage: 80
612
# Supply an array of custom metrics to be used for autoscaling. It includes externalMetrics, objectMetrics, and podsMetrics.
613
additionalMetrics: []
614
rollout:
615
rollingUpdate: {}
616
# When 'mode: daemonset', maxSurge cannot be used when hostPort is set for any of the ports
617
# maxSurge: 25%
618
# maxUnavailable: 0
619
strategy: RollingUpdate
620
prometheusRule:
621
enabled: false
622
groups: []
623
# Create default rules for monitoring the collector
624
defaultRules:
625
enabled: false
626
## Additional labels for PrometheusRule alerts
627
additionalRuleLabels: {}
628
## Additional annotations for PrometheusRule alerts
629
additionalRuleAnnotations: {}
630
# additional labels for the PrometheusRule
631
extraLabels: {}
632
statefulset:
633
# volumeClaimTemplates for a statefulset
634
volumeClaimTemplates: []
635
podManagementPolicy: "Parallel"
636
# Controls if and how PVCs created by the StatefulSet are deleted. Available in Kubernetes 1.23+.
637
persistentVolumeClaimRetentionPolicy:
638
enabled: false
639
whenDeleted: Retain
640
whenScaled: Retain
641
networkPolicy:
642
enabled: false
643
# Annotations to add to the NetworkPolicy
644
annotations: {}
645
# Configure the 'from' clause of the NetworkPolicy.
646
# By default this will restrict traffic to ports enabled for the Collector. If
647
# you wish to further restrict traffic to other hosts or specific namespaces,
648
# see the standard NetworkPolicy 'spec.ingress.from' definition for more info:
649
# https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
650
allowIngressFrom: []
651
# # Allow traffic from any pod in any namespace, but not external hosts
652
# - namespaceSelector: {}
653
# # Allow external access from a specific cidr block
654
# - ipBlock:
655
# cidr: 192.168.1.64/32
656
# # Allow access from pods in specific namespaces
657
# - namespaceSelector:
658
# matchExpressions:
659
# - key: kubernetes.io/metadata.name
660
# operator: In
661
# values:
662
# - "cats"
663
# - "dogs"
664
665
# Add additional ingress rules to specific ports
666
# Useful to allow external hosts/services to access specific ports
667
# An example is allowing an external prometheus server to scrape metrics
668
#
669
# See the standard NetworkPolicy 'spec.ingress' definition for more info:
670
# https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
671
extraIngressRules: []
672
# - ports:
673
# - port: metrics
674
# protocol: TCP
675
# from:
676
# - ipBlock:
677
# cidr: 192.168.1.64/32
678
679
# Restrict egress traffic from the OpenTelemetry collector pod
680
# See the standard NetworkPolicy 'spec.egress' definition for more info:
681
# https://kubernetes.io/docs/reference/kubernetes-api/policy-resources/network-policy-v1/
682
egressRules: []
683
# - to:
684
# - namespaceSelector: {}
685
# - ipBlock:
686
# cidr: 192.168.10.10/24
687
# ports:
688
# - port: 1234
689
# protocol: TCP
690
# Allow containers to share processes across pod namespace
691
shareProcessNamespace: false
692

The trusted source for open source

Talk to an expert
PrivacyTerms

Product

Chainguard Containers · Chainguard Libraries · Chainguard VMs · Chainguard OS Packages · Chainguard Actions · Chainguard Agent Skills · Integrations · Pricing
© 2026 Chainguard, Inc. All Rights Reserved.
Chainguard® and the Chainguard logo are registered trademarks of Chainguard, Inc. in the United States and/or other countries.
The other respective trademarks mentioned on this page are owned by the respective companies and use of them does not imply any affiliation or endorsement.