## Default values for Datadog Agent
## See Datadog helm documentation to learn more:
## https://docs.datadoghq.com/agent/kubernetes/helm/

## FOR AN EFFORTLESS UPGRADE PATH, DO NOT COPY THIS FILE AS YOUR OWN values.yaml.
## ONLY SET THE VALUES YOU WANT TO OVERRIDE IN YOUR values.yaml.

# nameOverride -- Override name of app
nameOverride: # ""

# fullnameOverride -- Override the fully qualified app name
fullnameOverride: # ""

# kubeVersionOverride -- Override Kubernetes version detection. Useful for GitOps tools like FluxCD that don't expose the real cluster version to Helm
kubeVersionOverride: # "1.28.0"

# targetSystem -- Target OS for this deployment (possible values: linux, windows)
targetSystem: "linux"

# commonLabels -- Labels to apply to all resources
commonLabels: {}
#   team_name: dev

# registry -- Registry to use for all Agent images (default depends on datadog.site and registryMigrationMode values)
## Currently we offer Datadog Agent images on:
## Datadog - use registry.datadoghq.com
## GCR US - use gcr.io/datadoghq
## GCR Europe - use eu.gcr.io/datadoghq
## GCR Asia - use asia.gcr.io/datadoghq
## Azure - use datadoghq.azurecr.io
## AWS - use public.ecr.aws/datadog
## DockerHub - use docker.io/datadog
## If you are on GKE Autopilot, you must use a gcr.io variant registry.
registry: # gcr.io/datadoghq

# registryMigrationMode -- Controls the gradual migration of the default image registry to
# registry.datadoghq.com, replacing site-specific regional mirrors (GCR, ACR).
# This setting has no effect when `registry` is explicitly set.
# GKE Autopilot and GKE GDC clusters are excluded and always use their site-specific gcr.io variant.
# US1-FED (ddog-gov.com) is excluded and always uses public.ecr.aws/datadog.
# US3 (us3.datadoghq.com) is excluded and always uses datadoghq.azurecr.io.
## "auto" (default): enable registry.datadoghq.com for sites where the migration is rolled out.
##   Currently enabled: AP1 (ap1.datadoghq.com), AP2 (ap2.datadoghq.com), US5 (us5.datadoghq.com), EU1 (datadoghq.eu), US1 (datadoghq.com, when APM is disabled).
## "all": enable registry.datadoghq.com for all sites (AP1, AP2, EU, US1, US5).
## "": disable the migration, keeping site-specific registries.
registryMigrationMode: "auto"

datadog:
  # datadog.apiKey -- Your Datadog API key
  ## ref: https://app.datadoghq.com/account/settings#agent/kubernetes
  apiKey: #

  # datadog.apiKeyExistingSecret -- Use an existing Secret which stores the API key instead of creating a new one. The value should be set with the `api-key` key inside the secret.
  ## If set, this parameter takes precedence over "apiKey".
  apiKeyExistingSecret: #

  # datadog.appKey -- Datadog APP key required to use metricsProvider
  ## If you are using clusterAgent.metricsProvider.enabled = true, you must set
  ## a Datadog application key for read access to your metrics.
  appKey: #

  # datadog.appKeyExistingSecret -- Use an existing Secret which stores the APP key instead of creating a new one. The value should be set with the `app-key` key inside the secret.
  ## If set, this parameter takes precedence over "appKey".
  appKeyExistingSecret: #

  # datadog.secretAnnotations -- Annotations to add to the Secrets
  secretAnnotations: {}
  #   key: "value"

  ## Configure the secret backend feature: https://docs.datadoghq.com/agent/guide/secrets-management
  ## Examples: https://docs.datadoghq.com/agent/guide/secrets-management/#setup-examples-1
  secretBackend:
    # datadog.secretBackend.command -- Configure the secret backend command, the path to the secret backend binary.
    ## Note: If the command value is "/readsecret_multiple_providers.sh" and datadog.secretBackend.enableGlobalPermissions is enabled below, the agents will have permissions to get secret objects across the cluster.
    ## Read more about "/readsecret_multiple_providers.sh": https://docs.datadoghq.com/agent/guide/secrets-management/#script-for-reading-from-multiple-secret-providers-readsecret_multiple_providerssh
    command: # "/readsecret.sh" or "/readsecret_multiple_providers.sh" or any custom binary path

    # datadog.secretBackend.arguments -- Configure the secret backend command arguments (space-separated strings).
    arguments: # "/etc/secret-volume" or any other custom arguments

    # datadog.secretBackend.timeout -- Configure the secret backend command timeout in seconds.
    timeout: # 30

    # datadog.secretBackend.refreshInterval -- [PREVIEW] Configure the secret backend command refresh interval in seconds.
    refreshInterval: # 0

    # datadog.secretBackend.type -- Configure the built-in secret backend type.
    # Alternative to command; when set, the Agent uses the built-in backend to resolve secrets. Requires Agent 7.70+.
    type: # Examples: "file.text", "k8s.secrets", "docker.secrets", "aws.secrets", etc.

    # datadog.secretBackend.config -- Additional configuration for the secret backend type.
    config: {}
    # Example for k8s.secrets:
    #   token_path: "/custom/path/token"
    #   ca_path: "/custom/path/ca.crt"

    # datadog.secretBackend.enableGlobalPermissions -- Whether to create a global permission allowing Datadog agents to read all secrets when `datadog.secretBackend.command` is set to `"/readsecret_multiple_providers.sh"` or `datadog.secretBackend.type` is set.
    enableGlobalPermissions: true

    # datadog.secretBackend.roles -- Creates roles for Datadog to read the specified secrets, replacing `datadog.secretBackend.enableGlobalPermissions`.
    roles: []
    # - namespace: secret-location-namespace
    #   secrets:
    #     - secret-1
    #     - secret-2
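  ## Once a backend command is configured, values that support secrets can be passed as
  ## ENC[] handles instead of plain text. A minimal sketch, assuming
  ## "/readsecret_multiple_providers.sh" and a Kubernetes Secret named "datadog-keys" in
  ## the "datadog" namespace (both names are hypothetical):
  #
  # secretBackend:
  #   command: "/readsecret_multiple_providers.sh"
  # apiKey: ENC[k8s_secret@datadog/datadog-keys/api-key]
  #
  ## See https://docs.datadoghq.com/agent/guide/secrets-management/ for the exact handle
  ## formats each helper supports.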
  # datadog.securityContext -- Allows you to overwrite the default PodSecurityContext on the DaemonSet or Deployment
  securityContext:
    runAsUser: 0
  #  seLinuxOptions:
  #    user: "system_u"
  #    role: "system_r"
  #    type: "spc_t"
  #    level: "s0"

  # datadog.hostVolumeMountPropagation -- Allows specifying the `mountPropagation` value on all volumeMounts using HostPath
  ## ref: https://kubernetes.io/docs/concepts/storage/volumes/#mount-propagation
  hostVolumeMountPropagation: None

  # datadog.clusterName -- Set a unique cluster name to allow scoping hosts and Cluster Checks easily
  ## The name must be unique and must be dot-separated tokens with the following restrictions:
  ## * Lowercase letters, numbers, and hyphens only.
  ## * Must start with a letter.
  ## * Must end with a number or a letter.
  ## * Overall length must not exceed 80 characters.
  ## Unlike GKE cluster names, dots are allowed here:
  ## https://cloud.google.com/kubernetes-engine/docs/reference/rest/v1beta1/projects.locations.clusters#Cluster.FIELDS.name
  clusterName: #
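  ## For instance, a name that satisfies the rules above (hypothetical):
  # clusterName: prod-1.us-east
  ## whereas "Prod_Cluster" would be rejected (uppercase letters and underscores).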
  # datadog.site -- The site of the Datadog intake to send Agent data to.
  # (documentation: https://docs.datadoghq.com/getting_started/site/)
  ## Set to 'datadoghq.com' to send data to the US1 site (default).
  ## Set to 'datadoghq.eu' to send data to the EU site.
  ## Set to 'us3.datadoghq.com' to send data to the US3 site.
  ## Set to 'us5.datadoghq.com' to send data to the US5 site.
  ## Set to 'ddog-gov.com' to send data to the US1-FED site.
  ## Set to 'ap1.datadoghq.com' to send data to the AP1 site.
  site: # datadoghq.com

  # datadog.dd_url -- The host of the Datadog intake server to send Agent data to. Only set this option if you need the Agent to send data to a custom URL.
  ## Overrides the site setting defined in "site".
  dd_url: # https://app.datadoghq.com

  # datadog.logLevel -- Set logging verbosity; valid log levels are: trace, debug, info, warn, error, critical, off
  logLevel: INFO

  # datadog.kubeStateMetricsEnabled -- If true, deploys the kube-state-metrics deployment
  ## ref: https://github.com/kubernetes/kube-state-metrics/tree/kube-state-metrics-helm-chart-2.13.2/charts/kube-state-metrics
  # The kubeStateMetricsEnabled option will be removed in the 4.0 version of the Datadog Agent chart.
  kubeStateMetricsEnabled: false

  kubeStateMetricsNetworkPolicy:
    # datadog.kubeStateMetricsNetworkPolicy.create -- If true, create a NetworkPolicy for kube-state-metrics
    create: false

  kubeStateMetricsCore:
    # datadog.kubeStateMetricsCore.enabled -- Enable the kubernetes_state_core check in the Cluster Agent (Requires Cluster Agent 1.12.0+)
    ## ref: https://docs.datadoghq.com/integrations/kubernetes_state_core
    enabled: true

    rbac:
      # datadog.kubeStateMetricsCore.rbac.create -- If true, create & use RBAC resources
      create: true

    # datadog.kubeStateMetricsCore.ignoreLegacyKSMCheck -- Disable the auto-configuration of the legacy kubernetes_state check (taken into account only when datadog.kubeStateMetricsCore.enabled is true)
    ## Disabling this field is not recommended as it results in enabling both checks; it can, however, be useful during the migration phase.
    ## Migration guide: https://docs.datadoghq.com/integrations/kubernetes_state_core/?tab=helm#migration-from-kubernetes_state-to-kubernetes_state_core
    ignoreLegacyKSMCheck: true

    # datadog.kubeStateMetricsCore.collectSecretMetrics -- Enable watching secret objects and collecting their corresponding metrics kubernetes_state.secret.*
    ## Configuring this field will change the default kubernetes_state_core check configuration and the RBACs granted to the Datadog Cluster Agent to run the kubernetes_state_core check.
    collectSecretMetrics: true

    # datadog.kubeStateMetricsCore.collectConfigMaps -- Enable watching configmap objects and collecting their corresponding metrics kubernetes_state.configmap.*
    ## Configuring this field will change the default kubernetes_state_core check configuration and the RBACs granted to the Datadog Cluster Agent to run the kubernetes_state_core check.
    collectConfigMaps: true

    # datadog.kubeStateMetricsCore.collectVpaMetrics -- Enable watching VPA objects and collecting their corresponding metrics kubernetes_state.vpa.*
    ## Configuring this field will change the default kubernetes_state_core check configuration and the RBACs granted to the Datadog Cluster Agent to run the kubernetes_state_core check.
    collectVpaMetrics: false

    # datadog.kubeStateMetricsCore.collectCrdMetrics -- Enable watching CRD objects and collecting their corresponding metrics kubernetes_state.crd.*
    ## Configuring this field will change the default kubernetes_state_core check configuration to run the kubernetes_state_core check.
    collectCrdMetrics: false
    # datadog.kubeStateMetricsCore.collectCrMetrics -- Enable watching CustomResource objects and collecting their corresponding metrics kubernetes_state_customresource.* (Requires Cluster Agent 7.63.0+)
    ## Configuring this field will change the default kubernetes_state_core check configuration and the RBACs granted to the Datadog Cluster Agent to run the kubernetes_state_core check.
    ##
    ## See https://github.com/kubernetes/kube-state-metrics/blob/main/docs/metrics/extend/customresourcestate-metrics.md for a full description of each field.
    collectCrMetrics: []
    # - groupVersionKind:
    #     group: myteam.io
    #     kind: "Foo"
    #     version: "v1"
    #   resource: "foos" # optional; if not set, the resource will be pluralized from the kind by adding "s" to the end
    #   metrics:
    #     - name: "uptime"
    #       help: "Foo uptime"
    #       each:
    #         type: Gauge
    #         gauge:
    #           path: [status, uptime]

    # datadog.kubeStateMetricsCore.collectApiServicesMetrics -- Enable watching apiservices objects and collecting their corresponding metrics kubernetes_state.apiservice.* (Requires Cluster Agent 7.45.0+)
    ## Configuring this field will change the default kubernetes_state_core check configuration and the RBACs granted to the Datadog Cluster Agent to run the kubernetes_state_core check.
    collectApiServicesMetrics: false

    # datadog.kubeStateMetricsCore.useClusterCheckRunners -- For large clusters where the Kubernetes State Metrics Core check needs to be distributed on dedicated workers.
    ## Configuring this field will create a separate deployment which will run Cluster Checks, including Kubernetes State Metrics Core.
    ## If clusterChecksRunner.enabled is true, it's recommended to set this flag to true as well to better utilize dedicated workers and reduce load on the Cluster Agent.
    ## ref: https://docs.datadoghq.com/agent/cluster_agent/clusterchecksrunner?tab=helm
    useClusterCheckRunners: false

    # datadog.kubeStateMetricsCore.labelsAsTags -- Extra labels to collect from resources and turn into Datadog tags.
    ## It has the following structure:
    ## labelsAsTags:
    ##   <resource1>:          # can be pod, deployment, node, etc.
    ##     <label1>: <tag1>    # where <label1> is the Kubernetes label and <tag1> is the Datadog tag
    ##     <label2>: <tag2>
    ##   <resource2>:
    ##     <label3>: <tag3>
    labelsAsTags: {}
    #  pod:
    #    app: app
    #  node:
    #    zone: zone
    #    team: team

    # datadog.kubeStateMetricsCore.annotationsAsTags -- Extra annotations to collect from resources and turn into Datadog tags.
    ## It has the following structure:
    ## annotationsAsTags:
    ##   <resource1>:              # can be pod, deployment, node, etc.
    ##     <annotation1>: <tag1>   # where <annotation1> is the Kubernetes annotation and <tag1> is the Datadog tag
    ##     <annotation2>: <tag2>
    ##   <resource2>:
    ##     <annotation3>: <tag3>
    ##
    ## Warning: the annotation must match the transformation done by kube-state-metrics;
    ## for example, tags.datadoghq.com/version becomes tags_datadoghq_com_version.
    annotationsAsTags: {}
    #  pod:
    #    app: app
    #  node:
    #    zone: zone
    #    team: team

    # datadog.kubeStateMetricsCore.tags -- List of static tags to attach to all KSM metrics
    tags: []
    # datadog.kubeStateMetricsCore.namespaces -- Restrict the kubernetes_state_core check to collect metrics only from the specified namespaces.
    ## When set, namespace-scoped RBAC is created as a Role+RoleBinding per listed namespace instead of a cluster-wide ClusterRole.
    ## Cluster-scoped resources (nodes, persistentvolumes, storageclasses, etc.) are still collected via a ClusterRole.
    namespaces: []
    # - default
    # - kube-system

  ## Manage the Cluster Checks feature
  ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/
  ## Autodiscovery via Kube Service annotations is automatically enabled
  clusterChecks:
    # datadog.clusterChecks.enabled -- Enable the Cluster Checks feature on both the Cluster Agent and the daemonset
    enabled: true
    # datadog.clusterChecks.shareProcessNamespace -- Set the process namespace sharing on the Cluster Checks Agent
    shareProcessNamespace: false

  # datadog.nodeLabelsAsTags -- Provide a mapping of Kubernetes Node Labels to Datadog Tags
  nodeLabelsAsTags: {}
  #   beta.kubernetes.io/instance-type: aws-instance-type
  #   kubernetes.io/role: kube_role
  #   <KUBERNETES_NODE_LABEL>: <DATADOG_TAG_KEY>

  # datadog.podLabelsAsTags -- Provide a mapping of Kubernetes Labels to Datadog Tags
  podLabelsAsTags: {}
  #   app: kube_app
  #   release: helm_release
  #   <KUBERNETES_LABEL>: <DATADOG_TAG_KEY>

  # datadog.podAnnotationsAsTags -- Provide a mapping of Kubernetes Annotations to Datadog Tags
  podAnnotationsAsTags: {}
  #   iam.amazonaws.com/role: kube_iamrole
  #   <KUBERNETES_ANNOTATION>: <DATADOG_TAG_KEY>

  # datadog.namespaceLabelsAsTags -- Provide a mapping of Kubernetes Namespace Labels to Datadog Tags
  namespaceLabelsAsTags: {}
  #   env: environment
  #   <KUBERNETES_NAMESPACE_LABEL>: <DATADOG_TAG_KEY>

  # datadog.namespaceAnnotationsAsTags -- Provide a mapping of Kubernetes Namespace Annotations to Datadog Tags
  namespaceAnnotationsAsTags: {}
  #   env: environment
  #   <KUBERNETES_NAMESPACE_ANNOTATION>: <DATADOG_TAG_KEY>

  # datadog.kubernetesResourcesLabelsAsTags -- Provide a mapping of Kubernetes Resources Labels to Datadog Tags
  kubernetesResourcesLabelsAsTags: {}
  #   pods:
  #     x-ref: reference
  #   namespaces:
  #     kubernetes.io/metadata.name: name-as-tag
  #   <KUBERNETES_RESOURCE>:
  #     <KUBERNETES_LABEL>: <DATADOG_TAG_KEY>

  # datadog.kubernetesResourcesAnnotationsAsTags -- Provide a mapping of Kubernetes Resources Annotations to Datadog Tags
  kubernetesResourcesAnnotationsAsTags: {}
  #   pods:
  #     x-ann: annotation-reference
  #   namespaces:
  #     stale-annotation: annotation-as-tag
  #   <KUBERNETES_RESOURCE>:
  #     <KUBERNETES_ANNOTATION>: <DATADOG_TAG_KEY>

  originDetectionUnified:
    # datadog.originDetectionUnified.enabled -- Enables the unified mechanism for origin detection. Default: false. (Requires Agent 7.54.0+)
    enabled: false

  # datadog.tags -- List of static tags to attach to every metric, event and service check collected by this Agent.
  ## Learn more about tagging: https://docs.datadoghq.com/tagging/
  tags: []
  #   - "<KEY_1>:<VALUE_1>"
  #   - "<KEY_2>:<VALUE_2>"

  # datadog.checksCardinality -- Sets the tag cardinality for the checks run by the Agent.
  ## ref: https://docs.datadoghq.com/getting_started/tagging/assigning_tags/?tab=containerizedenvironments#environment-variables
  checksCardinality: # low, orchestrator or high (not set by default to avoid overriding existing DD_CHECKS_TAG_CARDINALITY configurations; the default value in the Agent is low)

  # kubelet configuration
  kubelet:
    # datadog.kubelet.host -- Override kubelet IP
    host:
      valueFrom:
        fieldRef:
          fieldPath: status.hostIP
    # datadog.kubelet.tlsVerify -- Toggle kubelet TLS verification
    # @default -- true
    tlsVerify: # false
    # datadog.kubelet.hostCAPath -- Path (on host) where the kubelet CA certificate is stored
    # @default -- None (no mount from host)
    hostCAPath:
    # datadog.kubelet.agentCAPath -- Path (inside Agent containers) where the kubelet CA certificate is stored
    # @default -- /var/run/host-kubelet-ca.crt if hostCAPath else /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
    agentCAPath:
    # datadog.kubelet.podLogsPath -- Path (on host) where the pods' logs are located
    # @default -- /var/log/pods on Linux, C:\var\log\pods on Windows
    podLogsPath:
    # datadog.kubelet.coreCheckEnabled -- Toggle whether the kubelet core check should be used instead of the Python check. (Requires Agent/Cluster Agent 7.53.0+)
    # @default -- true
    coreCheckEnabled: true
    # datadog.kubelet.podResourcesSocketDir -- Path (on host) where the kubelet.sock socket for the PodResources API is located
    # @default -- /var/lib/kubelet/pod-resources
    podResourcesSocketDir: /var/lib/kubelet/pod-resources
    # datadog.kubelet.useApiServer -- Enable this to query the pod list from the API server instead of the kubelet. (Requires Agent 7.65.0+)
    # @default -- false
    useApiServer: false
    # datadog.kubelet.fineGrainedAuthorization -- Enable fine-grained authorization for the kubelet (requires Kubernetes 1.32+)
    fineGrainedAuthorization: false

  # datadog.expvarPort -- Specify the port to expose pprof and expvar so it does not interfere with the Cluster Agent metrics port, which defaults to 5000
  expvarPort: 6000

  ## dogstatsd configuration
  ## ref: https://docs.datadoghq.com/agent/kubernetes/dogstatsd/
  ## To emit custom metrics from your Kubernetes application, use DogStatsD.
  dogstatsd:
    # datadog.dogstatsd.port -- Override the Agent DogStatsD port
    ## Note: Make sure your client is sending to the same UDP port.
    port: 8125

    # datadog.dogstatsd.originDetection -- Enable origin detection for container tagging
    ## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging
    originDetection: false

    # datadog.dogstatsd.tags -- List of static tags to attach to every custom metric, event and service check collected by DogStatsD.
    ## Learn more about tagging: https://docs.datadoghq.com/tagging/
    tags: []
    #   - "<KEY_1>:<VALUE_1>"
    #   - "<KEY_2>:<VALUE_2>"

    # datadog.dogstatsd.tagCardinality -- Sets the tag cardinality relative to the origin detection
    ## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/#using-origin-detection-for-container-tagging
    tagCardinality: low

    # datadog.dogstatsd.useSocketVolume -- Enable DogStatsD over a Unix Domain Socket with a HostVolume
    ## ref: https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
    useSocketVolume: true

    # datadog.dogstatsd.socketPath -- Path to the DogStatsD socket
    socketPath: /var/run/datadog/dsd.socket

    # datadog.dogstatsd.hostSocketPath -- Host path to the DogStatsD socket
    hostSocketPath: /var/run/datadog

    # datadog.dogstatsd.useHostPort -- Sets the hostPort to the same value as the container port
    ## Needs to be used for sending custom metrics.
    ## The ports need to be available on all hosts.
    ##
    ## WARNING: Make sure that hosts using this are properly firewalled; otherwise
    ## metrics and traces are accepted from any host able to connect to this host.
    useHostPort: false

    # datadog.dogstatsd.useHostPID -- Run the agent in the host's PID namespace
    ## DEPRECATED: use datadog.useHostPID instead.
    ## This is required for DogStatsD origin detection to work.
    ## See https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
    useHostPID: false

    # datadog.dogstatsd.nonLocalTraffic -- Enable this to make each node accept non-local statsd traffic (from outside of the pod)
    ## ref: https://github.com/DataDog/docker-dd-agent#environment-variables
    nonLocalTraffic: true
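  ## A minimal sketch of a workload pod consuming the socket configured above, assuming a
  ## DogStatsD client that honors DD_DOGSTATSD_URL (the container and volume names are
  ## hypothetical):
  #
  # spec:
  #   containers:
  #     - name: my-app
  #       env:
  #         - name: DD_DOGSTATSD_URL
  #           value: "unix:///var/run/datadog/dsd.socket"
  #       volumeMounts:
  #         - name: dsdsocket
  #           mountPath: /var/run/datadog
  #           readOnly: true
  #   volumes:
  #     - name: dsdsocket
  #       hostPath:
  #         path: /var/run/datadog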
  # datadog.useHostPID -- Run the agent in the host's PID namespace, required for origin detection
  # / unified service tagging
  ## This is required for DogStatsD origin detection to work in the dogstatsd and trace agents
  ## See https://docs.datadoghq.com/developers/dogstatsd/unix_socket/
  useHostPID: true

  # datadog.collectEvents -- Enable this to start event collection from the Kubernetes API
  ## ref: https://docs.datadoghq.com/agent/kubernetes/#event-collection
  collectEvents: true

  # datadog.kubernetesUseEndpointSlices -- Enable this to map Kubernetes services to endpointslices instead of endpoints. (Requires Cluster Agent 7.62.0+)
  kubernetesUseEndpointSlices: true

  # datadog.kubernetesKubeServiceIgnoreReadiness -- Enable this to attach the kube_service tag unconditionally. (Requires Cluster Agent 7.76.0+)
  kubernetesKubeServiceIgnoreReadiness: false

  # Configure Kubernetes events collection
  kubernetesEvents:
    # datadog.kubernetesEvents.sourceDetectionEnabled -- Enable this to map Kubernetes events to integration sources based on controller names. (Requires Cluster Agent 7.56.0+)
    sourceDetectionEnabled: false

    # datadog.kubernetesEvents.filteringEnabled -- Enable this to only include events that match the pre-defined allowed events. (Requires Cluster Agent 7.57.0+)
    filteringEnabled: false

    # datadog.kubernetesEvents.unbundleEvents -- Allow unbundling Kubernetes events, with a 1:1 mapping between Kubernetes and Datadog events. (Requires Cluster Agent 7.42.0+)
    unbundleEvents: false

    # datadog.kubernetesEvents.collectedEventTypes -- Event types to be collected. This requires datadog.kubernetesEvents.unbundleEvents to be set to true.
    ## collectedEventTypes:
    ##   - kind: <kind>      # (optional if `source` is provided)
    ##     source: <source>  # (optional if `kind` is provided)
    ##     reasons:          # (optional) if empty, accept all event reasons
    ##       - <reason>
    collectedEventTypes:
      - kind: Pod
        reasons:
          - Failed
          - BackOff
          - Unhealthy
          - FailedScheduling
          - FailedMount
          - FailedAttachVolume
      - kind: Node
        reasons:
          - TerminatingEvictedPod
          - NodeNotReady
          - Rebooted
          - HostPortConflict
      - kind: CronJob
        reasons:
          - SawCompletedJob

    # datadog.kubernetesEvents.maxEventsPerRun -- Maximum number of events you wish to collect per check run.
    maxEventsPerRun:

    # datadog.kubernetesEvents.kubernetesEventResyncPeriodS -- Specify the frequency, in seconds, at which the Agent should list all events to re-sync, following the informer pattern
    kubernetesEventResyncPeriodS:

  clusterTagger:
    # datadog.clusterTagger.collectKubernetesTags -- Enables Kubernetes resources tags collection.
    collectKubernetesTags: false

  # datadog.leaderElection -- Enables the leader election mechanism for event collection
  leaderElection: true

  # datadog.leaderLeaseDuration -- Set the lease time for leader election, in seconds
  leaderLeaseDuration: # 60

  # datadog.leaderElectionResource -- Selects the default resource to use for leader election.
  # Can be:
  # * "lease" / "leases". Only supported in Agent 7.47+.
  # * "configmap" / "configmaps".
  # * "" to automatically detect which one to use.
  leaderElectionResource: configmap

  remoteConfiguration:
    # datadog.remoteConfiguration.enabled -- Set to true to enable remote configuration.
    # DEPRECATED: Consider using the top-level remoteConfiguration.enabled instead
    enabled: true
  privateActionRunner:
    # datadog.privateActionRunner.enabled -- Enable the Private Action Runner on the node agent to execute workflow actions
    enabled: false

    # datadog.privateActionRunner.selfEnroll -- Enable self-enrollment for the Private Action Runner
    ## When enabled, the runner will automatically register itself with Datadog using the provided API/APP keys
    ## and store its identity in a local file. Requires leader election to be enabled.
    selfEnroll: true

    # datadog.privateActionRunner.urn -- URN of the Private Action Runner (required if selfEnroll is false)
    ## Format: urn:datadog:private-action-runner:organization:<org-id>:runner:<runner-id>
    urn: # "urn:datadog:private-action-runner:organization:123456:runner:abc-def"

    # datadog.privateActionRunner.privateKey -- Private key for the Private Action Runner (required if selfEnroll is false)
    ## This key is used to authenticate the runner with Datadog
    privateKey: # "<private-key>"

    # datadog.privateActionRunner.identityFromExistingSecret -- Use an existing Secret which stores the Private Action Runner URN and private key
    ## The secret should contain 'urn' and 'private_key' keys
    ## If set, this parameter takes precedence over "urn" and "privateKey"
    identityFromExistingSecret: # "<secret-name>"

    # datadog.privateActionRunner.actionsAllowlist -- List of actions executable by the Private Action Runner
    actionsAllowlist: []
    # - "com.datadoghq.http.request"
    # - "com.datadoghq.gitlab.branches.*"

  ## Enable the logs agent and provide custom configs
  logs:
    # datadog.logs.enabled -- Enable this to activate Datadog Agent log collection
    ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
    enabled: false

    # datadog.logs.containerCollectAll -- Enable this to allow log collection for all containers
    ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
    containerCollectAll: false

    # datadog.logs.containerCollectUsingFiles -- Collect logs from files in /var/log/pods instead of using the container runtime API
    ## It's usually the most efficient way of collecting logs.
    ## ref: https://docs.datadoghq.com/agent/basic_agent_usage/kubernetes/#log-collection-setup
    containerCollectUsingFiles: true

    # datadog.logs.autoMultiLineDetection -- Allows the Agent to detect common multi-line patterns automatically.
    ## ref: https://docs.datadoghq.com/agent/logs/advanced_log_collection/?tab=configurationfile#automatic-multi-line-aggregation
    autoMultiLineDetection: false
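  ## With log collection enabled, per-container log configuration can still be supplied
  ## through Autodiscovery annotations on the workload pods. A sketch ("my-app" is a
  ## hypothetical container name; the source/service values depend on your application):
  #
  # metadata:
  #   annotations:
  #     ad.datadoghq.com/my-app.logs: '[{"source": "python", "service": "my-app"}]'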
  ## Enable the APM agent and provide custom configs
  ##
  ## APM is enabled by default. If local service Internal Traffic Policy is available (Kubernetes v1.22+), the agent service is created with the APM local trace port.
  apm:
    # datadog.apm.socketEnabled -- Enable APM over a socket (Unix Domain Socket or Windows named pipe)
    ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
    socketEnabled: true

    # datadog.apm.portEnabled -- Enable APM over TCP communication (hostPort 8126 by default)
    ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
    portEnabled: false

    # datadog.apm.useLocalService -- Enable APM over TCP communication to use the local service only (requires Kubernetes v1.22+)
    # Note: The hostPort 8126 is disabled when this is enabled.
    ## ref: https://docs.datadoghq.com/tracing/guide/setting_up_apm_with_kubernetes_service/?tab=helm
    useLocalService: false

    # datadog.apm.enabled -- Enable this to enable APM and tracing, on port 8126
    # DEPRECATED. Use datadog.apm.portEnabled instead
    ## ref: https://github.com/DataDog/docker-dd-agent#tracing-from-the-host
    enabled: false

    # datadog.apm.port -- Override the trace Agent port
    ## Note: Make sure your client is sending to the same TCP port.
    port: 8126

    # datadog.apm.useSocketVolume -- Enable APM over a Unix Domain Socket
    # DEPRECATED. Use datadog.apm.socketEnabled instead
    ## ref: https://docs.datadoghq.com/agent/kubernetes/apm/
    useSocketVolume: false

    # datadog.apm.socketPath -- Path to the trace-agent socket
    socketPath: /var/run/datadog/apm.socket

    # datadog.apm.hostSocketPath -- Host path to the trace-agent socket
    hostSocketPath: /var/run/datadog

    # Error Tracking backend
    errorTrackingStandalone:
      # datadog.apm.errorTrackingStandalone.enabled -- Enables Error Tracking for backend services.
      enabled: false

    # APM Single Step Instrumentation
    # Requires Cluster Agent 7.49+.
    instrumentation:
      # datadog.apm.instrumentation.enabled -- Enable injecting the Datadog APM libraries into all pods in the cluster.
      enabled: false

      # datadog.apm.instrumentation.enabledNamespaces -- Enable injecting the Datadog APM libraries into pods in specific namespaces.
      enabledNamespaces: []

      # datadog.apm.instrumentation.disabledNamespaces -- Disable injecting the Datadog APM libraries into pods in specific namespaces.
      disabledNamespaces: []

      # datadog.apm.instrumentation.libVersions -- Inject specific versions of the tracing libraries with Single Step Instrumentation.
      libVersions: {}

      # datadog.apm.instrumentation.targets -- Enable target-based workload selection.
      # Requires Cluster Agent 7.64.0+.
      #
      # ddTraceConfigs[].valueFrom requires Cluster Agent 7.66.0+.
      targets: []
      # - name: "example"
      #   podSelector:
      #     matchLabels:
      #       language: "python"
      #   namespaceSelector:
      #     matchNames:
      #       - "applications"
      #   ddTraceVersions:
      #     python: "v2"
      #   ddTraceConfigs:
      #     - name: "DD_PROFILING_ENABLED"
      #       value: "true"
      #     - name: "DD_SERVICE"
      #       valueFrom:
      #         fieldRef:
      #           fieldPath: metadata.labels[my-label]

      # datadog.apm.instrumentation.skipKPITelemetry -- Disable generating the ConfigMap for APM Instrumentation KPIs
      skipKPITelemetry: false

      # Language detection currently only detects languages and adds them as annotations on deployments; it doesn't use these languages for injecting libraries into application pods.
      # It requires Agent 7.52+ and Cluster Agent 7.52+
      language_detection:
        # datadog.apm.instrumentation.language_detection.enabled -- Run language detection to automatically detect the languages of user workloads (preview).
        enabled: true

      # datadog.apm.instrumentation.injectionMode -- The injection mode to use for library injection.
      # Valid values are: "auto", "init_container", "csi" (experimental, requires Cluster Agent 7.76.0+ and the Datadog CSI Driver), "image_volume" (experimental, requires Cluster Agent 7.77.0+)
      # Empty by default so the Cluster Agent can apply its own defaults.
      injectionMode: ""

      # This feature is in preview. It requires Cluster Agent 7.57+.
      injector:
        # datadog.apm.instrumentation.injector.imageTag -- The image tag to use for the APM Injector (preview).
        imageTag: ""
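    ## A compact illustration of the Single Step Instrumentation options above, pinning the
    ## Java tracer major version in two namespaces (the namespace names and version are
    ## hypothetical):
    #
    # instrumentation:
    #   enabled: true
    #   enabledNamespaces:
    #     - "billing"
    #     - "checkout"
    #   libVersions:
    #     java: "1"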
  ## Application Security Management (ASM) configuration
  ##
  ## ASM is disabled by default and can be enabled by setting the various `enabled` fields to `true` under the `datadog.asm` section.
  ## Manually adding the various environment variables to a pod will take precedence over the ones set by the Helm chart.
  ## These will only have an effect on containers that have Datadog client libraries installed, either manually or via Single Step Instrumentation (under the `datadog.apm.instrumentation` section).
  ## It requires Datadog Cluster Agent 7.53.0+.
  asm:
    threats:
      # datadog.asm.threats.enabled -- Enable Application Security Management Threats App & API Protection by injecting the `DD_APPSEC_ENABLED=true` environment variable into all pods in the cluster
      enabled: false
    sca:
      # datadog.asm.sca.enabled -- Enable Application Security Management Software Composition Analysis by injecting the `DD_APPSEC_SCA_ENABLED=true` environment variable into all pods in the cluster
      enabled: false
    iast:
      # datadog.asm.iast.enabled -- Enable Application Security Management Interactive Application Security Testing by injecting the `DD_IAST_ENABLED=true` environment variable into all pods in the cluster
      enabled: false

  ## App & API Protection configuration
  ##
  ## App & API Protection is disabled by default and can be enabled by setting the `enabled` field to `true` under the `datadog.appsec.injector` section.
  ## The Datadog Helm chart offers the option to auto-instrument supported proxies in the cluster to forward traffic to a custom security processor, delegating
  ## traffic analysis, WAF capabilities and API Posture Management to Datadog's App and API Protection product, which has to be deployed separately. Please follow the documentation to deploy the processor:
  ## https://docs.datadoghq.com/security/application_security/setup/#proxies
  ## It requires Datadog Cluster Agent 7.73.0+.
  appsec:
    # The App & API Protection Injector is used to automatically configure your proxy to forward traffic to a custom security processor, delegating
    # traffic analysis, WAF capabilities and API Posture Management to Datadog's App and API Protection product.
    injector:
      # datadog.appsec.injector.enabled -- Enable App & API Protection on your cluster ingress, across the whole cluster at once
      enabled: false

      # datadog.appsec.injector.autoDetect -- Automatically detect and inject supported proxies in the cluster (Envoy Gateway, Istio Gateway API, native Istio Gateway)
      autoDetect: true

      # datadog.appsec.injector.mode -- Deployment mode for the AppSec processor. Valid values: "sidecar", "external". Leave empty to use the agent default (sidecar).
      # Upgrading users who rely on the external-processor flow (processor.address / processor.service.*) should set this to "external" explicitly.
      mode: ""
Valid values: "envoy-gateway", "istio", "istio-gateway" # When autoDetect is true, detected proxies are added to this list # When autoDetect is false, only proxies in this list are enabled proxies: [] # - envoy-gateway: Configures Envoy Gateway resources for AppSec injection # - istio: Watches Istio-managed Kubernetes Gateway API GatewayClasses for AppSec injection # - istio-gateway: Watches native Istio Gateway resources for AppSec injection sidecar: # datadog.appsec.injector.sidecar.image -- Container image for the AppSec sidecar processor image: "ghcr.io/datadog/dd-trace-go/service-extensions-callout" # datadog.appsec.injector.sidecar.imageTag -- Image tag for the AppSec sidecar processor imageTag: "v2.6.0" # datadog.appsec.injector.sidecar.port -- Listening port for the AppSec sidecar processor port: 8080 # datadog.appsec.injector.sidecar.healthPort -- Health check port for the AppSec sidecar processor healthPort: 8081 # datadog.appsec.injector.sidecar.bodyParsingSizeLimit -- Request body parsing size limit in bytes for the AppSec sidecar processor. Set to 0 to leave it unset (default agent behavior). Set to a negative value (e.g. -1) to disable body parsing entirely. bodyParsingSizeLimit: 0 resources: requests: # datadog.appsec.injector.sidecar.resources.requests.cpu -- CPU request for the AppSec sidecar processor cpu: "10m" # datadog.appsec.injector.sidecar.resources.requests.memory -- Memory request for the AppSec sidecar processor memory: "128Mi" limits: # datadog.appsec.injector.sidecar.resources.limits.cpu -- Optional CPU limit for the AppSec sidecar processor cpu: "" # datadog.appsec.injector.sidecar.resources.limits.memory -- Optional memory limit for the AppSec sidecar processor memory: "" processor: # datadog.appsec.injector.processor.address -- Address of the AppSec processor service # Defaults to `{service.name}.{service.namespace}.svc` address: "" # datadog.appsec.injector.processor.port -- Port of the AppSec processor service (defaults to 443) port: 443 # datadog.appsec.injector.service -- Required service information to connect to the AppSec processor # This service should point to a deployment of the image `ghcr.io/DataDog/dd-trace-go/service-extensions-callout:latest` # This deployment is not managed by the Datadog Helm chart. service: # datadog.appsec.injector.processor.service.name -- Name of the AppSec processor service name: "" # datadog.appsec.injector.processor.service.namespace -- Namespace where the AppSec processor service is deployed namespace: "" ## OTLP ingest related configuration otlp: receiver: protocols: # datadog.otlp.receiver.protocols.grpc - OTLP/gRPC configuration grpc: # datadog.otlp.receiver.protocols.grpc.enabled -- Enable the OTLP/gRPC endpoint enabled: false # datadog.otlp.receiver.protocols.grpc.endpoint -- OTLP/gRPC endpoint endpoint: "0.0.0.0:4317" # datadog.otlp.receiver.protocols.grpc.useHostPort -- Enable the Host Port for the OTLP/gRPC endpoint useHostPort: true # datadog.otlp.receiver.protocols.http - OTLP/HTTP configuration http: # datadog.otlp.receiver.protocols.http.enabled -- Enable the OTLP/HTTP endpoint enabled: false # datadog.otlp.receiver.protocols.http.endpoint -- OTLP/HTTP endpoint endpoint: "0.0.0.0:4318" # datadog.otlp.receiver.protocols.http.useHostPort -- Enable the Host Port for the OTLP/HTTP endpoint useHostPort: true logs: # datadog.otlp.logs.enabled -- Enable logs support in the OTLP ingest endpoint enabled: false ## Host Profiler related configuration for the host-profiler in Agent Daemonset. 
  ## Host Profiler related configuration for the host-profiler in the Agent DaemonSet.
  ## Note: this is experimental and subject to change.
  hostProfiler:
    # datadog.hostProfiler.enabled -- Enable the Host Profiler. This feature is experimental and subject to change.
    enabled: false
    # datadog.hostProfiler.image -- Image for the Host Profiler. This parameter is experimental and will be removed once an official image is available.
    image: ""

  ## OTel Collector related configuration for the otel-agent in the Agent DaemonSet
  otelCollector:
    # datadog.otelCollector.enabled -- Enable the OTel Collector
    enabled: false
    # datadog.otelCollector.ports -- Ports that the OTel Collector is listening on
    ports:
      # Default gRPC port of the OTLP receiver
      - containerPort: "4317"
        name: otel-grpc
        protocol: TCP
      # Default HTTP port of the OTLP receiver
      - containerPort: "4318"
        name: otel-http
        protocol: TCP
    # datadog.otelCollector.config -- OTel Collector configuration
    config: null
    # datadog.otelCollector.configMap -- Use an existing ConfigMap for the DDOT Collector configuration
    configMap:
      # datadog.otelCollector.configMap.name -- Name of the existing ConfigMap that contains the DDOT Collector configuration
      name: null
      # datadog.otelCollector.configMap.items -- Items within the ConfigMap that contain the DDOT Collector configuration
      items:
      # - key: otel-config.yaml
      #   path: otel-config.yaml
      # - key: otel-config-two.yaml
      #   path: otel-config-two.yaml
      # datadog.otelCollector.configMap.key -- Key within the ConfigMap that contains the DDOT Collector configuration
      key: otel-config.yaml
    # datadog.otelCollector.featureGates -- Feature gates to pass to the OTel Collector, as a comma-separated list
    featureGates: null
    # datadog.otelCollector.useStandaloneImage -- If true, the OTel Collector will use the `ddot-collector` image instead of the `agent` image
    # The tag is retrieved from the `agents.image.tag` value.
    # This is only supported for agent versions 7.67.0+
    # If set to false, you will need to set `agents.image.tagSuffix` to `full`
    useStandaloneImage: true
    ## Provide OTel Collector RBAC configuration
    rbac:
      # datadog.otelCollector.rbac.create -- If true, check the OTel Collector config for the k8sattributes processor
      # and create the required ClusterRole to access the Kubernetes API
      create: true
      # datadog.otelCollector.rbac.rules -- A set of additional RBAC rules to apply to the OTel Collector's ClusterRole
      rules: []
      # - apiGroups: [""]
      #   resources: ["pods", "nodes"]
      #   verbs: ["get", "list", "watch"]
    ## Provide OTel Collector logs configuration
    logs:
      # datadog.otelCollector.logs.enabled -- Enable logs support in the OTel Collector.
      # If true, checks the OTel Collector config for a filelog receiver and mounts additional volumes to collect container
      # and pod logs.
      enabled: false
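  ## For reference, a minimal sketch of what an inline configuration for the `config` key
  ## above could look like (standard Collector syntax; when left null the chart ships its
  ## own default pipeline, so treat this as illustrative only):
  #
  # otelCollector:
  #   config:
  #     receivers:
  #       otlp:
  #         protocols:
  #           grpc:
  #             endpoint: "0.0.0.0:4317"
  #     exporters:
  #       datadog:
  #         api:
  #           key: ${env:DD_API_KEY}
  #     service:
  #       pipelines:
  #         traces:
  #           receivers: [otlp]
  #           exporters: [datadog]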
  ## Continuous Profiler configuration
  ##
  ## Continuous Profiler is disabled by default and can be enabled by setting the `enabled` field to
  ## either the `auto` or `true` value under the `datadog.profiling` section.
  ## Manually adding the `DD_PROFILING_ENABLED` variable to a pod will take precedence over the
  ## value in the Helm chart.
  ## These will only have an effect on containers that have Datadog client libraries installed,
  ## either manually or via Single Step Instrumentation (under the `datadog.apm.instrumentation`
  ## section).
  ## It requires Datadog Cluster Agent 7.57.0+.
  profiling:
    # datadog.profiling.enabled -- Enable Continuous Profiler by injecting the `DD_PROFILING_ENABLED`
    # environment variable with the same value into all pods in the cluster
    # Valid values are:
    # - false: Profiler is turned off and cannot be turned on by other means.
    # - null: Profiler is turned off, but can be turned on by other means.
    # - auto: Profiler is turned off, but the library will turn it on if the application is a good candidate for profiling.
    # - true: Profiler is turned on.
    enabled: null

  # datadog.envFrom -- Set environment variables for all Agents directly from ConfigMaps and/or Secrets
  ## envFrom to pass configmaps or secrets as environment variables
  envFrom: []
  #   - configMapRef:
  #       name: <CONFIGMAP_NAME>
  #   - secretRef:
  #       name: <SECRET_NAME>

  # datadog.env -- Set environment variables for all Agents
  ## The Datadog Agent supports many environment variables.
  ## ref: https://docs.datadoghq.com/agent/docker/?tab=standard#environment-variables
  env: []
  #   - name: <ENV_VAR_NAME>
  #     value: <ENV_VAR_VALUE>

  # datadog.envDict -- Set environment variables for all Agents defined in a dict
  envDict: {}
  #   <ENV_VAR_NAME>: <ENV_VAR_VALUE>

  # datadog.confd -- Provide additional check configurations (static and Autodiscovery)
  ## Each key becomes a file in /conf.d
  ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#optional-volumes
  ## ref: https://docs.datadoghq.com/agent/autodiscovery/
  confd: {}
  #   redisdb.yaml: |-
  #     init_config:
  #     instances:
  #       - host: "name"
  #         port: "6379"
  #   kubernetes_state.yaml: |-
  #     ad_identifiers:
  #       - kube-state-metrics
  #     init_config:
  #     instances:
  #       - kube_state_url: http://%%host%%:8080/metrics

  # datadog.checksd -- Provide additional custom checks as Python code
  ## Each key becomes a file in /checks.d
  ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#optional-volumes
  checksd: {}
  #   service.py: |-

  # datadog.dockerSocketPath -- Path to the docker socket
  dockerSocketPath: # /var/run/docker.sock

  # datadog.criSocketPath -- Path to the container runtime socket (if different from Docker)
  criSocketPath: # /var/run/containerd/containerd.sock

  # Configure how the agent interacts with the host's container runtime
  containerRuntimeSupport:
    # datadog.containerRuntimeSupport.enabled -- Set this to false to disable agent access to the container runtime.
    enabled: true

  ## Enable the process agent and provide custom configs
  processAgent:
    # datadog.processAgent.enabled -- Set this to true to enable the live process monitoring agent
    # DEPRECATED. Set `datadog.processAgent.processCollection` or `datadog.processAgent.containerCollection` instead.
    ## Note: /etc/passwd is automatically mounted when `processCollection`, `processDiscovery`, or `containerCollection` is enabled.
    ## ref: https://docs.datadoghq.com/graphing/infrastructure/process/#kubernetes-daemonset
    enabled: true

    # datadog.processAgent.processCollection -- Set this to true to enable process collection
    processCollection: false

    # datadog.processAgent.stripProcessArguments -- Set this to scrub all arguments from collected processes
    ## Requires datadog.processAgent.processCollection to be set to true to have any effect
    ## ref: https://docs.datadoghq.com/infrastructure/process/?tab=linuxwindows#process-arguments-scrubbing
    stripProcessArguments: false

    # datadog.processAgent.processDiscovery -- Enables or disables autodiscovery of integrations
    processDiscovery: true
    # datadog.processAgent.runInCoreAgent -- Set this to true to run the following features in the core agent: Live Processes, Live Containers, Process Discovery.
    ## This requires Agent 7.60.0+ and Linux.
    ## DEPRECATED: This behavior will be enabled by default for installations that meet the requirements.
    ## For Agent 7.78.0+, this setting is ignored; process checks always run in the core agent on Linux.
    runInCoreAgent: true

    # datadog.processAgent.containerCollection -- Set this to true to enable container collection
    ## ref: https://docs.datadoghq.com/infrastructure/containers/?tab=helm
    containerCollection: true

  # datadog.disableDefaultOsReleasePaths -- Set this to true to disable mounting datadog.osReleasePath in all containers
  disableDefaultOsReleasePaths: false

  # datadog.disablePasswdMount -- Set this to true to disable mounting /etc/passwd in all containers
  disablePasswdMount: false

  # datadog.osReleasePath -- Specify the path to your os-release file
  osReleasePath: /etc/os-release

  ## Enable the system-probe agent and provide custom configs
  systemProbe:
    # datadog.systemProbe.debugPort -- Specify the port to expose pprof and expvar for the system-probe agent
    debugPort: 0

    # datadog.systemProbe.enableConntrack -- Enable the system-probe agent to connect to the netlink/conntrack subsystem to add NAT information to connection data
    ## ref: http://conntrack-tools.netfilter.org/
    enableConntrack: true

    # datadog.systemProbe.seccomp -- Apply an ad-hoc seccomp profile to the system-probe agent to restrict its privileges
    ## Note that this will break `kubectl exec … -c system-probe -- /bin/bash`
    seccomp: localhost/system-probe

    # datadog.systemProbe.seccompRoot -- Specify the seccomp profile root directory
    seccompRoot: /var/lib/kubelet/seccomp

    # datadog.systemProbe.bpfDebug -- Enable kernel debug logging
    bpfDebug: false

    # datadog.systemProbe.apparmor -- Specify an AppArmor profile for system-probe
    apparmor: unconfined

    # datadog.systemProbe.enableTCPQueueLength -- Enable the TCP queue length eBPF-based check
    enableTCPQueueLength: false

    # datadog.systemProbe.enableOOMKill -- Enable the OOM kill eBPF-based check
    enableOOMKill: false

    # datadog.systemProbe.mountPackageManagementDirs -- Enables mounting of specific package management directories when runtime compilation is enabled
    mountPackageManagementDirs: []
    ## For runtime compilation to be able to download kernel headers, the host's package management folders
    ## must be mounted to the /host directory. For example, for Ubuntu & Debian the following mount would be necessary:
    # - name: "apt-config-dir"
    #   hostPath: /etc/apt
    #   mountPath: /host/etc/apt
    ## If this list is empty, then all necessary package management directories (for all supported OSs) will be mounted.

    # datadog.systemProbe.runtimeCompilationAssetDir -- Specify a directory for runtime compilation assets to live in
    runtimeCompilationAssetDir: /var/tmp/datadog-agent/system-probe

    # datadog.systemProbe.btfPath -- Specify the path to a BTF file for your kernel
    btfPath: ""

    # datadog.systemProbe.collectDNSStats -- Enable DNS stat collection
    collectDNSStats: true

    # datadog.systemProbe.maxTrackedConnections -- The maximum number of tracked connections
    maxTrackedConnections: 131072

    # datadog.systemProbe.maxConnectionStateBuffered -- Maximum number of concurrent connections for Cloud Network Monitoring
    maxConnectionStateBuffered:

    # datadog.systemProbe.conntrackMaxStateSize -- The maximum size of the userspace conntrack cache
    conntrackMaxStateSize: 131072 # 2 * maxTrackedConnections by default, per https://github.com/DataDog/datadog-agent/blob/d1c5de31e1bba72dfac459aed5ff9562c3fdcc20/pkg/process/config/config.go#L229

    # datadog.systemProbe.conntrackInitTimeout -- The time to wait for conntrack to initialize before failing
    conntrackInitTimeout: 10s
    # DEPRECATED. Use datadog.disableDefaultOsReleasePaths instead.
    # datadog.systemProbe.enableDefaultOsReleasePaths -- Enable the default os-release file mounts
    enableDefaultOsReleasePaths: true

    # datadog.systemProbe.enableDefaultKernelHeadersPaths -- Enable mounting of the default paths where kernel headers are stored
    enableDefaultKernelHeadersPaths: true

  containerImageCollection:
    # datadog.containerImageCollection.enabled -- Enable collection of container image metadata
    # This parameter requires Agent version 7.46+
    enabled: true

  orchestratorExplorer:
    # datadog.orchestratorExplorer.enabled -- Set this to false to disable the Orchestrator Explorer
    ## This requires processAgent.enabled and clusterAgent.enabled to be set to true
    ## ref: TODO - add doc link
    enabled: true

    # datadog.orchestratorExplorer.container_scrubbing -- Enable the scrubbing of containers in the Kubernetes resource YAML for sensitive information
    ## Container scrubbing takes significant resources during data collection.
    ## If you notice that the Cluster Agent uses too much CPU in larger clusters,
    ## turning this option off will improve the situation.
    container_scrubbing:
      enabled: true

    # datadog.orchestratorExplorer.kubelet_configuration_check.enabled -- Enable the orchestrator kubelet configuration check
    ## This enables the collection of the kubelet configuration for viewing in the Orchestrator Explorer
    kubelet_configuration_check:
      enabled: true

    # datadog.orchestratorExplorer.customResources -- Defines custom resources for the Orchestrator Explorer to collect
    # customResources is required for RBAC creation if a custom Orchestrator Explorer configuration is provided in `clusterAgent.confd` or `clusterAgent.advancedConfd`
    # Each item should follow group/version/name, for example:
    # customResources:
    #   - datadoghq.com/v1alpha1/datadogmetrics
    #   - datadoghq.com/v1alpha1/watermarkpodautoscalers
    customResources: []

  helmCheck:
    # datadog.helmCheck.enabled -- Set this to true to enable the Helm check (Requires Agent 7.35.0+ and Cluster Agent 1.19.0+)
    # This requires clusterAgent.enabled to be set to true
    enabled: false

    # datadog.helmCheck.collectEvents -- Set this to true to enable event collection in the Helm check (Requires Agent 7.36.0+ and Cluster Agent 1.20.0+)
    # This requires datadog.helmCheck.enabled to be set to true
    collectEvents: false

    # datadog.helmCheck.valuesAsTags -- Collects Helm values from a release and uses them as tags (Requires Agent and Cluster Agent 7.40.0+).
    # This requires datadog.helmCheck.enabled to be set to true
    valuesAsTags: {}
    #   <HELM_VALUE>: <DATADOG_TAG_KEY>

  networkMonitoring:
    # datadog.networkMonitoring.enabled -- Enable Cloud Network Monitoring
    enabled: false

    # datadog.networkMonitoring.dnsMonitoringPorts -- List of ports to monitor for DNS traffic
    # @default -- `[53]` (set by agent)
    dnsMonitoringPorts: []
  networkPath:
    connectionsMonitoring:
      # datadog.networkPath.connectionsMonitoring.enabled -- Enable Network Path's "Network traffic paths" feature. Requires the `traceroute` system-probe module to be enabled.
      enabled: false
    collector:
      # datadog.networkPath.collector.workers -- Override the number of workers
      workers:
      # datadog.networkPath.collector.pathtestTTL -- Override the TTL in minutes for pathtests
      pathtestTTL:
      # datadog.networkPath.collector.pathtestInterval -- Override the time interval between pathtest runs
      pathtestInterval:
      # datadog.networkPath.collector.pathtestContextsLimit -- Override the maximum number of pathtests stored to run
      pathtestContextsLimit:
      # datadog.networkPath.collector.pathtestMaxPerMinute -- Override the limit for total pathtests run, per minute
      pathtestMaxPerMinute:

  serviceMonitoring:
    # datadog.serviceMonitoring.enabled -- Enable Universal Service Monitoring
    enabled: false

    # datadog.serviceMonitoring.httpMonitoringEnabled -- Enable HTTP monitoring for Universal Service Monitoring (Requires Agent 7.40.0+). Empty values use the default setting in the Datadog Agent.
    httpMonitoringEnabled:

    # datadog.serviceMonitoring.http2MonitoringEnabled -- Enable HTTP2 & gRPC monitoring for Universal Service Monitoring (Requires Agent 7.53.0+ and kernel 5.2 or later). Empty values use the default setting in the Datadog Agent.
    http2MonitoringEnabled:

    tls:
      go:
        # datadog.serviceMonitoring.tls.go.enabled -- (bool) Enable TLS monitoring for Golang services (Requires Agent 7.51.0+). Empty values use the default setting in the Datadog Agent.
        enabled:
      istio:
        # datadog.serviceMonitoring.tls.istio.enabled -- (bool) Enable TLS monitoring for Istio services (Requires Agent 7.50.0+). Empty values use the default setting in the Datadog Agent.
        enabled:
      nodejs:
        # datadog.serviceMonitoring.tls.nodejs.enabled -- (bool) Enable TLS monitoring for Node.js services (Requires Agent 7.54.0+). Empty values use the default setting in the Datadog Agent.
        enabled:
      native:
        # datadog.serviceMonitoring.tls.native.enabled -- (bool) Enable TLS monitoring for native (openssl, libssl, gnutls) services (Requires Agent 7.51.0+). Empty values use the default setting in the Datadog Agent.
        enabled:

  traceroute:
    # datadog.traceroute.enabled -- (bool) Enable traceroutes in system-probe for Network Path
    enabled: false

  discovery:
    # datadog.discovery.enabled -- (bool) Enable Service Discovery.
    ## If omitted, the chart auto-enables it when the effective node Agent version resolved by the chart is >= 7.78.0,
    ## except on GKE Autopilot clusters, where system-probe is not supported.
    ## If the resolved tag is not a semantic version, discovery treats it as latest.
    ## An explicit true/false always takes precedence.
    ## On supported Agent versions, the chart also enables `discovery.use_system_probe_lite` so discovery-only deployments can exec into `system-probe-lite`.
    enabled: # false
    # datadog.discovery.networkStats.enabled -- (bool) Enable Service Discovery Network Stats
    networkStats:
      enabled: true
runtimeClassName: "nvidia" # Software Bill of Materials configuration sbom: containerImage: # datadog.sbom.containerImage.enabled -- Enable SBOM collection for container images enabled: false # datadog.sbom.containerImage.uncompressedLayersSupport -- Use container runtime snapshotter # This should be set to true when using EKS, GKE or if containerd is configured to # discard uncompressed layers. # This feature will cause the SYS_ADMIN capability to be added to the Agent container. # Setting this to false could cause a high error rate when generating SBOMs due to missing uncompressed layer. # See https://docs.datadoghq.com/security/cloud_security_management/troubleshooting/vulnerabilities/#uncompressed-container-image-layers uncompressedLayersSupport: true # datadog.sbom.containerImage.overlayFSDirectScan -- Use experimental overlayFS direct scan overlayFSDirectScan: false # datadog.sbom.containerImage.containerExclude -- Exclude containers from SBOM generation, as a space-separated list ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#exclude-containers containerExclude: # "image:datadog/agent" # datadog.sbom.containerImage.containerInclude -- Include containers in SBOM generation, as a space-separated list. # If a container matches an include rule, it’s always included in SBOM generation ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers containerInclude: # datadog.sbom.containerImage.analyzers -- List of analyzers to use for container image SBOM generation analyzers: - "os" host: # datadog.sbom.host.enabled -- Enable SBOM collection for host filesystems enabled: false # datadog.sbom.host.analyzers -- List of analyzers to use for host SBOM generation analyzers: - "os" enrichment: usage: # datadog.sbom.enrichment.usage.enabled -- Enable runtime "package in use" SBOM enrichment. # Requires the system-probe container (auto-enabled when set to true) for eBPF-based file # access tracking, and sets `hostPID: true` on the agent pod. Requires Agent 7.79.0+. enabled: false ## Enable security agent and provide custom configs securityAgent: compliance: # datadog.securityAgent.compliance.enabled -- Set to true to enable Cloud Security Posture Management (CSPM) enabled: false # datadog.securityAgent.compliance.configMap -- Contains CSPM compliance benchmarks that will be used configMap: # datadog.securityAgent.compliance.checkInterval -- Compliance check run interval checkInterval: 20m # datadog.securityAgent.compliance.containerInclude -- Include containers in CSPM monitoring, as a space-separated list. # If a container matches an include rule, it’s always included ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers containerInclude: # DEPRECATED. Use datadog.securityAgent.compliance.host_benchmarks.enabled instead. xccdf: enabled: false # datadog.securityAgent.compliance.host_benchmarks.enabled -- Set to false to disable host benchmarks. If enabled, this feature requires 160 MB extra memory for the `security-agent` container. (Requires Agent 7.47.0+) host_benchmarks: enabled: true # datadog.securityAgent.compliance.runInSystemProbe -- Set to true to run compliance checks in system-probe instead of security-agent. # When enabled in conjunction with datadog.securityAgent.runtime.directSendFromSystemProbe, the security-agent container will not be created. 
  ## Enable the security agent and provide custom configs
  securityAgent:
    compliance:
      # datadog.securityAgent.compliance.enabled -- Set to true to enable Cloud Security Posture Management (CSPM)
      enabled: false

      # datadog.securityAgent.compliance.configMap -- Contains the CSPM compliance benchmarks that will be used
      configMap:

      # datadog.securityAgent.compliance.checkInterval -- Compliance check run interval
      checkInterval: 20m

      # datadog.securityAgent.compliance.containerInclude -- Include containers in CSPM monitoring, as a space-separated list.
      # If a container matches an include rule, it's always included
      ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers
      containerInclude:

      # DEPRECATED. Use datadog.securityAgent.compliance.host_benchmarks.enabled instead.
      xccdf:
        enabled: false

      # datadog.securityAgent.compliance.host_benchmarks.enabled -- Set to false to disable host benchmarks. If enabled, this feature requires 160 MB extra memory for the `security-agent` container. (Requires Agent 7.47.0+)
      host_benchmarks:
        enabled: true

      # datadog.securityAgent.compliance.runInSystemProbe -- Set to true to run compliance checks in system-probe instead of security-agent.
      # When enabled in conjunction with datadog.securityAgent.runtime.directSendFromSystemProbe, the security-agent container will not be created.
      runInSystemProbe: false

    runtime:
      # datadog.securityAgent.runtime.enabled -- Set to true to enable Cloud Workload Security (CWS)
      enabled: false

      # datadog.securityAgent.runtime.fimEnabled -- Set to true to enable Cloud Workload Security (CWS) File Integrity Monitoring
      # DEPRECATED. This option has no effect. Cloud Workload Security is now only controlled by datadog.securityAgent.runtime.enabled.
      fimEnabled: false

      # datadog.securityAgent.runtime.useSecruntimeTrack -- Set to true to send Cloud Workload Security (CWS) events directly to the Agent events explorer. This value shouldn't be changed unless advised by Datadog support.
      useSecruntimeTrack: true

      # datadog.securityAgent.runtime.directSendFromSystemProbe -- Set to true to enable direct sending of CWS events from system-probe to Datadog, bypassing security-agent.
      # When enabled, the security-agent container will not be created for CWS functionality (it may still be created if compliance features are enabled).
      directSendFromSystemProbe: false

      # datadog.securityAgent.runtime.containerExclude -- Exclude containers from runtime security monitoring, as a space-separated list
      ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#exclude-containers
      containerExclude: # "image:datadog/agent"

      # datadog.securityAgent.runtime.containerInclude -- Include containers in runtime security monitoring, as a space-separated list.
      # If a container matches an include rule, it's always included
      ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers
      containerInclude:

      policies:
        # datadog.securityAgent.runtime.policies.configMap -- Contains the CWS policies that will be used
        configMap:

      syscallMonitor:
        # datadog.securityAgent.runtime.syscallMonitor.enabled -- Set to true to enable syscall monitoring (recommended for troubleshooting only)
        enabled: false

      network:
        # datadog.securityAgent.runtime.network.enabled -- Set to true to enable the collection of CWS network events
        enabled: true

      activityDump:
        # datadog.securityAgent.runtime.activityDump.enabled -- Set to true to enable the collection of CWS activity dumps
        enabled: true
        # datadog.securityAgent.runtime.activityDump.tracedCgroupsCount -- Set to the number of containers that should be traced concurrently
        tracedCgroupsCount: 3
        # datadog.securityAgent.runtime.activityDump.cgroupDumpTimeout -- Set to the desired duration of a single container trace (in minutes)
        cgroupDumpTimeout: 20
        # datadog.securityAgent.runtime.activityDump.cgroupWaitListSize -- Set to the size of the wait list for already traced containers
        cgroupWaitListSize: 0
        pathMerge:
          # datadog.securityAgent.runtime.activityDump.pathMerge.enabled -- Set to true to enable the merging of similar paths
          enabled: false

      securityProfile:
        # datadog.securityAgent.runtime.securityProfile.enabled -- Set to true to enable CWS runtime security profiles
        enabled: true
        anomalyDetection:
          # datadog.securityAgent.runtime.securityProfile.anomalyDetection.enabled -- Set to true to enable CWS runtime drift events
          enabled: true
        autoSuppression:
          # datadog.securityAgent.runtime.securityProfile.autoSuppression.enabled -- Set to true to enable CWS runtime auto suppression
          enabled: true

      enforcement:
        # datadog.securityAgent.runtime.enforcement.enabled -- Set to false to disable CWS runtime enforcement
        enabled: true
# datadog.networkPolicy.flavor -- Flavor of the network policy to use. # Can be: # * kubernetes for networking.k8s.io/v1/NetworkPolicy # * cilium for cilium.io/v2/CiliumNetworkPolicy flavor: kubernetes cilium: # datadog.networkPolicy.cilium.dnsSelector -- Cilium selector of the DNS server entity # @default -- kube-dns in namespace kube-system dnsSelector: toEndpoints: - matchLabels: "k8s:io.kubernetes.pod.namespace": kube-system "k8s:k8s-app": kube-dns ## Configure prometheus scraping autodiscovery ## ref: https://docs.datadoghq.com/agent/kubernetes/prometheus/ prometheusScrape: # datadog.prometheusScrape.enabled -- Enable autodiscovering pods and services exposing prometheus metrics. enabled: false # datadog.prometheusScrape.serviceEndpoints -- Enable generating dedicated checks for service endpoints. serviceEndpoints: false # datadog.prometheusScrape.additionalConfigs -- Allows adding advanced openmetrics check configurations with custom discovery rules. (Requires Agent version 7.27+) additionalConfigs: [] # - # autodiscovery: # kubernetes_annotations: # include: # custom_include_label: 'true' # exclude: # custom_exclude_label: 'true' # kubernetes_container_names: # - my-app # configurations: # - send_distribution_buckets: true # timeout: 5 # datadog.prometheusScrape.version -- Version of the openmetrics check to schedule by default. # See https://datadoghq.dev/integrations-core/legacy/prometheus/#config-changes-between-versions for the differences between the two versions. # (Version 2 requires Agent version 7.34+) version: 2 # datadog.ignoreAutoConfig -- List of integrations for which the auto_conf.yaml should be ignored. ## ref: https://docs.datadoghq.com/agent/faq/auto_conf/ ignoreAutoConfig: [] # - redisdb # - kubernetes_state # datadog.containerExclude -- Exclude containers from Agent Autodiscovery, as a space-separated list ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#exclude-containers containerExclude: # "image:datadog/agent" # datadog.containerInclude -- Include containers in Agent Autodiscovery, as a space-separated list. # If a container matches an include rule, it’s always included in Autodiscovery ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers containerInclude: # datadog.containerExcludeLogs -- Exclude logs from Agent Autodiscovery, as a space-separated list containerExcludeLogs: # datadog.containerIncludeLogs -- Include logs in Agent Autodiscovery, as a space-separated list containerIncludeLogs: # datadog.containerExcludeMetrics -- Exclude metrics from Agent Autodiscovery, as a space-separated list containerExcludeMetrics: # datadog.containerIncludeMetrics -- Include metrics in Agent Autodiscovery, as a space-separated list containerIncludeMetrics: # datadog.celWorkloadExclude -- Exclude workloads using a CEL-based definition in the Agent. (Requires Agent 7.73.0+) # ref: https://docs.datadoghq.com/containers/guide/container-discovery-management/ celWorkloadExclude: # datadog.excludePauseContainer -- Exclude pause containers from Agent Autodiscovery. ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#pause-containers excludePauseContainer: true containerLifecycle: # datadog.containerLifecycle.enabled -- Enable container lifecycle events collection enabled: true
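## For illustration only, a common Autodiscovery pattern: exclude every image,
## then re-include images from a hypothetical internal registry:
# datadog:
#   containerExclude: "image:.*"
#   containerInclude: "image:registry.example.com/.*"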
csi: # datadog.csi.enabled -- Enable the Datadog CSI driver # Requires version 7.67 or later of the cluster agent # Note: # - When set to true, the CSI driver subchart will be installed automatically. # - Do not install the CSI driver separately if this is enabled, or you may hit conflicts. enabled: false dataPlane: # datadog.dataPlane.enabled -- Whether or not the data plane is enabled # # Requires version 7.74 or later of the Datadog Agent. # # The data plane feature is currently in preview. Please reach out to your Datadog representative for more information. enabled: false dogstatsd: # datadog.dataPlane.dogstatsd.enabled -- Whether or not DogStatsD is enabled in the data plane enabled: true ## Datadog Operator ## * Enable the Datadog Operator chart dependency. ## * Configure the Datadog Operator sub-chart using the values config, `operator`. ## For all available Operator chart options see: https://github.com/DataDog/helm-charts/blob/main/charts/datadog-operator/values.yaml operator: # datadog.operator.enabled -- Enable the Datadog Operator. enabled: true # datadog.operator.migration.enabled -- Enable migration of Agent workloads to be managed by the Datadog Operator. # Creates a DatadogAgent manifest based on current release's values.yaml. migration: enabled: false # datadog.operator.migration.preview -- Set to true to preview the DatadogAgent manifest mapped from the # Helm release's values.yaml. Mapped DatadogAgent manifest can be viewed by checking the `dda-mapper` # container logs in the migration job. preview: false # datadog.operator.migration.userValues -- Provide datadog chart values as a YAML string to be mapped to the DatadogAgent manifest. # Use --set-file to pass the file contents: helm install datadog ./charts/datadog --set-file datadog.operator.migration.userValues=myValues.yaml -f myValues.yaml userValues: "" # Configuration related to Dynamic Instrumentation for Go services. dynamicInstrumentationGo: # datadog.dynamicInstrumentationGo.enabled -- Enable Dynamic Instrumentation and Live Debugger for Go services. enabled: false # Configuration related to Workload Autoscaling autoscaling: workload: # datadog.autoscaling.workload.enabled -- Enable Workload Autoscaling.
enabled: ## This is the Datadog Cluster Agent implementation that handles cluster-wide ## metrics more cleanly, separates concerns for better rbac, and implements ## the external metrics API so you can autoscale HPAs based on datadog metrics ## ref: https://docs.datadoghq.com/agent/kubernetes/cluster/ clusterAgent: # clusterAgent.enabled -- Set this to false to disable Datadog Cluster Agent enabled: true # clusterAgent.shareProcessNamespace -- Set the process namespace sharing on the Datadog Cluster Agent shareProcessNamespace: false ## Define the Datadog Cluster-Agent image to work with image: # clusterAgent.image.name -- Cluster Agent image name to use (relative to `registry`) name: cluster-agent # clusterAgent.image.tag -- Cluster Agent image tag to use tag: 7.78.0 # clusterAgent.image.digest -- Cluster Agent image digest to use, takes precedence over tag if specified digest: "" # clusterAgent.image.repository -- Override default registry + image.name for Cluster Agent repository: # clusterAgent.image.pullPolicy -- Cluster Agent image pullPolicy pullPolicy: IfNotPresent # clusterAgent.image.pullSecrets -- Cluster Agent repository pullSecret (ex: specify docker registry credentials) ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod pullSecrets: [] # - name: "" # clusterAgent.image.doNotCheckTag -- Skip the version and chart compatibility check ## By default, the version passed in clusterAgent.image.tag is checked ## for compatibility with the version of the chart. ## This boolean permits completely skipping this check. ## This is useful, for example, for custom tags that are not ## respecting semantic versioning. doNotCheckTag: # false # clusterAgent.securityContext -- Allows you to overwrite the default PodSecurityContext on the cluster-agent pods. securityContext: {} containers: clusterAgent: # clusterAgent.containers.clusterAgent.securityContext -- Specify securityContext on the cluster-agent container. securityContext: allowPrivilegeEscalation: false readOnlyRootFilesystem: true initContainers: # clusterAgent.containers.initContainers.securityContext -- Specify securityContext on the initContainers. securityContext: {} # clusterAgent.containers.initContainers.resources -- Resource requests and limits for the Cluster Agent init containers resources: {} # requests: # cpu: 100m # memory: 200Mi # limits: # cpu: 100m # memory: 200Mi # clusterAgent.command -- Command to run in the Cluster Agent container as entrypoint command: [] # clusterAgent.token -- Cluster Agent token is a preshared key between node agents and cluster agent (autogenerated if empty, needs to be at least 32 characters a-zA-Z) token: "" # clusterAgent.tokenExistingSecret -- Existing secret name to use for Cluster Agent token. Put the Cluster Agent token in a key named `token` inside the Secret tokenExistingSecret: "" # clusterAgent.replicas -- Specify the number of Cluster Agent replicas; if > 1, the Cluster Agent can work in high-availability (HA) mode. replicas: 1
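## For illustration only, a Cluster Agent HA setup with a pre-created token
## Secret (hypothetical Secret name; the Secret must hold the token under a
## `token` key):
# clusterAgent:
#   replicas: 2
#   tokenExistingSecret: "datadog-cluster-agent-token"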
# clusterAgent.revisionHistoryLimit -- The number of old ReplicaSets to keep in this Deployment. revisionHistoryLimit: 10 ## Provide Cluster Agent Deployment pod(s) RBAC configuration rbac: # clusterAgent.rbac.create -- If true, create & use RBAC resources create: true # clusterAgent.rbac.flareAdditionalPermissions -- If true, add Secrets and Configmaps get/list permissions to retrieve user Datadog Helm values from Cluster Agent namespace flareAdditionalPermissions: true # clusterAgent.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if clusterAgent.rbac.create is false serviceAccountName: default # clusterAgent.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if clusterAgent.rbac.create is true serviceAccountAnnotations: {} # clusterAgent.rbac.serviceAccountAdditionalLabels -- Labels to add to the ServiceAccount if clusterAgent.rbac.create is true serviceAccountAdditionalLabels: {} # clusterAgent.rbac.automountServiceAccountToken -- If true, automatically mount the ServiceAccount's API credentials if clusterAgent.rbac.create is true automountServiceAccountToken: true ## Provide Cluster Agent pod security configuration podSecurity: podSecurityPolicy: # clusterAgent.podSecurity.podSecurityPolicy.create -- If true, create a PodSecurityPolicy resource for Cluster Agent pods create: false securityContextConstraints: # clusterAgent.podSecurity.securityContextConstraints.create -- If true, create a SCC resource for Cluster Agent pods create: false # Enable the metricsProvider to be able to scale based on metrics in Datadog metricsProvider: # clusterAgent.metricsProvider.enabled -- Set this to true to enable Metrics Provider enabled: false # clusterAgent.metricsProvider.registerAPIService -- Set this to false to disable external metrics registration as an APIService registerAPIService: true # clusterAgent.metricsProvider.wpaController -- Enable informer and controller of the watermark pod autoscaler ## Note: You need to install the `WatermarkPodAutoscaler` CRD before enabling this option wpaController: false # clusterAgent.metricsProvider.useDatadogMetrics -- Enable usage of DatadogMetric CRD to autoscale on arbitrary Datadog queries ## Note: It will install DatadogMetrics CRD automatically (it may conflict with previous installations) useDatadogMetrics: false # clusterAgent.metricsProvider.createReaderRbac -- Create `external-metrics-reader` RBAC automatically (to allow HPA to read data from Cluster Agent) createReaderRbac: true # clusterAgent.metricsProvider.aggregator -- Define the aggregator the cluster agent will use to process the metrics. The options are (avg, min, max, sum) aggregator: avg ## Configuration for the service for the cluster-agent metrics server service: # clusterAgent.metricsProvider.service.type -- Set type of cluster-agent metrics server service type: ClusterIP # clusterAgent.metricsProvider.service.port -- Set port of cluster-agent metrics server service (Kubernetes >= 1.15) port: 8443
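## For illustration only, enabling the external metrics server so HPAs can
## scale on Datadog queries through DatadogMetric objects (also requires a
## Datadog application key, see datadog.appKey):
# clusterAgent:
#   metricsProvider:
#     enabled: true
#     useDatadogMetrics: true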
# clusterAgent.metricsProvider.endpoint -- Override the external metrics provider endpoint. If not set, the cluster-agent defaults to `datadog.site` endpoint: # https://api.datadoghq.com # clusterAgent.env -- Set environment variables specific to Cluster Agent ## The Cluster-Agent supports many additional environment variables ## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options env: [] # clusterAgent.envFrom -- Set environment variables specific to Cluster Agent from configMaps and/or secrets ## The Cluster-Agent supports many additional environment variables ## ref: https://docs.datadoghq.com/agent/cluster_agent/commands/#cluster-agent-options envFrom: [] # - configMapRef: # name: # - secretRef: # name: # clusterAgent.envDict -- Set environment variables specific to Cluster Agent defined in a dict envDict: {} # <ENV_VAR>: <VALUE> admissionController: # clusterAgent.admissionController.enabled -- Enable the admissionController to be able to inject APM/Dogstatsd config and standard tags (env, service, version) automatically into your pods enabled: true # clusterAgent.admissionController.validation -- Validation Webhook configuration options validation: # clusterAgent.admissionController.validation.enabled -- Enabled enables the Admission Controller validation webhook. Default: true. (Requires Agent 7.59.0+). enabled: true # clusterAgent.admissionController.mutation -- Mutation Webhook configuration options mutation: # clusterAgent.admissionController.mutation.enabled -- Enabled enables the Admission Controller mutation webhook. Default: true. (Requires Agent 7.59.0+). enabled: true # clusterAgent.admissionController.webhookName -- Name of the validatingwebhookconfiguration and mutatingwebhookconfiguration created by the cluster-agent webhookName: datadog-webhook # clusterAgent.admissionController.mutateUnlabelled -- Enable injecting config without having the pod label 'admission.datadoghq.com/enabled="true"' mutateUnlabelled: false # clusterAgent.admissionController.configMode -- The kind of configuration to be injected, it can be "hostip", "service", "socket" or "csi". ## If clusterAgent.admissionController.configMode is not set: ## * and datadog.apm.socketEnabled is true, the Admission Controller uses socket. ## * and datadog.apm.portEnabled is true, the Admission Controller uses hostip. ## * and datadog.apm.useLocalService is true and the aforementioned two are false, the Admission Controller uses service. ## * Otherwise, the Admission Controller defaults to hostip. ## Note: "service" mode relies on the internal traffic service to target the agent running on the local node (requires Kubernetes v1.22+). ## Note: "csi" mode requires enabling csi with `datadog.csi.enabled`. If not set, the admission controller will fallback to "socket" mode. ## Note: "csi" mode requires version 7.65 or later of the cluster agent. ## ref: https://docs.datadoghq.com/agent/cluster_agent/admission_controller/#configure-apm-and-dogstatsd-communication-mode configMode: # "hostip", "socket", "csi" or "service" # clusterAgent.admissionController.failurePolicy -- Set the failure policy for dynamic admission control. ## The default of Ignore means that pods will still be admitted even if the webhook is unavailable to inject them. ## Setting to Fail will require the admission controller to be present and pods to be injected before they are allowed to run. failurePolicy: Ignore
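## For illustration only, pinning APM/DogStatsD injection to socket mode
## instead of letting the Admission Controller pick a default:
# clusterAgent:
#   admissionController:
#     enabled: true
#     configMode: "socket"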
# clusterAgent.admissionController.containerRegistry -- Override the default registry for the admission controller. ## The clusterAgent uses this configuration for apm.instrumentation, agentSidecar, and cwsInstrumentation, if ## not otherwise specified. containerRegistry: remoteInstrumentation: # clusterAgent.admissionController.remoteInstrumentation.enabled -- Enable polling and applying library injection using Remote Config. ## This feature is in beta, and enables Remote Config in the Cluster Agent. It also requires Cluster Agent version 7.43+. ## Enabling this feature grants the Cluster Agent the permissions to patch Deployment objects in the cluster. enabled: false # clusterAgent.admissionController.port -- Set port of cluster-agent admission controller service port: 8000 cwsInstrumentation: # clusterAgent.admissionController.cwsInstrumentation.enabled -- Enable the CWS Instrumentation admission controller endpoint. enabled: false # clusterAgent.admissionController.cwsInstrumentation.mode -- Mode defines how the CWS Instrumentation should behave. # Options are "remote_copy" or "init_container" mode: remote_copy kubernetesAdmissionEvents: # clusterAgent.admissionController.kubernetesAdmissionEvents.enabled -- Enable the Kubernetes Admission Events feature. enabled: false probe: # clusterAgent.admissionController.probe.enabled -- Enable the admission controller connectivity probe. ## The probe periodically sends dry-run ConfigMap creation requests to verify the webhook is reachable from the API server. ## (Requires Cluster Agent 7.78.0+). enabled: false # clusterAgent.admissionController.probe.interval -- Seconds between probe executions. interval: 60 # clusterAgent.admissionController.probe.gracePeriod -- Seconds to wait at startup before the first probe. gracePeriod: 60 agentSidecarInjection: # clusterAgent.admissionController.agentSidecarInjection.enabled -- Enables Datadog Agent sidecar injection. ## When enabled, the admission controller mutating webhook will inject an Agent sidecar with minimal configuration in every pod meeting the configured criteria. enabled: false # clusterAgent.admissionController.agentSidecarInjection.provider -- Used by the admission controller to add infrastructure provider-specific configurations to the Agent sidecar. ## Currently only "fargate" is supported. To use the feature in other environments (including local testing) omit the config. ## ref: https://docs.datadoghq.com/integrations/eks_fargate provider: # clusterAgent.admissionController.agentSidecarInjection.clusterAgentCommunicationEnabled -- Enable communication between Agent sidecars and the Cluster Agent. clusterAgentCommunicationEnabled: true # clusterAgent.admissionController.agentSidecarInjection.clusterAgentTlsVerification -- TLS verification configuration for sidecar-to-cluster-agent communication. clusterAgentTlsVerification: # clusterAgent.admissionController.agentSidecarInjection.clusterAgentTlsVerification.enabled -- Enable TLS verification for Agent sidecars communicating with the Cluster Agent. enabled: false # clusterAgent.admissionController.agentSidecarInjection.clusterAgentTlsVerification.copyCaConfigMap -- Enable automatic creation of a ConfigMap containing the Cluster Agent's CA certificate in namespaces where sidecar injection occurs. copyCaConfigMap: false # clusterAgent.admissionController.agentSidecarInjection.containerRegistry -- Override the default registry for the sidecar Agent. containerRegistry: # clusterAgent.admissionController.agentSidecarInjection.imageName -- Override the default agents.image.name for the Agent sidecar.
imageName: # clusterAgent.admissionController.agentSidecarInjection.imageTag -- Override the default agents.image.tag for the Agent sidecar. imageTag: # clusterAgent.admissionController.agentSidecarInjection.selectors -- Defines the pod selector for sidecar injection, currently only one rule is supported. selectors: [] # - objectSelector: # matchLabels: # "podlabelKey1": podlabelValue1 # "podlabelKey2": podlabelValue2 # namespaceSelector: # matchLabels: # "nsLabelKey1": nsLabelValue1 # "nsLabelKey2": nsLabelValue2 # clusterAgent.admissionController.agentSidecarInjection.profiles -- Defines the sidecar configuration override, currently only one profile is supported. ## This setting allows overriding the sidecar Agent configuration by adding environment variables and providing resource settings. profiles: [] # - env: # - name: DD_ORCHESTRATOR_EXPLORER_ENABLED # value: "true" # resources: # requests: # cpu: "1" # memory: "512Mi" # limits: # cpu: "2" # memory: "1024Mi" # clusterAgent.confd -- Provide additional cluster check configurations. Each key will become a file in /conf.d. ## ref: https://docs.datadoghq.com/agent/autodiscovery/ confd: {} # mysql.yaml: |- # cluster_check: true # instances: # - host: # port: 3306 # username: datadog # password: # clusterAgent.advancedConfd -- Provide additional cluster check configurations. Each key is an integration containing several config files. ## ref: https://docs.datadoghq.com/agent/autodiscovery/ advancedConfd: {} # mysql.d: # 1.yaml: |- # cluster_check: true # instances: # - host: # port: 3306 # username: datadog # password: # 2.yaml: |- # cluster_check: true # instances: # - host: # port: 3306 # username: datadog # password: ## clusterAgent.kubernetesApiserverCheck -- Options for configuring the kube_apiserver integration. kubernetesApiserverCheck: # clusterAgent.kubernetesApiserverCheck.disableUseComponentStatus -- Set this to true to disable use_component_status for the kube_apiserver integration. disableUseComponentStatus: false # clusterAgent.resources -- Datadog cluster-agent resource requests and limits. resources: {} # requests: # cpu: 200m # memory: 256Mi # limits: # cpu: 200m # memory: 256Mi # clusterAgent.priorityClassName -- Name of the priorityClass to apply to the Cluster Agent priorityClassName: # system-cluster-critical # clusterAgent.nodeSelector -- Allow the Cluster Agent Deployment to be scheduled on selected nodes ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} # clusterAgent.tolerations -- Allow the Cluster Agent Deployment to schedule on tainted nodes (requires Kubernetes >= 1.6) ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ tolerations: [] # clusterAgent.affinity -- Allow the Cluster Agent Deployment to schedule using affinity rules ## By default, Cluster Agent Deployment Pods are forced to run on different Nodes.
## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # clusterAgent.topologySpreadConstraints -- Allow the Cluster Agent Deployment to schedule using pod topology spreading ## By default, no constraints are set, allowing cluster defaults to be used for scheduling ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ topologySpreadConstraints: [] # clusterAgent.healthPort -- Port number to use in the Cluster Agent for the healthz endpoint healthPort: 5556 privateActionRunner: # clusterAgent.privateActionRunner.enabled -- Enable the Private Action Runner to execute workflow actions enabled: false # clusterAgent.privateActionRunner.selfEnroll -- Enable self-enrollment for the Private Action Runner ## When enabled, the runner will automatically register itself with Datadog using the provided API/APP keys ## and store its identity in a Kubernetes secret. Requires leader election to be enabled. selfEnroll: true # clusterAgent.privateActionRunner.identitySecretName -- Name of the Kubernetes secret used to store PAR identity when self-enrollment is enabled ## The Cluster Agent will create and manage this secret for storing the enrolled runner's URN and private key ## RBAC permissions are granted specifically for this secret name identitySecretName: "datadog-private-action-runner-identity" # clusterAgent.privateActionRunner.urn -- URN of the Private Action Runner (required if selfEnroll is false) ## Format: urn:datadog:private-action-runner:organization:<ORG_ID>:runner:<RUNNER_ID> urn: # "urn:datadog:private-action-runner:organization:123456:runner:abc-def" # clusterAgent.privateActionRunner.privateKey -- Private key for the Private Action Runner (required if selfEnroll is false) ## This key is used to authenticate the runner with Datadog privateKey: # "" # clusterAgent.privateActionRunner.identityFromExistingSecret -- Use existing Secret which stores the Private Action Runner URN and private key ## The secret should contain 'urn' and 'private_key' keys ## If set, this parameter takes precedence over "urn" and "privateKey" identityFromExistingSecret: # "" # clusterAgent.privateActionRunner.actionsAllowlist -- List of actions executable by the Private Action Runner actionsAllowlist: [] # - "com.datadoghq.http.request" # - "com.datadoghq.kubernetes.core.*"
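## For illustration only, a self-enrolling Private Action Runner restricted to
## HTTP actions (allowlist value taken from the example above):
# clusterAgent:
#   privateActionRunner:
#     enabled: true
#     selfEnroll: true
#     actionsAllowlist:
#       - "com.datadoghq.http.request"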
# clusterAgent.privateActionRunner.k8sRemediationEnabled -- Enable k8s remediation RBAC for the Private Action Runner ## When enabled, a ClusterRole and ClusterRoleBinding are created granting the Cluster Agent ## permissions to read/patch workloads (Deployments, DaemonSets, StatefulSets, ReplicaSets, Pods) ## and manage ConfigMaps and Events cluster-wide. k8sRemediationEnabled: false # clusterAgent.livenessProbe -- Override default Cluster Agent liveness probe settings # @default -- Every 15s / 6 KO / 1 OK livenessProbe: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 6 # clusterAgent.readinessProbe -- Override default Cluster Agent readiness probe settings # @default -- Every 15s / 6 KO / 1 OK readinessProbe: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 6 # clusterAgent.startupProbe -- Override default Cluster Agent startup probe settings # @default -- Every 15s / 6 KO / 1 OK startupProbe: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 6 # clusterAgent.strategy -- Allow the Cluster Agent deployment to perform a rolling update on helm update ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy strategy: type: RollingUpdate rollingUpdate: maxSurge: 1 maxUnavailable: 0 # clusterAgent.deploymentAnnotations -- Annotations to add to the cluster-agent's deployment deploymentAnnotations: {} # key: "value" # clusterAgent.podAnnotations -- Annotations to add to the cluster-agent's pod(s) podAnnotations: {} # key: "value" # clusterAgent.useHostNetwork -- Bind ports on the hostNetwork ## Useful for CNI networking where hostPort might ## not be supported. The ports need to be available on all hosts. It can be ## used for custom metrics instead of a service endpoint. ## ## WARNING: Make sure that hosts using this are properly firewalled otherwise ## metrics and traces are accepted from any host able to connect to this host. # useHostNetwork: false # clusterAgent.dnsConfig -- Specify DNS configuration options for Datadog Cluster Agent containers, e.g. ndots ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config dnsConfig: {} # options: # - name: ndots # value: "1" # clusterAgent.volumes -- Specify additional volumes to mount in the cluster-agent container volumes: [] # - hostPath: # path: # name: # clusterAgent.volumeMounts -- Specify additional volumes to mount in the cluster-agent container volumeMounts: [] # - name: # mountPath: # readOnly: true # clusterAgent.datadog_cluster_yaml -- Specify custom contents for the datadog cluster agent config (datadog-cluster.yaml) datadog_cluster_yaml: {} # clusterAgent.createPodDisruptionBudget -- Create pod disruption budget for Cluster Agent deployments # DEPRECATED. Use clusterAgent.pdb.create instead createPodDisruptionBudget: false pdb: # clusterAgent.pdb.create -- Enable pod disruption budget for Cluster Agent deployments. ## Only one of `minAvailable` or `maxUnavailable` can be set. More information: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ ## By default, minAvailable is set to 1 for cluster agent. create: false # clusterAgent.pdb.minAvailable -- Minimum number of pods that must remain available during a disruption (defaults to 1) minAvailable: # clusterAgent.pdb.maxUnavailable -- Maximum number of pods that can be unavailable during a disruption maxUnavailable:
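## For illustration only, a PDB keeping at least one Cluster Agent replica
## available (only meaningful with clusterAgent.replicas >= 2):
# clusterAgent:
#   pdb:
#     create: true
#     minAvailable: 1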
networkPolicy: # clusterAgent.networkPolicy.create -- If true, create a NetworkPolicy for the cluster agent. # DEPRECATED. Use datadog.networkPolicy.create instead create: false # clusterAgent.additionalLabels -- Adds labels to the Cluster Agent deployment and pods additionalLabels: {} # key: "value" # clusterAgent.containerExclude -- Exclude containers from the Cluster Agent # Autodiscovery, as a space-separated list. (Requires Agent/Cluster Agent 7.50.0+) ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#exclude-containers containerExclude: # "image:datadog/agent" # clusterAgent.containerInclude -- Include containers in the Cluster Agent Autodiscovery, # as a space-separated list. If a container matches an include rule, it’s # always included in the Autodiscovery. (Requires Agent/Cluster Agent 7.50.0+) ## ref: https://docs.datadoghq.com/agent/guide/autodiscovery-management/?tab=containerizedagent#include-containers containerInclude: # clusterAgent.celWorkloadExclude -- Exclude workloads using a CEL-based definition in the Cluster Agent. (Requires Agent 7.73.0+) # ref: https://docs.datadoghq.com/containers/guide/container-discovery-management/ celWorkloadExclude: ## This section lets you configure the agents deployed by this chart to connect to a Cluster Agent ## deployed independently existingClusterAgent: # existingClusterAgent.join -- set this to true if you want the agents deployed by this chart to # connect to a Cluster Agent deployed independently join: false # existingClusterAgent.tokenSecretName -- Existing secret name to use for external Cluster Agent token tokenSecretName: # # existingClusterAgent.serviceName -- Existing service name to use for reaching the external Cluster Agent serviceName: # # existingClusterAgent.clusterchecksEnabled -- set this to false if you don’t want the agents to run the cluster checks of the joined external cluster agent clusterchecksEnabled: true # useFIPSAgent -- Setting useFIPSAgent to true makes the helm chart use Agent images that are FIPS-compliant for use in GOVCLOUD environments. # Setting this to true disables the fips-proxy sidecar and is the recommended method for enabling FIPS compliance. useFIPSAgent: false ## fips is used to enable and configure the fips-proxy sidecar. fips: # fips.enabled -- Enable fips proxy sidecar. # The fips-proxy method is getting phased out in favor of FIPS-compliant images (refer to the `useFIPSAgent` setting). enabled: false # TODO: Option to override config of the FIPS side car: /etc/datadog-fips-proxy/datadog-fips-proxy.cfg # customConfig: false # fips.port -- Specifies which port is used by the containers to communicate to the FIPS sidecar. # This setting is only used for the fips-proxy sidecar. port: 9803 # fips.portRange -- Specifies the number of ports used, defaults to 13 https://github.com/DataDog/datadog-agent/blob/7.44.x/pkg/config/config.go#L1564-L1577. # This setting is only used for the fips-proxy sidecar. portRange: 15 # fips.use_https -- Option to enable https. # This setting is only used for the fips-proxy sidecar. use_https: false # fips.resources -- Resource requests and limits for the FIPS sidecar container. # This setting is only used for the fips-proxy sidecar. resources: {} # limits: # cpu: 100m # memory: 256Mi # requests: # cpu: 20m # memory: 64Mi # fips.local_address -- Set local IP address. # This setting is only used for the fips-proxy sidecar. local_address: "127.0.0.1" ## Define the Datadog image to work with image: # fips.image.name -- Define the FIPS sidecar container image name.
name: fips-proxy # fips.image.tag -- Define the FIPS sidecar container version to use. tag: 1.1.23 # fips.image.pullPolicy -- Define the FIPS sidecar image pull policy pullPolicy: IfNotPresent # fips.image.digest -- Define the FIPS sidecar image digest to use, takes precedence over `fips.image.tag` if specified. digest: "" # fips.image.repository -- Override default registry + image.name for the FIPS sidecar container. repository: # fips.customFipsConfig -- Configure a custom configMap to provide the FIPS configuration. Specify custom contents for the FIPS proxy sidecar container config (/etc/datadog-fips-proxy/datadog-fips-proxy.cfg). If empty, the default FIPS proxy sidecar container config is used. ## Note: Use `|` to declare multi-line configuration. ## ref: https://docs.datadoghq.com/agent/guide/agent-fips-proxy customFipsConfig: {} # | # foobar # foo bar baz agents: # agents.enabled -- You should keep Datadog DaemonSet enabled! ## The exceptional case could be a situation when you need to run ## single Datadog pod per every namespace, but you do not need to ## re-create a DaemonSet for every non-default namespace install. ## Note: StatsD and DogStatsD work over UDP, so you may not ## get guaranteed delivery of the metrics in Datadog-per-namespace setup! enabled: true # agents.shareProcessNamespace -- Set the process namespace sharing on the Datadog Daemonset shareProcessNamespace: false # agents.revisionHistoryLimit -- The number of ControllerRevision to keep in this DaemonSet. revisionHistoryLimit: 10 ## Define the Datadog image to work with image: # agents.image.name -- Datadog Agent image name to use (relative to `registry`) ## use "dogstatsd" for Standalone Datadog Agent DogStatsD 7 name: agent # agents.image.tag -- Define the Agent version to use tag: 7.78.0 # agents.image.digest -- Define Agent image digest to use, takes precedence over tag if specified digest: "" # agents.image.tagSuffix -- Suffix to append to Agent tag ## Ex: ## jmx to enable jmx fetch collection ## servercore to get Windows images based on servercore ## full to get as many features as possible, currently ddot-collector and jmx (e.g. 7.67.0-full) tagSuffix: "" # agents.image.repository -- Override default registry + image.name for Agent repository: # agents.image.doNotCheckTag -- Skip the version and chart compatibility check ## By default, the version passed in agents.image.tag is checked ## for compatibility with the version of the chart. ## This boolean permits completely skipping this check.
## This is useful, for example, for custom tags that are not ## respecting semantic versioning doNotCheckTag: # false # agents.image.pullPolicy -- Datadog Agent image pull policy pullPolicy: IfNotPresent # agents.image.pullSecrets -- Datadog Agent repository pullSecret (ex: specify docker registry credentials) ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod pullSecrets: [] # - name: "" ## Provide Daemonset RBAC configuration rbac: # agents.rbac.create -- If true, create & use RBAC resources create: true # agents.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if agents.rbac.create is false serviceAccountName: default # agents.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if agents.rbac.create is true serviceAccountAnnotations: {} # agents.rbac.serviceAccountAdditionalLabels -- Labels to add to the ServiceAccount if agents.rbac.create is true serviceAccountAdditionalLabels: {} # agents.rbac.automountServiceAccountToken -- If true, automatically mount the ServiceAccount's API credentials if agents.rbac.create is true automountServiceAccountToken: true ## Provide Daemonset PodSecurityPolicy configuration podSecurity: podSecurityPolicy: # agents.podSecurity.podSecurityPolicy.create -- If true, create a PodSecurityPolicy resource for Agent pods create: false securityContextConstraints: # agents.podSecurity.securityContextConstraints.create -- If true, create a SecurityContextConstraints resource for Agent pods create: false # agents.podSecurity.seLinuxContext -- Provide seLinuxContext configuration for PSP/SCC # @default -- Must run as spc_t seLinuxContext: rule: MustRunAs seLinuxOptions: user: system_u role: system_r type: spc_t level: s0 # agents.podSecurity.privileged -- If true, allow running privileged containers privileged: false # agents.podSecurity.capabilities -- Allowed capabilities ## note: capabilities must contain all agents.containers.*.securityContext.capabilities. capabilities: - SYS_ADMIN - SYS_RESOURCE - SYS_PTRACE - NET_ADMIN - NET_BROADCAST - NET_RAW - IPC_LOCK - CHOWN - AUDIT_CONTROL - AUDIT_READ - DAC_READ_SEARCH - MKNOD # agents.podSecurity.allowedUnsafeSysctls -- Allowed unsafe sysctls allowedUnsafeSysctls: [] # agents.podSecurity.volumes -- Allowed volume types volumes: - configMap - downwardAPI - emptyDir - hostPath - secret # agents.podSecurity.seccompProfiles -- Allowed seccomp profiles seccompProfiles: - "runtime/default" - "localhost/system-probe" apparmor: # agents.podSecurity.apparmor.enabled -- If true, enable apparmor enforcement ## see: https://kubernetes.io/docs/tutorials/clusters/apparmor/ enabled: true # agents.podSecurity.apparmorProfiles -- Allowed apparmor profiles apparmorProfiles: - "runtime/default" - "unconfined" # agents.podSecurity.defaultApparmor -- Default AppArmor profile for all containers but system-probe defaultApparmor: runtime/default containers: agent: # agents.containers.agent.env -- Additional environment variables for the agent container env: [] # agents.containers.agent.envFrom -- Set environment variables specific to agent container from configMaps and/or secrets envFrom: [] # - configMapRef: # name: # - secretRef: # name: # agents.containers.agent.envDict -- Set environment variables specific to agent container defined in a dict envDict: {} # <ENV_VAR>: <VALUE>
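## For illustration only, envDict takes plain key/value pairs, e.g. tagging
## every node Agent (hypothetical tag values):
# agents:
#   containers:
#     agent:
#       envDict:
#         DD_TAGS: "team:platform env:staging"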
# agents.containers.agent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off. # If not set, fall back to the value of datadog.logLevel. logLevel: # INFO # agents.containers.agent.resources -- Resource requests and limits for the agent container. resources: {} # requests: # cpu: 200m # memory: 256Mi # limits: # cpu: 200m # memory: 256Mi # agents.containers.agent.healthPort -- Port number to use in the node agent for the healthz endpoint healthPort: 5555 # agents.containers.agent.livenessProbe -- Override default agent liveness probe settings # @default -- Every 15s / 6 KO / 1 OK livenessProbe: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 6 # agents.containers.agent.readinessProbe -- Override default agent readiness probe settings # @default -- Every 15s / 6 KO / 1 OK readinessProbe: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 6 # agents.containers.agent.startupProbe -- Override default agent startup probe settings # @default -- Every 15s / 6 KO / 1 OK startupProbe: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 6 # agents.containers.agent.securityContext -- Allows you to overwrite the default container SecurityContext for the agent container. securityContext: readOnlyRootFilesystem: true # agents.containers.agent.ports -- Allows to specify extra ports (hostPorts for instance) for this container ports: [] privateActionRunner: # agents.containers.privateActionRunner.env -- Additional environment variables for the private-action-runner container env: [] # agents.containers.privateActionRunner.envFrom -- Set environment variables specific to private-action-runner from configMaps and/or secrets envFrom: [] # agents.containers.privateActionRunner.envDict -- Set environment variables specific to private-action-runner defined in a dict envDict: {} # agents.containers.privateActionRunner.logLevel -- Set logging verbosity for the private-action-runner container logLevel: # agents.containers.privateActionRunner.resources -- Resource requests and limits for the private-action-runner container. resources: {} # requests: # cpu: 100m # memory: 128Mi # limits: # cpu: 100m # memory: 128Mi # agents.containers.privateActionRunner.securityContext -- Specify securityContext on the private-action-runner container. securityContext: readOnlyRootFilesystem: true capabilities: add: ["NET_RAW"] processAgent: # agents.containers.processAgent.env -- Additional environment variables for the process-agent container env: [] # agents.containers.processAgent.envFrom -- Set environment variables specific to process-agent from configMaps and/or secrets envFrom: [] # - configMapRef: # name: # - secretRef: # name: # agents.containers.processAgent.envDict -- Set environment variables specific to process-agent defined in a dict envDict: {} # <ENV_VAR>: <VALUE> # agents.containers.processAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off. # If not set, fall back to the value of datadog.logLevel. logLevel: # INFO # agents.containers.processAgent.resources -- Resource requests and limits for the process-agent container resources: {} # requests: # cpu: 100m # memory: 200Mi # limits: # cpu: 100m # memory: 200Mi # agents.containers.processAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the process-agent container.
securityContext: readOnlyRootFilesystem: true # agents.containers.processAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container ports: [] otelAgent: # agents.containers.otelAgent.env -- Additional environment variables for the otel-agent container env: [] # agents.containers.otelAgent.envFrom -- Set environment variables specific to otel-agent from configMaps and/or secrets envFrom: [] # - configMapRef: # name: # - secretRef: # name: # agents.containers.otelAgent.envDict -- Set environment variables specific to otel-agent defined in a dict envDict: {} # <ENV_VAR>: <VALUE> # agents.containers.otelAgent.resources -- Resource requests and limits for the otel-agent container resources: {} # requests: # cpu: 100m # memory: 200Mi # limits: # cpu: 100m # memory: 200Mi # agents.containers.otelAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the otel-agent container. securityContext: readOnlyRootFilesystem: true # agents.containers.otelAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container ports: [] # agents.containers.otelAgent.volumeMounts -- Specify additional volumes to mount in the otel-agent container volumeMounts: [] # - name: # mountPath: # readOnly: true hostProfiler: # agents.containers.hostProfiler.env -- Additional environment variables for the host-profiler container env: [] # agents.containers.hostProfiler.envFrom -- Set environment variables specific to host-profiler from configMaps and/or secrets envFrom: [] # - configMapRef: # name: # - secretRef: # name: # agents.containers.hostProfiler.envDict -- Set environment variables specific to host-profiler defined in a dict envDict: {} # <ENV_VAR>: <VALUE> # agents.containers.hostProfiler.resources -- Resource requests and limits for the host-profiler container resources: {} # requests: # cpu: 100m # memory: 200Mi # limits: # cpu: 100m # memory: 200Mi # agents.containers.hostProfiler.securityContext -- Allows you to overwrite the default container SecurityContext for the host-profiler container. securityContext: readOnlyRootFilesystem: true privileged: true # agents.containers.hostProfiler.volumeMounts -- Specify additional volumes to mount in the host-profiler container volumeMounts: [] # - name: # mountPath: # readOnly: true traceAgent: # agents.containers.traceAgent.env -- Additional environment variables for the trace-agent container env: [] # agents.containers.traceAgent.envFrom -- Set environment variables specific to trace-agent from configMaps and/or secrets envFrom: [] # - configMapRef: # name: # - secretRef: # name: # agents.containers.traceAgent.envDict -- Set environment variables specific to trace-agent defined in a dict envDict: {} # <ENV_VAR>: <VALUE> # agents.containers.traceAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off logLevel: # INFO # agents.containers.traceAgent.resources -- Resource requests and limits for the trace-agent container resources: {} # requests: # cpu: 100m # memory: 200Mi # limits: # cpu: 100m # memory: 200Mi # agents.containers.traceAgent.livenessProbe -- Override default agent liveness probe settings # @default -- Every 15s livenessProbe: initialDelaySeconds: 15 periodSeconds: 15 timeoutSeconds: 5 # agents.containers.traceAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the trace-agent container.
securityContext: readOnlyRootFilesystem: true # agents.containers.traceAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container ports: [] systemProbe: # agents.containers.systemProbe.env -- Additional environment variables for the system-probe container env: [] # agents.containers.systemProbe.envFrom -- Set environment variables specific to system-probe from configMaps and/or secrets envFrom: [] # - configMapRef: # name: # - secretRef: # name: # agents.containers.systemProbe.envDict -- Set environment variables specific to system-probe defined in a dict envDict: {} # <ENV_VAR>: <VALUE> # agents.containers.systemProbe.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off. # If not set, fall back to the value of datadog.logLevel. logLevel: # INFO # agents.containers.systemProbe.resources -- Resource requests and limits for the system-probe container resources: {} # requests: # cpu: 150m # memory: 200Mi # limits: # cpu: 300m # memory: 400Mi # agents.containers.systemProbe.securityContext -- Allows you to overwrite the default container SecurityContext for the system-probe container. ## agents.podSecurity.capabilities must reflect the changes made in securityContext.capabilities. securityContext: readOnlyRootFilesystem: true privileged: false capabilities: add: ["SYS_ADMIN", "SYS_RESOURCE", "SYS_PTRACE", "NET_ADMIN", "NET_BROADCAST", "NET_RAW", "IPC_LOCK", "CHOWN", "DAC_READ_SEARCH"] # agents.containers.systemProbe.ports -- Allows to specify extra ports (hostPorts for instance) for this container ports: [] securityAgent: # agents.containers.securityAgent.env -- Additional environment variables for the security-agent container env: [] # agents.containers.securityAgent.envFrom -- Set environment variables specific to security-agent from configMaps and/or secrets envFrom: [] # - configMapRef: # name: # - secretRef: # name: # agents.containers.securityAgent.envDict -- Set environment variables specific to security-agent defined in a dict envDict: {} # <ENV_VAR>: <VALUE> # agents.containers.securityAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off. # If not set, fall back to the value of datadog.logLevel. logLevel: # INFO # agents.containers.securityAgent.resources -- Resource requests and limits for the security-agent container resources: {} # requests: # cpu: 100m # memory: 300Mi # limits: # cpu: 100m # memory: 300Mi # agents.containers.securityAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the security-agent container. securityContext: readOnlyRootFilesystem: true # agents.containers.securityAgent.ports -- Allows to specify extra ports (hostPorts for instance) for this container ports: [] agentDataPlane: # agents.containers.agentDataPlane.env -- Additional environment variables for the agent-data-plane container env: [] # agents.containers.agentDataPlane.envFrom -- Set environment variables specific to agent-data-plane container from configMaps and/or secrets envFrom: [] # - configMapRef: # name: # - secretRef: # name: # agents.containers.agentDataPlane.envDict -- Set environment variables specific to agent-data-plane container defined in a dict envDict: {} # <ENV_VAR>: <VALUE> # agents.containers.agentDataPlane.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off. # If not set, fall back to the value of datadog.logLevel.
logLevel: # INFO # agents.containers.agentDataPlane.resources -- Resource requests and limits for the agent-data-plane container resources: {} # requests: # cpu: 100m # memory: 200Mi # limits: # cpu: 100m # memory: 200Mi # agents.containers.agentDataPlane.unprivilegedApiPort -- Port for unprivileged API server, used primarily for health checks unprivilegedApiPort: 5100 # agents.containers.agentDataPlane.privilegedApiPort -- Port for privileged API server, used for lower-level operations that # can alter the state of the ADP process or expose internal information privilegedApiPort: 5101 # agents.containers.agentDataPlane.telemetryApiPort -- Port for telemetry API server, used for exposing internal # telemetry to be scraped by the Agent telemetryApiPort: 5102 # agents.containers.agentDataPlane.livenessProbe -- Override default agent-data-plane liveness probe settings # @default -- Every 5s / 12 KO / 1 OK livenessProbe: initialDelaySeconds: 5 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 12 # agents.containers.agentDataPlane.readinessProbe -- Override default agent-data-plane readiness probe settings # @default -- Every 5s / 12 KO / 1 OK readinessProbe: initialDelaySeconds: 5 periodSeconds: 5 timeoutSeconds: 5 successThreshold: 1 failureThreshold: 12 # agents.containers.agentDataPlane.securityContext -- Allows you to overwrite the default container SecurityContext for the agent-data-plane container. securityContext: readOnlyRootFilesystem: true # agents.containers.agentDataPlane.ports -- Allows to specify extra ports (hostPorts for instance) for this container ports: [] initContainers: # agents.containers.initContainers.resources -- Resource requests and limits for the init containers resources: {} # requests: # cpu: 100m # memory: 200Mi # limits: # cpu: 100m # memory: 200Mi # agents.containers.initContainers.securityContext -- Allows you to overwrite the default container SecurityContext for the init containers. securityContext: {} # agents.containers.initContainers.volumeMounts -- Specify additional volumes to mount for the init containers volumeMounts: [] # agents.volumes -- Specify additional volumes to mount in the dd-agent container volumes: [] # - hostPath: # path: # name: # agents.volumeMounts -- Specify additional volumes to mount in all containers of the agent pod volumeMounts: [] # - name: # mountPath: # readOnly: true
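## For illustration only, mounting an extra hostPath into every container of
## the Agent pod (hypothetical path and volume name):
# agents:
#   volumes:
#     - name: custom-logs
#       hostPath:
#         path: /var/log/custom
#   volumeMounts:
#     - name: custom-logs
#       mountPath: /var/log/custom
#       readOnly: true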
# agents.useHostNetwork -- Bind ports on the hostNetwork ## Useful for CNI networking where hostPort might ## not be supported. The ports need to be available on all hosts. It can be ## used for custom metrics instead of a service endpoint. ## ## WARNING: Make sure that hosts using this are properly firewalled otherwise ## metrics and traces are accepted from any host able to connect to this host. useHostNetwork: false # agents.dnsConfig -- Specify DNS configuration options for Datadog Agent containers, e.g. ndots ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config dnsConfig: {} # options: # - name: ndots # value: "1" # agents.daemonsetAnnotations -- Annotations to add to the DaemonSet daemonsetAnnotations: {} # key: "value" # agents.podAnnotations -- Annotations to add to the DaemonSet's Pods podAnnotations: {} # key: "value" # agents.tolerations -- Allow the DaemonSet to schedule on tainted nodes (requires Kubernetes >= 1.6) tolerations: [] # agents.nodeSelector -- Allow the DaemonSet to schedule on selected nodes ## Ref: https://kubernetes.io/docs/user-guide/node-selection/ nodeSelector: {} # agents.affinity -- Allow the DaemonSet to schedule using affinity rules ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity affinity: {} # agents.updateStrategy -- Allow the DaemonSet to perform a rolling update on helm update ## ref: https://kubernetes.io/docs/tasks/manage-daemon/update-daemon-set/ updateStrategy: type: RollingUpdate rollingUpdate: maxUnavailable: "10%" # agents.priorityClassCreate -- Creates a priorityClass for the Datadog Agent's Daemonset pods. priorityClassCreate: false # agents.priorityClassName -- Sets PriorityClassName if defined priorityClassName: # agents.priorityPreemptionPolicyValue -- Set to "Never" to change the PriorityClass to non-preempting priorityPreemptionPolicyValue: PreemptLowerPriority # agents.priorityClassValue -- Value used to specify the priority of the scheduling of Datadog Agent's Daemonset pods. ## The PriorityClass uses PreemptLowerPriority. priorityClassValue: 1000000000 # agents.podLabels -- Sets podLabels if defined ## Note: These labels are also used as label selectors so they are immutable. podLabels: {} # agents.additionalLabels -- Adds labels to the Agent daemonset and pods additionalLabels: {} # key: "value" # agents.useConfigMap -- Configures a configmap to provide the agent configuration. Use this in combination with the `agents.customAgentConfig` parameter. useConfigMap: # false # agents.customAgentConfig -- Specify custom contents for the datadog agent config (datadog.yaml) ## ref: https://docs.datadoghq.com/agent/guide/agent-configuration-files/?tab=agentv6 ## ref: https://github.com/DataDog/datadog-agent/blob/main/pkg/config/config_template.yaml ## Note the `agents.useConfigMap` needs to be set to `true` for this parameter to be taken into account. customAgentConfig: {} # # # Enable java cgroup handling. Only one of those options should be enabled, # # depending on the agent version you are using along that chart. # # # agent version < 6.15 # # jmx_use_cgroup_memory_limit: true # # # agent version >= 6.15 # # jmx_use_container_support: true networkPolicy: # agents.networkPolicy.create -- If true, create a NetworkPolicy for the agents. # DEPRECATED. Use datadog.networkPolicy.create instead create: false localService: # agents.localService.overrideName -- Name of the internal traffic service to target the agent running on the local node overrideName: "" # agents.localService.forceLocalServiceEnabled -- Force the creation of the internal traffic policy service to target the agent running on the local node. # By default, the internal traffic service is created only on Kubernetes 1.22+ where the feature became beta and enabled by default.
# This option allows forcing the creation of the internal traffic service on Kubernetes 1.21, where the feature was alpha and required a feature gate to be explicitly enabled. forceLocalServiceEnabled: false # agents.lifecycle -- Configure the lifecycle of the Agent. # Note: The `exec` lifecycle handler is not supported in GKE Autopilot. lifecycle: {} # preStop: # sleep: # seconds: 5 # exec: # command: ["/bin/sh", "-c", "sleep 70"] # postStart: # exec: # command: ["/bin/sh", "-c", "sleep 70"] # sleep: # seconds: 5 # agents.terminationGracePeriodSeconds -- (int) Configure the termination grace period for the Agent terminationGracePeriodSeconds: # 70 clusterChecksRunner: # clusterChecksRunner.enabled -- If true, deploys agents dedicated to running the Cluster Checks instead of running them in the Daemonset's agents. ## If both clusterChecksRunner.enabled and datadog.kubeStateMetricsCore.enabled are true, consider enabling datadog.kubeStateMetricsCore.useClusterCheckRunners as well. ## If datadog.kubeStateMetricsCore.useClusterCheckRunners is enabled, it's recommended to enable this flag as well so all Cluster Checks run on Cluster Checks Runners instead of node agents. ## ref: https://docs.datadoghq.com/agent/autodiscovery/clusterchecks/ enabled: false remoteConfiguration: # clusterChecksRunner.remoteConfiguration.enabled -- Set to true to enable remote configuration on the Cluster Checks Runner. enabled: false ## Define the Datadog image to work with. image: # clusterChecksRunner.image.name -- Datadog Agent image name to use (relative to `registry`) name: agent # clusterChecksRunner.image.tag -- Define the Agent version to use tag: 7.78.0 # clusterChecksRunner.image.digest -- Define Agent image digest to use, takes precedence over tag if specified digest: "" # clusterChecksRunner.image.tagSuffix -- Suffix to append to Agent tag ## Ex: ## jmx to enable jmx fetch collection ## servercore to get Windows images based on servercore tagSuffix: "" # clusterChecksRunner.image.repository -- Override default registry + image.name for Cluster Check Runners repository: # clusterChecksRunner.image.pullPolicy -- Datadog Agent image pull policy pullPolicy: IfNotPresent # clusterChecksRunner.image.pullSecrets -- Datadog Agent repository pullSecret (ex: specify docker registry credentials) ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod pullSecrets: [] # - name: "" # clusterChecksRunner.createPodDisruptionBudget -- Create the pod disruption budget to apply to the cluster checks agents # DEPRECATED. Use clusterChecksRunner.pdb.create instead createPodDisruptionBudget: false
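## For illustration only, moving all Cluster Checks (including the
## kube-state-metrics core check) onto two dedicated runners; assumes the
## chart's datadog.clusterChecks and datadog.kubeStateMetricsCore options
## documented elsewhere in this file:
# datadog:
#   clusterChecks:
#     enabled: true
#   kubeStateMetricsCore:
#     enabled: true
#     useClusterCheckRunners: true
# clusterChecksRunner:
#   enabled: true
#   replicas: 2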
pdb: # clusterChecksRunner.pdb.create -- Enable pod disruption budget for Cluster Checks Runner deployments. ## Only one of `minAvailable` or `maxUnavailable` can be set. More information: https://kubernetes.io/docs/tasks/run-application/configure-pdb/ ## By default, maxUnavailable is set to 1 for cluster checks runners. create: false # clusterChecksRunner.pdb.minAvailable -- Minimum number of pods that must remain available during a disruption minAvailable: # clusterChecksRunner.pdb.maxUnavailable -- Maximum number of pods that can be unavailable during a disruption maxUnavailable: # Provide Cluster Checks Deployment pods RBAC configuration rbac: # clusterChecksRunner.rbac.create -- If true, create & use RBAC resources create: true # clusterChecksRunner.rbac.dedicated -- If true, use a dedicated RBAC resource for the cluster checks agent(s) dedicated: false # clusterChecksRunner.rbac.serviceAccountAnnotations -- Annotations to add to the ServiceAccount if clusterChecksRunner.rbac.dedicated is true serviceAccountAnnotations: {} # clusterChecksRunner.rbac.serviceAccountAdditionalLabels -- Labels to add to the ServiceAccount if clusterChecksRunner.rbac.dedicated is true serviceAccountAdditionalLabels: {} # clusterChecksRunner.rbac.automountServiceAccountToken -- If true, automatically mount the ServiceAccount's API credentials if clusterChecksRunner.rbac.create is true automountServiceAccountToken: true # clusterChecksRunner.rbac.serviceAccountName -- Specify a preexisting ServiceAccount to use if clusterChecksRunner.rbac.create is false serviceAccountName: default # clusterChecksRunner.replicas -- Number of Cluster Checks Runner instances ## To run the Cluster Checks Runners in HA, keep clusterChecksRunner.replicas set to at least 2, ## and increase it according to the number of Cluster Checks. replicas: 2 # clusterChecksRunner.revisionHistoryLimit -- The number of old ReplicaSets to keep in this Deployment. revisionHistoryLimit: 10 # clusterChecksRunner.resources -- Datadog clusterchecks-agent resource requests and limits. resources: {} # requests: # cpu: 200m # memory: 500Mi # limits: # cpu: 200m # memory: 500Mi # clusterChecksRunner.affinity -- Allow the ClusterChecks Deployment to schedule using affinity rules. ## By default, ClusterChecks Deployment Pods are preferred to run on different Nodes.
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  affinity: {}

  # clusterChecksRunner.topologySpreadConstraints -- Allow the ClusterChecks Deployment to schedule using pod topology spreading
  ## By default, no constraints are set, allowing cluster defaults to be used for scheduling
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  topologySpreadConstraints: []

  # clusterChecksRunner.strategy -- Allow the ClusterChecks deployment to perform a rolling update on helm update
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0

  # clusterChecksRunner.dnsConfig -- Specify DNS configuration options for the cluster checks runner containers, e.g. ndots
  ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
  dnsConfig: {}
  # options:
  # - name: ndots
  #   value: "1"

  # clusterChecksRunner.priorityClassName -- Name of the priorityClass to apply to the Cluster checks runners
  priorityClassName: # system-cluster-critical

  # clusterChecksRunner.nodeSelector -- Allow the ClusterChecks Deployment to schedule on selected nodes
  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
  nodeSelector: {}

  # clusterChecksRunner.tolerations -- Tolerations for pod assignment
  ## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/
  tolerations: []

  # clusterChecksRunner.healthPort -- Port number to use in the Cluster Checks Runner for the healthz endpoint
  healthPort: 5557

  # clusterChecksRunner.livenessProbe -- Override default agent liveness probe settings
  # @default -- Every 15s / 6 KO / 1 OK
  ## In case of issues with the probe, you can disable it with the
  ## following values, to allow easier investigation:
  #
  # livenessProbe:
  #   exec:
  #     command: ["/bin/true"]
  #
  livenessProbe:
    initialDelaySeconds: 15
    periodSeconds: 15
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 6

  # clusterChecksRunner.readinessProbe -- Override default agent readiness probe settings
  # @default -- Every 15s / 6 KO / 1 OK
  ## In case of issues with the probe, you can disable it with the
  ## following values, to allow easier investigation:
  #
  # readinessProbe:
  #   exec:
  #     command: ["/bin/true"]
  #
  readinessProbe:
    initialDelaySeconds: 15
    periodSeconds: 15
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 6

  # clusterChecksRunner.startupProbe -- Override default agent startup probe settings
  # @default -- Every 15s / 6 KO / 1 OK
  ## In case of issues with the probe, you can disable it with the
  ## following values, to allow easier investigation:
  #
  # startupProbe:
  #   exec:
  #     command: ["/bin/true"]
  #
  startupProbe:
    initialDelaySeconds: 15
    periodSeconds: 15
    timeoutSeconds: 5
    successThreshold: 1
    failureThreshold: 6

  # clusterChecksRunner.deploymentAnnotations -- Annotations to add to the cluster-checks-runner's Deployment
  deploymentAnnotations: {}
  #   key: "value"

  # clusterChecksRunner.podAnnotations -- Annotations to add to the cluster-checks-runner's pod(s)
  podAnnotations: {}
  #   key: "value"

  # clusterChecksRunner.env -- Environment variables specific to Cluster Checks Runner
  ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#environment-variables
  env: []
  # - name:
  #   value:

  # clusterChecksRunner.envFrom -- Set environment variables specific to Cluster Checks Runner from configMaps and/or secrets
  ## envFrom to pass configmaps or secrets as environment
  ## ref: https://github.com/DataDog/datadog-agent/tree/main/Dockerfiles/agent#environment-variables
  envFrom: []
  # - configMapRef:
  #     name:
  # - secretRef:
  #     name:

  # clusterChecksRunner.envDict -- Set environment variables specific to Cluster Checks Runner defined in a dict
  envDict: {}
  #   <ENV_VAR>: <VALUE>

  # clusterChecksRunner.volumes -- Specify additional volumes for the cluster checks pod
  volumes: []
  # - hostPath:
  #     path:
  #   name:

  # clusterChecksRunner.volumeMounts -- Specify additional volume mounts in the cluster checks container
  volumeMounts: []
  # - name:
  #   mountPath:
  #   readOnly: true

  networkPolicy:
    # clusterChecksRunner.networkPolicy.create -- If true, create a NetworkPolicy for the cluster checks runners.
    # DEPRECATED. Use datadog.networkPolicy.create instead
    create: false

  # clusterChecksRunner.additionalLabels -- Adds labels to the cluster checks runner deployment and pods
  additionalLabels: {}
  #   key: "value"

  # clusterChecksRunner.securityContext -- Allows you to overwrite the default PodSecurityContext on the clusterchecks pods.
  securityContext: {}

  containers:
    agent:
      # clusterChecksRunner.containers.agent.securityContext -- Specify securityContext on the agent container
      securityContext:
        readOnlyRootFilesystem: true
    initContainers:
      # clusterChecksRunner.containers.initContainers.securityContext -- Specify securityContext on the init containers
      securityContext: {}

  # clusterChecksRunner.ports -- Allows specifying extra ports (hostPorts for instance) for this container
  ports: []

operator:
  image:
    # operator.image.tag -- Define the Datadog Operator version to use
    tag: 1.25.0

  datadogAgent:
    # operator.datadogAgent.enabled -- Enables Datadog Agent controller
    enabled: true

  datadogAgentInternal:
    # operator.datadogAgentInternal.enabled -- Enables the Datadog Agent Internal controller
    enabled: false

  datadogDashboard:
    # operator.datadogDashboard.enabled -- Enables the Datadog Dashboard controller
    enabled: false

  datadogGenericResource:
    # operator.datadogGenericResource.enabled -- Enables the Datadog Generic Resource controller
    enabled: false

  datadogMonitor:
    # operator.datadogMonitor.enabled -- Enables the Datadog Monitor controller
    enabled: false

  datadogSLO:
    # operator.datadogSLO.enabled -- Enables the Datadog SLO controller
    enabled: false

  datadogCRDs:
    # operator.datadogCRDs.keepCrds -- Set to true to keep the CRDs when the helm chart is uninstalled. This must be set to true if datadog.operator.migration.enabled is set to true.
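    ## For example, when migrating to the Datadog Operator, set this in your values.yaml (illustrative):
    # operator:
    #   datadogCRDs:
    #     keepCrds: true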
    keepCrds: false

    crds:
      # operator.datadogCRDs.crds.datadogAgents -- Set to true to deploy the DatadogAgents CRD
      datadogAgents: true
      # operator.datadogCRDs.crds.datadogMonitors -- Set to true to deploy the DatadogMonitors CRD
      datadogMonitors: true
      # operator.datadogCRDs.crds.datadogSLOs -- Set to true to deploy the DatadogSLO CRD
      datadogSLOs: true
      # operator.datadogCRDs.crds.datadogDashboards -- Set to true to deploy the DatadogDashboard CRD
      datadogDashboards: true
      # operator.datadogCRDs.crds.datadogGenericResources -- Set to true to deploy the DatadogGenericResource CRD
      datadogGenericResources: true
      # operator.datadogCRDs.crds.datadogMetrics -- Set to true to deploy the DatadogMetrics CRD
      datadogMetrics: false
      # operator.datadogCRDs.crds.datadogPodAutoscalers -- Set to true to deploy the DatadogPodAutoscalers CRD
      datadogPodAutoscalers: false
      # operator.datadogCRDs.crds.datadogAgentInternals -- Set to true to deploy the DatadogAgentInternals CRD
      datadogAgentInternals: false

datadog-crds:
  crds:
    # datadog-crds.crds.datadogMetrics -- Set to true to deploy the DatadogMetrics CRD
    datadogMetrics: true
    # datadog-crds.crds.datadogPodAutoscalers -- Set to true to deploy the DatadogPodAutoscalers CRD
    datadogPodAutoscalers: true

kube-state-metrics:
  # kube-state-metrics.image.repository -- Default kube-state-metrics image repository.
  image:
    repository: registry.k8s.io/kube-state-metrics/kube-state-metrics

  rbac:
    # kube-state-metrics.rbac.create -- If true, create & use RBAC resources
    create: true

  serviceAccount:
    # kube-state-metrics.serviceAccount.create -- If true, create a ServiceAccount; requires kube-state-metrics.rbac.create to be true
    create: true
    # kube-state-metrics.serviceAccount.name -- The name of the ServiceAccount to use.
    ## If not set and create is true, a name is generated using the fullname template
    name:

  # kube-state-metrics.resources -- Resource requests and limits for the kube-state-metrics container.
  resources: {}
  # requests:
  #   cpu: 200m
  #   memory: 256Mi
  # limits:
  #   cpu: 200m
  #   memory: 256Mi

  # kube-state-metrics.nodeSelector -- Node selector for KSM. KSM only supports Linux.
  nodeSelector:
    kubernetes.io/os: linux

providers:
  gke:
    # providers.gke.autopilot -- Enables Datadog Agent deployment on GKE Autopilot
    autopilot: false

    # providers.gke.cos -- Enables Datadog Agent deployment on GKE with Container-Optimized OS (COS)
    cos: false

    # providers.gke.gdc -- Enables Datadog Agent deployment on GKE on Google Distributed Cloud (GDC)
    gdc: false

  eks:
    # providers.eks.controlPlaneMonitoring -- Enable control plane monitoring checks in the EKS cluster.
    controlPlaneMonitoring: false

    ec2:
      # providers.eks.ec2.useHostnameFromFile -- Use hostname from EC2 filesystem instead of fetching from metadata endpoint.
      ## When deploying to EC2-backed EKS infrastructure, there are situations where the
      ## IMDS metadata endpoint is not accessible to containers. This flag mounts the host's
      ## `/var/lib/cloud/data/instance-id` and uses that for the Agent's hostname instead.
      useHostnameFromFile: false

  aks:
    # providers.aks.enabled -- Activate all specificities related to AKS configuration. Required because AKS currently cannot be auto-detected.
    enabled: false

  openshift:
    # providers.openshift.controlPlaneMonitoring -- Enable control plane monitoring checks in the OpenShift cluster.
    # Certificates are needed to communicate with the Etcd service, which can be found in the secret `etcd-metric-client` in the `openshift-etcd-operator` namespace.
    # To give the Datadog Agent access to these certificates, copy them into the same namespace the Datadog Agent
    # is running in (replace the <DATADOG_NAMESPACE> placeholder with that namespace):
    # `oc get secret etcd-metric-client -n openshift-etcd-operator -o yaml | sed 's/namespace: openshift-etcd-operator/namespace: <DATADOG_NAMESPACE>/' | oc create -f -`
    controlPlaneMonitoring: false

  talos:
    # providers.talos.enabled -- Activate all required specificities related to Talos.dev configuration,
    # because the chart currently cannot auto-detect Talos.dev clusters.
    # Note: The Agent deployment requires additional privileges that are not permitted by the default pod security policy.
    # The annotation `pod-security.kubernetes.io/enforce=privileged` must be applied to the Datadog installation
    # Kubernetes namespace. For more information on pod security policies in Talos.dev clusters, see:
    # https://www.talos.dev/v1.8/kubernetes-guides/configuration/pod-security/
    enabled: false

remoteConfiguration:
  # remoteConfiguration.enabled -- Set to true to enable remote configuration on the Cluster Agent (if deployed) and the node agent.
  # Can be overridden by `datadog.remoteConfiguration.enabled` when that value is set.
  # This is the preferred way to enable Remote Configuration.
  enabled: true

## OTel collector related configuration for otel-agent in Gateway Deployment
## Note: this is different from the otel-agent in the Daemonset (datadog.otelCollector)
otelAgentGateway:
  # otelAgentGateway.enabled -- Enable otel-agent Gateway
  enabled: false

  # otelAgentGateway.ports -- Ports that the OTel Collector is listening on
  ports:
    # Default GRPC port of OTLP receiver
    - containerPort: "4317"
      name: otel-grpc
      protocol: TCP
    # Default HTTP port of OTLP receiver
    - containerPort: "4318"
      name: otel-http
      protocol: TCP

  # otelAgentGateway.config -- Gateway OTel Agent configuration
  config: null

  # otelAgentGateway.configMap -- Use an existing ConfigMap for Gateway OTel Agent configuration
  configMap:
    # otelAgentGateway.configMap.name -- Name of the existing ConfigMap that contains the Gateway OTel Agent configuration
    name: null
    # otelAgentGateway.configMap.checksum -- Checksum of the existing ConfigMap that contains the Gateway OTel Agent configuration
    checksum: null
    # otelAgentGateway.configMap.items -- Items within the ConfigMap that contain Gateway OTel Agent configuration
    items:
    # - key: otel-gateway-config.yaml
    #   path: otel-gateway-config.yaml
    # - key: otel-gateway-config-two.yaml
    #   path: otel-gateway-config-two.yaml
    # otelAgentGateway.configMap.key -- Key within the ConfigMap that contains the Gateway OTel Agent configuration
    key: otel-gateway-config.yaml

  # otelAgentGateway.featureGates -- Feature gates to pass to the OTel collector, as a comma-separated list
  featureGates: null

  # otelAgentGateway.replicas -- Number of otel-agent instances in the Gateway Deployment
  replicas: 1

  # otelAgentGateway.revisionHistoryLimit -- The number of old ReplicaSets to keep in this Deployment.
  revisionHistoryLimit: 10

  # otelAgentGateway.deploymentAnnotations -- Annotations to add to the otel-agent Gateway Deployment
  deploymentAnnotations: {}
  #   key: "value"

  # otelAgentGateway.podAnnotations -- Annotations to add to the Gateway Deployment's Pods
  podAnnotations: {}
  #   key: "value"

  # otelAgentGateway.tolerations -- Allow the Gateway Deployment to schedule on tainted nodes (requires Kubernetes >= 1.6)
  tolerations: []

  # otelAgentGateway.useHostNetwork -- Bind ports on the hostNetwork
  ## Useful for CNI networking where hostPort might
  ## not be supported. The ports need to be available on all hosts. It can be
  ## used for custom metrics instead of a service endpoint.
  ##
  ## WARNING: Make sure that hosts using this are properly firewalled; otherwise
  ## metrics and traces are accepted from any host able to connect to this host.
  #
  useHostNetwork: false

  # otelAgentGateway.dnsConfig -- Specify DNS configuration options for the otel-agent containers, e.g. ndots
  ## ref: https://kubernetes.io/docs/concepts/services-networking/dns-pod-service/#pod-dns-config
  dnsConfig: {}
  # options:
  # - name: ndots
  #   value: "1"

  # otelAgentGateway.volumes -- Specify additional volumes for the otel-agent pod
  volumes: []
  # - hostPath:
  #     path:
  #   name:

  # otelAgentGateway.volumeMounts -- Specify additional volume mounts in the otel-agent container
  volumeMounts: []
  # - name:
  #   mountPath:
  #   readOnly: true

  # otelAgentGateway.nodeSelector -- Allow the Gateway Deployment to schedule on selected nodes
  ## Ref: https://kubernetes.io/docs/user-guide/node-selection/
  nodeSelector: {}

  # otelAgentGateway.affinity -- Allow the Gateway Deployment to schedule using affinity rules
  ## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
  affinity: {}

  # otelAgentGateway.strategy -- Allow the otel-agent Gateway Deployment to perform a rolling update on helm update
  ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
  strategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0

  # otelAgentGateway.priorityClassCreate -- Creates a priorityClass for the otel-agent Gateway Deployment pods.
  priorityClassCreate: false

  # otelAgentGateway.priorityClassName -- Sets PriorityClassName if defined
  priorityClassName: null

  # otelAgentGateway.priorityPreemptionPolicyValue -- Set to "Never" to change the PriorityClass to non-preempting
  priorityPreemptionPolicyValue: PreemptLowerPriority

  # otelAgentGateway.priorityClassValue -- Value that sets the scheduling priority of otel-agent Gateway Deployment pods.
  ## The PriorityClass uses PreemptLowerPriority.
  priorityClassValue: 1000000000

  # otelAgentGateway.podLabels -- Sets podLabels if defined
  ## Note: These labels are also used as label selectors, so they are immutable.
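  ## Example (illustrative label; since these labels are immutable, choose ones you will not need to change):
  # podLabels:
  #   app.kubernetes.io/component: otel-gateway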
  podLabels: {}

  # otelAgentGateway.additionalLabels -- Adds labels to the Agent Gateway Deployment and pods
  additionalLabels: {}

  # otelAgentGateway.shareProcessNamespace -- Set the process namespace sharing on the otel-agent
  shareProcessNamespace: false

  # otelAgentGateway.lifecycle -- Configure the lifecycle of the otel-agent
  lifecycle: {}
  # preStop:
  #   exec:
  #     command: ["/bin/sh", "-c", "sleep 70"]

  # otelAgentGateway.terminationGracePeriodSeconds -- (int) Configure the termination grace period for the otel-agent
  terminationGracePeriodSeconds: # 70

  # otelAgentGateway.topologySpreadConstraints -- Allow the otel-agent Gateway Deployment to schedule using pod topology spreading
  ## By default, no constraints are set, allowing cluster defaults to be used for scheduling
  ## Ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
  topologySpreadConstraints: []

  ## Configuration for the service for the OTel Agent Gateway
  service:
    # otelAgentGateway.service.type -- Set the type of the otel-agent-gateway service
    type: ClusterIP

  ## Allows overriding the Datadog otel-agent image
  image:
    # otelAgentGateway.image.name -- OTel Agent image name to use (relative to `registry`)
    name: ddot-collector

    # otelAgentGateway.image.tag -- Override the image tag of the OTel Agent
    tag: ""

    # otelAgentGateway.image.tagSuffix -- Suffix to append to the image tag of the OTel Agent
    tagSuffix: ""

    # otelAgentGateway.image.digest -- Override the image digest of the OTel Agent, takes precedence over tag if specified
    digest: ""

    # otelAgentGateway.image.repository -- Override the image repository to override default registry
    repository:

    # otelAgentGateway.image.doNotCheckTag -- Skip the version and chart compatibility check
    ## By default, the version passed in otelAgentGateway.image.tag is checked
    ## for compatibility with the version of the chart.
    ## This boolean permits completely skipping this check.
    ## This is useful, for example, for custom tags that do not
    ## respect semantic versioning.
    doNotCheckTag: # false

    # otelAgentGateway.image.pullPolicy -- OTel Agent image pullPolicy
    pullPolicy: IfNotPresent

    # otelAgentGateway.image.pullSecrets -- OTel Agent repository pullSecret (ex: specify docker registry credentials)
    ## See https://kubernetes.io/docs/concepts/containers/images/#specifying-imagepullsecrets-on-a-pod
    pullSecrets: []
    # - name: ""

  initContainers:
    # otelAgentGateway.initContainers.securityContext -- Allows you to overwrite the default container SecurityContext for init containers
    securityContext:

    # otelAgentGateway.initContainers.resources -- Resource requests and limits for init containers
    resources:
    # requests:
    #   cpu: 100m
    #   memory: 200Mi
    # limits:
    #   cpu: 100m
    #   memory: 200Mi

  containers:
    otelAgent:
      # otelAgentGateway.containers.otelAgent.env -- Additional environment variables for the otel-agent container
      env: []

      # otelAgentGateway.containers.otelAgent.envFrom -- Set environment variables specific to otel-agent from configMaps and/or secrets
      envFrom: []
      # - configMapRef:
      #     name:
      # - secretRef:
      #     name:

      # otelAgentGateway.containers.otelAgent.envDict -- Set environment variables specific to otel-agent defined in a dict
      envDict: {}
      #   <ENV_VAR>: <VALUE>

      # otelAgentGateway.containers.otelAgent.resources -- Resource requests and limits for the otel-agent container
      resources: {}
      # requests:
      #   cpu: 100m
      #   memory: 200Mi
      # limits:
      #   cpu: 100m
      #   memory: 200Mi

      # otelAgentGateway.containers.otelAgent.securityContext -- Allows you to overwrite the default container SecurityContext for the otel-agent container.
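      ## Example (illustrative), e.g. to run the container with a read-only root filesystem:
      # securityContext:
      #   readOnlyRootFilesystem: true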
      securityContext: {}

      # otelAgentGateway.containers.otelAgent.logLevel -- Set logging verbosity, valid log levels are: trace, debug, info, warn, error, critical, and off.
      # If not set, falls back to the value of datadog.logLevel.
      logLevel: # INFO

      # otelAgentGateway.containers.otelAgent.healthPort -- Port number to use for the otel-agent-gateway health check endpoint (OTel health_check extension)
      healthPort: 13133

      # otelAgentGateway.containers.otelAgent.livenessProbe -- otel-agent-gateway liveness probe settings.
      # Set enabled to true to activate. The OTel config must expose the health_check extension
      # on healthPort (default 13133); the generated default config does this automatically.
      livenessProbe:
        enabled: false
        initialDelaySeconds: 15
        periodSeconds: 15
        timeoutSeconds: 5
        successThreshold: 1
        failureThreshold: 6

      # otelAgentGateway.containers.otelAgent.readinessProbe -- otel-agent-gateway readiness probe settings.
      # Set enabled to true to activate. The OTel config must expose the health_check extension
      # on healthPort (default 13133); the generated default config does this automatically.
      readinessProbe:
        enabled: false
        initialDelaySeconds: 15
        periodSeconds: 15
        timeoutSeconds: 5
        successThreshold: 1
        failureThreshold: 6

  ## Provide OTel Collector RBAC configuration in Gateway
  rbac:
    # otelAgentGateway.rbac.create -- If true, checks the OTel Collector config for the k8sattributes processor
    # and creates the required ClusterRole to access the Kubernetes API
    create: true

    # otelAgentGateway.rbac.rules -- A set of additional RBAC rules to apply to the OTel Collector's ClusterRole
    rules: []
    # - apiGroups: [""]
    #   resources: ["pods", "nodes"]
    #   verbs: ["get", "list", "watch"]

  ## Provide OTel Collector logs configuration
  logs:
    # otelAgentGateway.logs.enabled -- Enable logs support in the OTel Collector.
    # If true, checks the OTel Collector config for the filelog receiver and mounts additional volumes to collect
    # container and pod logs.
    enabled: false

  ## Provide Horizontal Pod Autoscaler (HPA) configuration in OTel Agent Gateway, requires k8s 1.23.0 and above
  autoscaling:
    # otelAgentGateway.autoscaling.enabled -- Enable autoscaling using the Horizontal Pod Autoscaler (HPA), requires k8s 1.23.0 and above.
    # Overrides otelAgentGateway.replicas.
    enabled: false

    # otelAgentGateway.autoscaling.annotations -- Annotations for the OTel Agent Gateway HPA
    annotations: {}

    # otelAgentGateway.autoscaling.minReplicas -- Minimum number of replicas for the OTel Agent Gateway HPA
    minReplicas: 0

    # otelAgentGateway.autoscaling.maxReplicas -- Maximum number of replicas for the OTel Agent Gateway HPA
    maxReplicas: 0

    # otelAgentGateway.autoscaling.metrics -- The metrics used for the OTel Agent Gateway HPA
    metrics: []

    # otelAgentGateway.autoscaling.behavior -- Defines the scaling behavior in the OTel Agent Gateway HPA
    behavior:
      # otelAgentGateway.autoscaling.behavior.scaleUp -- Defines the scale-up behavior in the OTel Agent Gateway HPA
      scaleUp: {}
      # otelAgentGateway.autoscaling.behavior.scaleDown -- Defines the scale-down behavior in the OTel Agent Gateway HPA
      scaleDown: {}
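## Example enabling the Gateway HPA with a CPU utilization target, using the standard
## autoscaling/v2 metrics schema. The values below are illustrative, not defaults;
## tune them to your workload:
# otelAgentGateway:
#   autoscaling:
#     enabled: true
#     minReplicas: 2
#     maxReplicas: 10
#     metrics:
#       - type: Resource
#         resource:
#           name: cpu
#           target:
#             type: Utilization
#             averageUtilization: 75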