apiVersion: v1
kind: ConfigMap
metadata:
  name: kubecost-2.5.2-d2iq-defaults
  namespace: ${releaseNamespace}
data:
  # Using just values.yaml will result in kubecost running in agent mode.
  values.yaml: |
    ---
    global:
      prometheus:
        enabled: true
      grafana:
        enabled: false # Cannot use grafana when federatedETL.agentOnly is true.
        proxy: false
      # Installs custom CA certificates onto Kubecost pods
      updateCaTrust:
        enabled: true
        caCertsSecret: tls-root-ca # The name of the Secret containing custom CA certificates to mount to the cost-model container.
    forecasting:
      # Enable this to use kubecost's cost forecasting model
      enabled: false
    upgrade:
      toV2: false # TODO(takirala): Handle upgrades.
    federatedETL:
      federatedCluster: true
      agentOnly: true
    ingress:
      enabled: false
    kubecostModel:
      federatedStorageConfigSecret: "federated-store" # Secret should have a key named "federated-store.yaml" with the federated storage credentials
    kubecostAggregator:
      deployMethod: disabled
      # NOTE(review): SOURCE contained two identical "priority" stanzas back to
      # back; one is kept here and one at top level. Confirm intended placement —
      # as a flat duplicate key the second would silently win on most parsers.
      priority:
        enabled: true
        name: dkp-high-priority
    priority:
      enabled: true
      name: dkp-high-priority
    prometheus:
      kubeStateMetrics:
        enabled: false
      kube-state-metrics:
        disabled: true
      extraScrapeConfigs: |
        - job_name: kubecost
          honor_labels: true
          scrape_interval: 1m
          scrape_timeout: 10s
          metrics_path: /metrics
          scheme: http
          dns_sd_configs:
            - names:
                - {{ .Release.Name }}-cost-analyzer
              type: 'A'
              port: 9003
        - job_name: kubecost-networking
          kubernetes_sd_configs:
            - role: pod
          relabel_configs:
            # Scrape only the targets matching the following metadata
            - source_labels: [__meta_kubernetes_pod_label_app]
              action: keep
              regex: {{ .Release.Name }}-network-costs
      server:
        priorityClassName: dkp-high-priority
        retention: 14d
        fullnameOverride: "kubecost-prometheus-server"
        # If clusterIDConfigmap is defined, instead use user-generated configmap with key CLUSTER_ID
        # to use as unique cluster ID in kubecost cost-analyzer deployment.
        # This overrides the cluster_id set in prometheus.server.global.external_labels.
        # NOTE: This does not affect the external_labels set in prometheus config.
        clusterIDConfigmap: kubecost-cluster-info-configmap
        extraFlags:
          - web.enable-admin-api
          - web.enable-lifecycle
          - storage.tsdb.wal-compression
        resources:
          limits:
            cpu: 1000m
            memory: 2500Mi
          requests:
            cpu: 300m
            memory: 1500Mi
        global:
          scrape_interval: 1m
          scrape_timeout: 10s
          evaluation_interval: 1m
          external_labels:
            cluster_id: $CLUSTER_ID
        persistentVolume:
          size: 32Gi
          enabled: true
        extraArgs:
          log.level: info
          log.format: json
          storage.tsdb.min-block-duration: 2h
          storage.tsdb.max-block-duration: 2h
          query.max-concurrency: 1
          query.max-samples: 100000000
        enableAdminApi: true
        service:
          gRPC:
            enabled: true
      configmapReload:
        prometheus:
          enabled: true
        alertmanager:
          enabled: true
      alertmanager:
        fullnameOverride: "kubecost-prometheus-alertmanager"
        image:
          repository: quay.io/prometheus/alertmanager
          tag: v0.28.0
        priorityClassName: dkp-high-priority
        enabled: true
        resources:
          limits:
            cpu: 50m
            memory: 100Mi
          requests:
            cpu: 10m
            memory: 50Mi
        persistentVolume:
          enabled: true
      pushgateway:
        enabled: false
        persistentVolume:
          enabled: false
      serverFiles:
        alerts:
          groups:
            - name: Kubecost
              rules:
                - alert: kubecostDown
                  expr: up{job="kubecost"} == 0
                  annotations:
                    message: 'Kubecost metrics endpoint is not being scraped successfully.'
                  for: 10m
                  labels:
                    severity: warning
                - alert: kubecostMetricsUnavailable
                  expr: sum(sum_over_time(node_cpu_hourly_cost[5m])) == 0
                  annotations:
                    message: 'Kubecost metrics are not available in Prometheus.'
                  for: 10m
                  labels:
                    severity: warning
                - alert: kubecostRecordingRulesNotEvaluated
                  expr: avg_over_time(kubecost_cluster_memory_working_set_bytes[5m]) == 0
                  annotations:
                    message: 'Kubecost recording rules are not being successfully evaluated.'
                  for: 10m
                  labels:
                    severity: warning
    kubecostProductConfigs:
      clusterName: ""
      clusterProfile: production
      cloudIntegrationSecret: ""
      currencyCode: USD
      productKey:
        enabled: false
        # key: YOUR_KEY
  # Overrides for kubecost to run in primary mode (single cluster with no object storage)
  primary-values.yaml: |
    global:
      notifications:
        alertmanager:
          # If true, allow kubecost to write to alertmanager
          enabled: true
    federatedETL:
      federatedCluster: false
      agentOnly: false
    kubecostModel:
      federatedStorageConfigSecret: ""
    kubecostAggregator:
      # deployMethod determines how Aggregator is deployed. Current options are
      # "singlepod" (within cost-analyzer Pod) "statefulset" (separate
      # StatefulSet), and "disabled".
      deployMethod: singlepod
      persistentConfigsStorage:
        storageClass: "" # default storage class
        storageRequest: 1Gi
      aggregatorDbStorage:
        storageClass: "" # default storage class
        storageRequest: 32Gi
      cloudCost:
        # The cloudCost component of Aggregator depends on
        # kubecostAggregator.deployMethod:
        # kA.dM = "singlepod" -> cloudCost is run as container inside cost-analyzer
        # kA.dM = "statefulset" -> cloudCost is run as single-replica Deployment
        enabled: false
      # Log level for the aggregator container. Options are "trace", "debug", "info", "warn", "error", "fatal", "panic"
      logLevel: info
      resources:
        requests:
          cpu: 1000m
          memory: 1Gi
      jaeger:
        # Enable this to use jaeger for tracing, useful for debugging
        enabled: false
        image: jaegertracing/all-in-one
        imageVersion: 1.64.0 # Pin the image here to avoid pulling in latest as that would affect CVE scans
    kubecostFrontend:
      enabled: true
      deployMethod: singlepod # Other possible value is `haMode` that is supported only with enterprise license.
    ipv6:
      enabled: false
    # Define persistence volume for cost-analyzer, more information at https://github.com/kubecost/docs/blob/master/storage.md
    persistentVolume:
      # Upgrades from original default 0.2Gi may break if automatic disk resize is not supported
      # https://github.com/kubecost/cost-analyzer-helm-chart/issues/507
      size: 32Gi
      # Note that setting this to false means configurations will be wiped out on pod restart.
      enabled: true
      # storageClass: "-"
    ingress:
      enabled: true
      annotations:
        kubernetes.io/ingress.class: kommander-traefik
        ingress.kubernetes.io/auth-response-headers: X-Forwarded-User
        traefik.ingress.kubernetes.io/router.tls: "true"
        traefik.ingress.kubernetes.io/router.middlewares: "${releaseNamespace}-forwardauth@kubernetescrd,${releaseNamespace}-stripprefixes@kubernetescrd"
      paths:
        - "/dkp/kommander/kubecost/frontend/" # This used to be the ingress of centralized-kubecost in 2.13.x and older versions of DKP
      hosts:
        - ""
      tls: []
    grafana:
      priorityClassName: dkp-high-priority
      sidecar:
        dashboards:
          enabled: true
          label: grafana_dashboard_kommander
          labelValue: 1
        datasources:
          enabled: true
          defaultDatasourceEnabled: false
          label: grafana_datasource_kommander
    kubecostProductConfigs:
      grafanaURL: "/dkp/kommander/monitoring/grafana"
  # Overrides for kubecost to create cosi resources.
  primary-cosi-values.yaml: |
    ---
    cosiBucketKit:
      enabled: true
      bucketClaims:
        - name: cosi-kubecost # Max length should be less than 26 chars.
          namespace: ${releaseNamespace}
          annotations:
            helm.sh/resource-policy: keep
          bucketClassName: cosi-ceph-nkp
          protocols:
            - s3
      bucketAccesses:
        - name: cosi-kubecost
          namespace: ${releaseNamespace}
          annotations:
            helm.sh/resource-policy: keep
          bucketAccessClassName: cosi-ceph-nkp
          bucketClaimName: cosi-kubecost
          protocol: s3
          credentialsSecretName: federated-store
      transformations:
        priorityClassName: dkp-high-priority
        kubectlImage: ${kubetoolsImageRepository:=bitnami/kubectl}:${kubetoolsImageTag:=1.31.4}
    kubecost:
      enabled: true
  # Overrides for kubecost to run in primary mode for multi cluster setup with object storage.
  primary-object-storage-ready-values.yaml: |
    ---
    kubecostAggregator:
      # deployMethod determines how Aggregator is deployed. Current options are
      # "singlepod" (within cost-analyzer Pod) "statefulset" (separate
      # StatefulSet), and "disabled".
      deployMethod: statefulset
    federatedETL:
      federatedCluster: true
    kubecostModel:
      federatedStorageConfigSecret: "federated-store" # Secret should have a key named "federated-store.yaml" with the federated storage credentials