# Default values for pyroscope.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

pyroscope:
  replicaCount: 1

  # -- Enable or disable self-profile push; useful for testing
  disableSelfProfile: true

  # -- Kubernetes cluster domain suffix for DNS discovery
  cluster_domain: .cluster.local.

  image:
    repository: grafana/pyroscope
    pullPolicy: IfNotPresent
    # Allows overriding the image tag, which defaults to the appVersion in the chart metadata
    tag: ""

  extraArgs:
    log.level: debug

  extraLabels: {}

  extraEnvVars: {}
  # The following environment variables are set by the Helm chart.
  # JAEGER_AGENT_HOST: jaeger-agent.jaeger.svc.cluster.local.

  extraCustomEnvVars: {}
  # Environment variables in raw form, e.g.:
  # - name: MY_NODE_NAME
  #   valueFrom:
  #     fieldRef:
  #       fieldPath: spec.nodeName

  # -- Environment variables from secrets or configmaps to add to the pods
  extraEnvFrom: []

  imagePullSecrets: []

  dnsPolicy: ClusterFirst

  initContainers: []

  extraContainers: []

  nameOverride: ""
  fullnameOverride: ""

  serviceAccount:
    # Specifies whether a service account should be created
    create: true
    # Annotations to add to the service account
    annotations: {}
    # The name of the service account to use.
    # If not set and create is true, a name is generated using the fullname template
    name: ""

  podAnnotations:
    # Scrapes itself; see https://grafana.com/docs/pyroscope/latest/deploy-kubernetes/helm/#optional-scrape-your-own-workloads-profiles
    profiles.grafana.com/memory.scrape: "true"
    profiles.grafana.com/memory.port_name: http2
    profiles.grafana.com/cpu.scrape: "true"
    profiles.grafana.com/cpu.port_name: http2
    profiles.grafana.com/goroutine.scrape: "true"
    profiles.grafana.com/goroutine.port_name: http2
    # profiles.grafana.com/block.scrape: "true"
    # profiles.grafana.com/mutex.scrape: "true"

  podSecurityContext:
    fsGroup: 10001
    runAsUser: 10001
    runAsNonRoot: true

  podDisruptionBudget:
    enabled: true
    maxUnavailable: 1

  securityContext: {}
  # capabilities:
  #   drop:
  #     - ALL
  # readOnlyRootFilesystem: true
  # runAsNonRoot: true
  # runAsUser: 1000

  service:
    type: ClusterIP
    port: 4040
    port_name: http2
    scheme: HTTP
    annotations: {}
    headlessAnnotations: {}

  memberlist:
    port: 7946
    port_name: memberlist

  grpc:
    port: 9095
    port_name: grpc

  metastore:
    port: 9099
    port_name: raft

  resources: {}
  # We usually recommend not specifying default resources and leaving this as a conscious
  # choice for the user. This also increases the chances the chart runs on environments with
  # few resources, such as Minikube. If you do want to specify resources, uncomment the
  # following lines, adjust them as necessary, and remove the curly braces after 'resources'.
  #
  # Note that if memory consumption is higher than you would like, you can decrease the interval
  # at which profiles are written into blocks by setting `pyroscopedb.max-block-duration` in the
  # `extraArgs` stanza. By default, it is set to 3h; override it, for example, as below:
  # ```
  # extraArgs:
  #   pyroscopedb.max-block-duration: 30m
  # ```
  #
  # limits:
  #   cpu: 100m
  #   memory: 128Mi
  # requests:
  #   cpu: 100m
  #   memory: 128Mi

  nodeSelector: {}

  # -- Topology Spread Constraints
  topologySpreadConstraints: []
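  # A minimal sketch (commented out) using the standard Kubernetes
  # topologySpreadConstraints fields to spread pods across nodes; the label
  # selector value is an assumption and must match your release's pod labels:
  # topologySpreadConstraints:
  #   - maxSkew: 1
  #     topologyKey: kubernetes.io/hostname
  #     whenUnsatisfiable: ScheduleAnyway
  #     labelSelector:
  #       matchLabels:
  #         app.kubernetes.io/name: pyroscope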
  ## ref: https://kubernetes.io/docs/concepts/storage/persistent-volumes/
  ## If you set enabled to "true", you need to:
  ## - create a PV of at least 10Gi in the same namespace as Pyroscope
  ## - keep storageClassName consistent with the setting below
  persistence:
    enabled: false
    accessModes:
      - ReadWriteOnce
    size: 10Gi
    annotations: {}
    # selector:
    #   matchLabels:
    #     app.kubernetes.io/name: pyroscope
    # subPath: ""
    # existingClaim:
    metastore:
      # subPath of the data volume to use for metastore persistence.
      subPath: .metastore
    shared:
      enabled: true
      # subPath of the data volume to use for the shared storage used as a bucket replacement.
      subPath: .shared

  extraVolumes: []
  # - name: backup-volume
  #   emptyDir: {}

  extraVolumeMounts: []
  # - name: testing
  #   mountPath: /var/lib/testing
  #   readOnly: false
  # - name: test-volume
  #   mountPath: /var/tmp/test-volume
  #   existingClaim: test-volume
  #   readOnly: false

  tolerations: []

  affinity: {}

  # Override the PodPriorityClass
  # priorityClassName: high

  # run specific components separately
  components: {}

  # -- Allows overriding Pyroscope's configuration in structured format.
  structuredConfig: {}
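  # A minimal sketch (commented out): raising the default ingestion limit via the
  # structured config, assuming the `limits` block accepts the same keys shown in
  # the `tenantOverrides` example below; the value 8 is a placeholder:
  # structuredConfig:
  #   limits:
  #     ingestion_rate_mb: 8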
  # -- Contains Pyroscope's configuration as a string.
  # @default -- The config depends on other values being set; details can be found in [`values.yaml`](./values.yaml)
  config: |
    {{- if .Values.minio.enabled }}
    storage:
      backend: s3
      s3:
        endpoint: "{{ include "pyroscope.fullname" . }}-minio:9000"
        bucket_name: {{ (index .Values.minio.buckets 0).name | quote }}
        access_key_id: {{ .Values.minio.rootUser | quote }}
        secret_access_key: {{ .Values.minio.rootPassword | quote }}
        insecure: true
    {{- end }}

  # -- Allows adding tenant-specific overrides to the default limit configuration.
  tenantOverrides: {}
  # "foo":
  #   ingestion_rate_mb: 1
  #   ingestion_burst_size_mb: 2

architecture:
  storage:
    # -- (bool) Enable the v1 storage layer.
    v1: true
    # -- (bool) Enable the v2 storage layer.
    v2: false
  migration:
    # -- (float) Fraction [0:1] of writes sent to the v1 write path / ingester in combined mode. 0 means no traffic is sent to the ingester; 1 means 100% of requests are sent to the ingester.
    ingesterWeight: 1.0
    # -- (float) Fraction [0:1] of writes sent to the v2 write path / segment-writer in combined mode. 0 means no traffic is sent to the segment-writer; 1 means 100% of requests are sent to the segment-writer.
    segmentWriterWeight: 1.0
    # -- (bool) Enable the v2 read path (query-backend).
    queryBackend: true
    # -- (string) Timestamp from which the v2 read path should serve traffic.
    queryBackendFrom: auto

# -- Useful for testing; overwrites the resource statements of all pods with its contents
overwriteResources: {}
# limits: {}
# requests: {}

# -- Deploy unified write/read services. These endpoints can be used regardless of whether the Helm chart is configured as single-binary or microservices
deployUnifiedServices: false

microservices:
  # -- (bool) Enable microservices deployment mode. This is recommended for larger-scale deployments and allows right-sizing each component of Pyroscope.
  enabled: false
  # -- (string) Memberlist cluster label that will be used for all members of this cluster
  clusterLabelSuffix: -micro-services
  # -- @ignored
  # Not useful to expose individually
  v1:
    querier:
      kind: Deployment
      replicaCount: 3
      resources:
        limits:
          memory: 1Gi
        requests:
          memory: 256Mi
          cpu: 1
      extraArgs:
        store-gateway.sharding-ring.replication-factor: "3"
    query-frontend:
      kind: Deployment
      replicaCount: 2
      resources:
        limits:
          memory: 1Gi
        requests:
          memory: 256Mi
          cpu: 100m
    query-scheduler:
      kind: Deployment
      replicaCount: 2
      resources:
        limits:
          memory: 1Gi
        requests:
          memory: 256Mi
          cpu: 100m
    distributor:
      kind: Deployment
      replicaCount: 2
      resources:
        limits:
          memory: 1Gi
        requests:
          memory: 256Mi
          cpu: 500m
    ingester:
      kind: StatefulSet
      replicaCount: 3
      terminationGracePeriodSeconds: 600
      resources:
        limits:
          memory: 16Gi
        requests:
          memory: 8Gi
          cpu: 1
    compactor:
      kind: StatefulSet
      replicaCount: 3
      terminationGracePeriodSeconds: 1200
      persistence:
        enabled: false
      resources:
        limits:
          memory: 16Gi
        requests:
          memory: 8Gi
          cpu: 1
    store-gateway:
      kind: StatefulSet
      replicaCount: 3
      persistence:
        # The store-gateway does not need persistent storage, but we still run it as a StatefulSet.
        # This is to avoid having blocks of data re-synced every time a pod is rescheduled.
        enabled: false
      resources:
        limits:
          memory: 16Gi
        requests:
          memory: 8Gi
          cpu: 1
      readinessProbe:
        # The store-gateway can be configured to wait on startup for ring stability to be reached before it becomes
        # ready. See the `store-gateway.sharding-ring.wait-stability-min-duration` server argument for more information.
        #
        # Depending on this flag and the number of tenants + blocks that need to be synced on startup, pods can take
        # some time to become ready. This value can be used to ensure Kubernetes waits long enough and reduce errors.
        initialDelaySeconds: 60
      extraArgs:
        store-gateway.sharding-ring.replication-factor: "3"
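        # A sketch (commented out): the ring-stability wait mentioned in the
        # readinessProbe comment above can be tuned here as well; the 1m value is
        # an assumption chosen to stay below initialDelaySeconds:
        # store-gateway.sharding-ring.wait-stability-min-duration: 1m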
    tenant-settings:
      kind: Deployment
      replicaCount: 1
      resources:
        limits:
          memory: 4Gi
        requests:
          memory: 16Mi
          cpu: 0.1
    ad-hoc-profiles:
      kind: Deployment
      replicaCount: 1
      resources:
        limits:
          memory: 4Gi
        requests:
          memory: 16Mi
          cpu: 0.1
  # -- @ignored
  # Not useful to expose individually
  v2:
    query-backend:
      kind: Deployment
      replicaCount: 3
      resources:
        limits:
          memory: 1Gi
        requests:
          memory: 256Mi
          cpu: 1
    query-frontend:
      kind: Deployment
      replicaCount: 2
      resources:
        limits:
          memory: 1Gi
        requests:
          memory: 256Mi
          cpu: 100m
    distributor:
      kind: Deployment
      replicaCount: 2
      resources:
        limits:
          memory: 1Gi
        requests:
          memory: 256Mi
          cpu: 500m
    segment-writer:
      kind: StatefulSet
      replicaCount: 3
      terminationGracePeriodSeconds: 600
      resources:
        limits:
          memory: 4Gi
        requests:
          memory: 2Gi
          cpu: 1
    compaction-worker:
      kind: StatefulSet
      replicaCount: 3
      terminationGracePeriodSeconds: 1200
      persistence:
        enabled: false
      resources:
        limits:
          memory: 2Gi
        requests:
          memory: 1Gi
          cpu: 1
    metastore:
      kind: StatefulSet
      replicaCount: 3
      terminationGracePeriodSeconds: 1200
      persistence:
        enabled: false
      resources:
        limits:
          memory: 2Gi
        requests:
          memory: 1Gi
          cpu: 1
      extraArgs:
        # Expect 3 metastores
        metastore.raft.bootstrap-expect-peers: 3
        # TODO(kolesnikovae): Update defaults.
        adaptive-placement.max-dataset-shards: 1024
        adaptive-placement.unit-size-bytes: 131072
        # Enable cleanup of blocks beyond retention
        metastore.index.cleanup-interval: 1m
        metastore.snapshot-compact-on-restore: true
    tenant-settings:
      kind: Deployment
      replicaCount: 1
      resources:
        limits:
          memory: 256Mi
        requests:
          memory: 16Mi
          cpu: 0.1
    ad-hoc-profiles:
      kind: Deployment
      replicaCount: 1
      resources:
        limits:
          memory: 256Mi
        requests:
          memory: 16Mi
          cpu: 0.1
    admin:
      kind: Deployment
      replicaCount: 1
      resources:
        limits:
          memory: 256Mi
        requests:
          memory: 16Mi
          cpu: 0.1

# -------------------------------------
# Configuration for `alloy` child chart
# -------------------------------------
alloy:
  enabled: true
  controller:
    type: "statefulset"
    replicas: 1
    podAnnotations:
      profiles.grafana.com/memory.scrape: "true"
      profiles.grafana.com/memory.port_name: "http-metrics"
      profiles.grafana.com/cpu.scrape: "true"
      profiles.grafana.com/cpu.port_name: "http-metrics"
      profiles.grafana.com/goroutine.scrape: "true"
      profiles.grafana.com/goroutine.port_name: "http-metrics"
      profiles.grafana.com/service_repository: 'https://github.com/grafana/alloy'
      profiles.grafana.com/service_git_ref: 'v1.8.1'
  alloy:
    # This needs to be set for some of our resources until version v1.2 is released
    stabilityLevel: "public-preview"
    configMap:
      create: false
      name: alloy-config-pyroscope
    clustering:
      enabled: true

# -------------------------------------
# Configuration for `grafana-agent` child chart
# -------------------------------------
agent:
  enabled: false
  controller:
    type: "statefulset"
    replicas: 1
    podAnnotations:
      profiles.grafana.com/memory.scrape: "true"
      profiles.grafana.com/memory.port_name: "http-metrics"
      profiles.grafana.com/cpu.scrape: "true"
      profiles.grafana.com/cpu.port_name: "http-metrics"
      profiles.grafana.com/goroutine.scrape: "true"
      profiles.grafana.com/goroutine.port_name: "http-metrics"
  agent:
    configMap:
      create: false
      name: grafana-agent-config-pyroscope
    clustering:
      enabled: true

# -------------------------------------
# Configuration for `minio` child chart
# -------------------------------------
minio:
  enabled: false
  replicas: 1
  # MinIO requires 2 to 16 drives for erasure coding (drivesPerNode * replicas)
  # https://docs.min.io/docs/minio-erasure-code-quickstart-guide
  # Since we only have 1 replica, that means 2 drives must be used.
  drivesPerNode: 2
  rootUser: grafana-pyroscope
  rootPassword: supersecret
  buckets:
    - name: grafana-pyroscope-data
      policy: none
      purge: false
  persistence:
    size: 5Gi
  resources:
    requests:
      cpu: 100m
      memory: 128Mi
  podAnnotations: {}

ingress:
  enabled: false
  className: ""
  pathType: ImplementationSpecific
  # Additional labels to add to the ingress resource
  labels: {}
  # Additional annotations to add to the ingress resource
  annotations: {}
  # hosts:
  #   - localhost
  # tls:
  #   - secretName: certificate

# ServiceMonitor configuration
serviceMonitor:
  # -- If enabled, ServiceMonitor resources for Prometheus Operator are created
  enabled: false
  # -- Namespace selector for ServiceMonitor resources
  namespaceSelector: {}
  # -- Optional expressions to match on
  matchExpressions: []
  # - key: prometheus.io/service-monitor
  #   operator: NotIn
  #   values:
  #     - "false"
  # -- ServiceMonitor annotations
  annotations: {}
  # -- Additional ServiceMonitor labels
  labels: {}
  # -- ServiceMonitor scrape interval
  interval: null
  # -- ServiceMonitor scrape timeout in Go duration format (e.g. 15s)
  scrapeTimeout: null
  # -- ServiceMonitor relabel configs to apply to samples before scraping
  # https://github.com/prometheus-operator/prometheus-operator/blob/master/Documentation/api.md#relabelconfig
  relabelings: []
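  # A minimal sketch (commented out) using the standard Prometheus Operator
  # RelabelConfig fields to attach a static label to every scraped target; the
  # `cluster` label name and value are placeholders:
  # relabelings:
  #   - targetLabel: cluster
  #     replacement: my-cluster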
  # -- ServiceMonitor metric relabel configs to apply to samples before ingestion
  # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#endpoint
  metricRelabelings: []
  # -- ServiceMonitor will add labels from the service to the Prometheus metrics
  # https://github.com/prometheus-operator/prometheus-operator/blob/main/Documentation/api.md#servicemonitorspec
  targetLabels: []
  # -- ServiceMonitor will use http by default, but you can pick https as well
  scheme: http
  # -- ServiceMonitor will use these tlsConfig settings to make the scrape requests
  tlsConfig: null
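  # A minimal sketch (commented out) for scraping over https using the standard
  # Prometheus Operator TLSConfig fields; `insecureSkipVerify` disables server
  # certificate validation and is shown here only as an example:
  # scheme: https
  # tlsConfig:
  #   insecureSkipVerify: true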