---
apiVersion: v1
kind: ConfigMap
metadata:
  name: project-grafana-loki-0.48.7-d2iq-defaults
  namespace: ${releaseNamespace}
data:
  values.yaml: |
    ####################################################################
    ## BEGIN DKP specific config overrides                            ##
    ## This is added as a workaround to use the same configmap for    ##
    ## both project-grafana-loki & object-bucket-claims helmreleases. ##
    ####################################################################
    dkp:
      project-grafana-loki:
        enabled: true
      enableOBCHealthCheck: false
      bucketName: proj-loki-${releaseNamespace}
      storageClassName: dkp-object-store
      additionalConfig:
        # We emit 200MB per workspace per day with audit logs disabled.
        # We also have a default retention period of 7 days.
        # This is NOT definitive, but it translates to 200MB x 7 days, i.e. at most ~1.5G per week per project
        # (since workspace log data is a superset of project log data).
        # This value can be configured by the user during project deployment according to their project needs
        # (e.g. if a project has a lot of noisy user-created workloads).
        maxSize: "5G"
    ####################################################################
    ## END of DKP specific config overrides                           ##
    ####################################################################
    loki:
      ingesterFullname: loki-ingester
      annotations:
        secret.reloader.stakater.com/reload: proj-loki-${releaseNamespace}
      config: |
        auth_enabled: false
        server:
          http_listen_port: 3100
          log_level: warn
          grpc_server_max_recv_msg_size: 10485760
          # grpc_server_max_send_msg_size should be set at least to the maximum log size expected in a single push request.
          grpc_server_max_send_msg_size: 10485760
        distributor:
          ring:
            kvstore:
              store: memberlist
        memberlist:
          join_members:
            - {{ include "loki.fullname" . }}-memberlist
        ingester:
          lifecycler:
            ring:
              kvstore:
                store: memberlist
              replication_factor: 1
          chunk_idle_period: 30m
          chunk_block_size: 262144
          chunk_encoding: snappy
          chunk_retain_period: 1m
          max_transfer_retries: 0
          flush_op_timeout: 10m
          wal:
            enabled: true
            flush_on_shutdown: false
            checkpoint_duration: 5m
            replay_memory_ceiling: 4GB
            dir: /var/loki/wal
        limits_config:
          retention_period: 168h
          enforce_metric_name: false
          reject_old_samples: true
          reject_old_samples_max_age: 168h
          max_cache_freshness_per_query: 10m
          split_queries_by_interval: 15m
          ingestion_rate_mb: 10
          # ingestion_burst_size_mb should be set at least to the maximum log size expected in a single push request.
          ingestion_burst_size_mb: 10
          per_stream_rate_limit: 10MB
          per_stream_rate_limit_burst: 15MB
        schema_config:
          configs:
            - from: 2020-09-07
              store: boltdb-shipper
              object_store: aws
              schema: v11
              index:
                prefix: loki_index_
                period: 24h
        storage_config:
          boltdb_shipper:
            shared_store: s3
            active_index_directory: /var/loki/index
            cache_location: /var/loki/cache
            cache_ttl: 168h
          aws:
            s3: "http://rook-ceph-rgw-dkp-object-store.${workspaceNamespace}.svc:80/proj-loki-${releaseNamespace}"
            s3forcepathstyle: true
        chunk_store_config:
          max_look_back_period: 0s
        query_range:
          align_queries_with_step: true
          max_retries: 5
          cache_results: true
          results_cache:
            cache:
              enable_fifocache: true
              fifocache:
                max_size_items: 1024
                validity: 24h
        frontend_worker:
          frontend_address: {{ include "loki.queryFrontendFullname" . }}:9095
        frontend:
          log_queries_longer_than: 5s
          compress_responses: true
          tail_proxy_url: http://{{ include "loki.querierFullname" . }}:3100
        compactor:
          shared_store: s3
          retention_enabled: true
          compaction_interval: 10m # Default is 10m
          retention_delete_delay: 2h # Default is 2h
        ruler:
          storage:
            type: local
            local:
              directory: /etc/loki/rules
          ring:
            kvstore:
              store: memberlist
          rule_path: /tmp/loki/scratch
          alertmanager_url: http://kube-prometheus-stack-alertmanager.${workspaceNamespace}.svc.cluster.local:9093
          external_url: ""
    ingester:
      replicas: 1
      serviceLabels:
        servicemonitor.kommander.mesosphere.io/path: "metrics"
        servicemonitor.kommander.mesosphere.io/port: "http"
      persistence:
        enabled: true
        size: 10Gi
      extraEnvFrom:
        - secretRef: # s3 access and secret keys
            name: proj-loki-${releaseNamespace}
    querier:
      replicas: 1
      serviceLabels:
        servicemonitor.kommander.mesosphere.io/path: "metrics"
        servicemonitor.kommander.mesosphere.io/port: "http"
      persistence:
        enabled: true
        size: 10Gi
      extraEnvFrom:
        - secretRef: # s3 access and secret keys
            name: proj-loki-${releaseNamespace}
    queryFrontend:
      extraEnvFrom:
        - secretRef: # s3 access and secret keys
            name: proj-loki-${releaseNamespace}
    compactor:
      enabled: true
      serviceLabels:
        servicemonitor.kommander.mesosphere.io/path: "metrics"
        servicemonitor.kommander.mesosphere.io/port: "http"
      persistence:
        enabled: true
        size: 10Gi
      extraEnvFrom:
        - secretRef: # s3 access and secret keys
            name: proj-loki-${releaseNamespace}
    ruler:
      enabled: false
    gateway:
      image:
        # Override the nginx image to address known CVEs.
        # As of 0.48.4, chart maintainers are still using 1.19-alpine.
        tag: 1.22.0-alpine
      verboseLogging: false
      nginxConfig:
        httpSnippet: |-
          client_max_body_size 10M;
        serverSnippet: |-
          client_max_body_size 10M;
    distributor:
      serviceLabels:
        servicemonitor.kommander.mesosphere.io/path: "metrics"
        servicemonitor.kommander.mesosphere.io/port: "http"
      extraEnvFrom:
        - secretRef: # s3 access and secret keys
            name: proj-loki-${releaseNamespace}