# Default values
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

manager:
  name: "reporter-manager"
  # -- Number of old ReplicaSets to retain for deployment rollback
  revisionHistoryLimit: 10
  # -- Annotations for the manager deployment resource
  annotations: {}
  # -- Annotations for the manager pods
  podAnnotations: {}
  replicaCount: 1
  # -- Readiness probe configuration
  readinessProbe:
    initialDelaySeconds: 25
    periodSeconds: 5
    timeoutSeconds: 3
    successThreshold: 1
    failureThreshold: 3
  # -- Liveness probe configuration
  livenessProbe:
    initialDelaySeconds: 30
    periodSeconds: 5
    timeoutSeconds: 3
    successThreshold: 1
    failureThreshold: 3
  image:
    # -- Repository for the console service container image
    repository: ghcr.io/lerianstudio/reporter-manager
    # -- Image pull policy
    pullPolicy: IfNotPresent
    # -- Image tag used for deployment
    tag: "1.1.1"
  # -- Secrets for pulling images from a private registry
  imagePullSecrets: []
  # -- Overrides the default generated name by Helm
  nameOverride: ""
  # -- Overrides the full name generated by Helm
  fullnameOverride: ""
  ingress:
    # -- Enable or disable ingress
    enabled: false
    # -- Ingress class name
    className: "nginx"
    # -- Additional ingress annotations
    annotations: {}
    hosts:
      - host: ""
        paths:
          - path: /
            pathType: Prefix
    # -- TLS configuration for ingress
    tls: []
    # - secretName: chart-example-tls
    #   hosts:
    #     - chart-example.local
  service:
    # -- Kubernetes service type
    type: ClusterIP
    # -- Service port
    port: 4005
    # -- Annotations for the service
    annotations: {}
  deploymentStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  resources:
    # -- CPU and memory limits for pods
    limits:
      cpu: 200m
      memory: 1000Mi
    # -- Minimum CPU and memory requests
    requests:
      cpu: 100m
      memory: 256Mi
  # -- Node selector for scheduling pods on specific nodes
  nodeSelector: {}
  # -- Tolerations for scheduling on tainted nodes
  # FIX(review): tolerations is a list in the Kubernetes PodSpec — was `{}`, must be `[]`
  tolerations: []
  # -- Affinity rules for pod scheduling
  affinity: {}
  # nodeAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: midaz.io/owner
  #             operator: In
  #             values:
  #               - midaz
  # podAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     - labelSelector:
  #         matchExpressions: []
  #       topologyKey: kubernetes.io/hostname
  pdb:
    # -- Enable or disable PodDisruptionBudget
    enabled: true
    # -- Maximum number of unavailable pods
    maxUnavailable: 1
    # -- Minimum number of available pods
    # NOTE(review): a PDB spec may set only one of maxUnavailable/minAvailable —
    # confirm the template renders a single field, otherwise the resource is rejected.
    minAvailable: 0
    # -- Annotations for PodDisruptionBudget
    annotations: {}
  useExistingSecret: false
  existingSecretName: ""
  clusterRole:
    # -- Enable or disable ClusterRole and ClusterRoleBinding creation
    create: true
  configmap:
    # -- Annotations for the configmap
    annotations: {}
    # APP
    VERSION: "v1.0.0"
    APP_CONTEXT: "/manager/v1"
    SERVER_PORT: "4005"
    SERVER_ADDRESS: ":4005" #":{{ .Values.manager.configmap.SERVER_PORT }}"
    # LOG LEVEL
    LOG_LEVEL: debug
    # AUTH CONFIGS
    # FIX(review): quoted — ConfigMap data values must be strings (matches REDIS_TLS: "false" convention)
    PLUGIN_AUTH_ENABLED: "false"
    PLUGIN_AUTH_ADDRESS: "http://plugin-access-manager-auth:4000"
    # OPEN TELEMETRY
    OTEL_RESOURCE_SERVICE_NAME: reporter-manager
    OTEL_RESOURCE_SERVICE_VERSION: "v1.0.0" #"{{ .Values.manager.configmap.VERSION }}"
    # SWAGGER
    SWAGGER_TITLE: 'Reporter'
    SWAGGER_DESCRIPTION: 'Documentation for reporter'
    SWAGGER_VERSION: "v4.0.0" #"{{ .Values.manager.configmap.VERSION }}"
    SWAGGER_HOST: ":4005" #"{{ .Values.manager.configmap.SERVER_ADDRESS }}"
    SWAGGER_BASE_PATH: /
    SWAGGER_SCHEMES: http
    SWAGGER_LEFT_DELIMITER: "{{"
    SWAGGER_RIGHT_DELIMITER: "}}"
    PDF_POOL_WORKERS: "3"
    PDF_TIMEOUT_SECONDS: "60"
  # Extra Env Vars
  extraEnvVars: {}
  # -- HPA configuration (used when KEDA is disabled)
  autoscaling:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 80
    targetMemoryUtilizationPercentage: 80
    scaleDownStabilizationSeconds: 300
  keda:
    scaledObject:
      enabled: true
      minReplicaCount: 1
      maxReplicaCount: 10
      pollingInterval: 30
      cooldownPeriod: 300
      triggers:
        - type: cpu
          metricType: Utilization # Allowed types are 'Utilization' or 'AverageValue'
          metadata:
            value: "80"
            # NOTE(review): containerName nested under metadata per the KEDA cpu/memory
            # scaler API — confirm against the rendered ScaledObject.
            containerName: "reporter-manager"
        - type: memory
          metricType: Utilization # Allowed types are 'Utilization' or 'AverageValue'
          metadata:
            value: "80"
            containerName: "reporter-manager"

# Worker configuration
worker:
  name: "reporter-worker"
  # -- Annotations for worker job resources
  annotations: {}
  # -- Number of replicas (used when KEDA is disabled and worker runs as a Deployment)
  replicaCount: 2
  image:
    # -- Repository for the console service container image
    repository: ghcr.io/lerianstudio/reporter-worker
    # -- Image pull policy
    pullPolicy: IfNotPresent
    # -- Image tag used for deployment
    tag: "1.1.0"
  # -- Secrets for pulling images from a private registry
  imagePullSecrets: []
  # -- Overrides the default generated name by Helm
  nameOverride: ""
  # -- Overrides the full name generated by Helm
  fullnameOverride: ""
  service:
    # -- Kubernetes service type
    type: ClusterIP
    # -- Service port
    port: 80
    # -- Annotations for the service
    annotations: {}
  resources:
    # -- CPU and memory limits for pods
    limits:
      cpu: 200m
      memory: 256Mi
    # -- Minimum CPU and memory requests
    requests:
      cpu: 100m
      memory: 128Mi
  # -- Node selector for scheduling pods on specific nodes
  nodeSelector: {}
  # -- Tolerations for scheduling on tainted nodes
  # FIX(review): tolerations is a list in the Kubernetes PodSpec — was `{}`, must be `[]`
  tolerations: []
  # -- Affinity rules for pod scheduling
  affinity: {}
  # nodeAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     nodeSelectorTerms:
  #       - matchExpressions:
  #           - key: midaz.io/owner
  #             operator: In
  #             values:
  #               - midaz
  # podAffinity:
  #   requiredDuringSchedulingIgnoredDuringExecution:
  #     - labelSelector:
  #         matchExpressions: []
  #       topologyKey: kubernetes.io/hostname
  useExistingSecret: false
  existingSecretName: ""
  configmap:
    # -- Annotations for the configmap
    annotations: {}
    # APP
    VERSION: "v1.0.0"
    # HEALTH SERVER
    HEALTH_PORT: "4006"
    # LOG LEVEL
    LOG_LEVEL: debug
    # OPEN TELEMETRY
    OTEL_RESOURCE_SERVICE_NAME: reporter-worker
    OTEL_RESOURCE_SERVICE_VERSION: "v1.0.0"
    PDF_POOL_WORKERS: "5"
    PDF_TIMEOUT_SECONDS: "30"
  # -- Pod annotations for the worker deployment (used when KEDA is disabled)
  podAnnotations: {}
  # -- Deployment strategy (used when KEDA is disabled)
  deploymentStrategy:
    type: RollingUpdate
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 0
  # -- Readiness probe configuration (used when KEDA is disabled)
  readinessProbe:
    initialDelaySeconds: 15
    periodSeconds: 5
    timeoutSeconds: 3
    successThreshold: 1
    failureThreshold: 3
  # -- Liveness probe configuration (used when KEDA is disabled)
  livenessProbe:
    initialDelaySeconds: 20
    periodSeconds: 5
    timeoutSeconds: 3
    successThreshold: 1
    failureThreshold: 3
  # -- HPA configuration (used when KEDA is disabled)
  autoscaling:
    enabled: true
    minReplicas: 2
    maxReplicas: 10
    targetCPUUtilizationPercentage: 80
    targetMemoryUtilizationPercentage: 80
    scaleDownStabilizationSeconds: 300
  # Extra Env Vars
  extraEnvVars: {}
  # -- KEDA configuration for worker auto-scaling (default mode)
  keda:
    scaledJob:
      enabled: true
      name: "reporter-worker-scaler"
      pollingInterval: 10
      minReplicaCount: 0
      backoffLimit: 3
      ttlSecondsAfterFinished: 30
      # -- Maximum time in seconds for a job to run before it is terminated
      activeDeadlineSeconds: 300
      successfulJobsHistoryLimit: 3
      failedJobsHistoryLimit: 3
      maxReplicaCount: 5
      triggers:
        - type: rabbitmq
          metadata:
            queueLength: "1"
            protocol: "amqp"
            vhost: "/"

common:
  configmap:
    # WORKER
    ENV_NAME: development
    # RABBITMQ
    RABBITMQ_URI: amqp
    # FIX(review): numeric values quoted — ConfigMap data values must be strings
    # (matches the file's own convention, e.g. SERVER_PORT: "4005")
    RABBITMQ_PORT_HOST: "15672"
    RABBITMQ_HOST: reporter-rabbitmq.reporter.svc.cluster.local
    RABBITMQ_PORT_AMQP: "5672"
    RABBITMQ_NUMBERS_OF_WORKERS: "5"
    RABBITMQ_EXCHANGE: "reporter.generate-report.exchange"
    RABBITMQ_GENERATE_REPORT_QUEUE: "reporter.generate-report.queue"
    RABBITMQ_GENERATE_REPORT_KEY: "reporter.generate-report.key"
    RABBITMQ_HEALTH_CHECK_URL: "http://reporter-rabbitmq.reporter.svc.cluster.local:15672"
    # Redis Configs
    REDIS_MASTER_NAME: ""
    REDIS_HOST: reporter-valkey.reporter.svc.cluster.local:6379
    REDIS_DB: "0"
    REDIS_PROTOCOL: "3"
    REDIS_TLS: "false"
    REDIS_CA_CERT: ""
    GOOGLE_APPLICATION_CREDENTIALS: ""
    REDIS_SERVICE_ACCOUNT: ""
    # Object Storage endpoint (default for local development with SeaweedFS)
    OBJECT_STORAGE_ENDPOINT: http://seaweedfs-s3.reporter.svc.cluster.local:8333
    OBJECT_STORAGE_REGION: us-east-1
    OBJECT_STORAGE_USE_PATH_STYLE: "true"
    OBJECT_STORAGE_DISABLE_SSL: "true"
    OBJECT_STORAGE_BUCKET: "reporter-storage"
    # MONGO DB
    #MONGO_URI=mongo+srv
    MONGO_URI: mongodb
    MONGO_HOST: reporter-mongodb.reporter.svc.cluster.local
    MONGO_NAME: reporter-db
    MONGO_USER: reporter
    MONGO_PORT: "27017"
    MONGO_MAX_POOL_SIZE: "1000"
    # OPEN TELEMETRY
    OTEL_LIBRARY_NAME: github.com/LerianStudio/reporter
    OTEL_RESOURCE_DEPLOYMENT_ENVIRONMENT: production
    OTEL_EXPORTER_OTLP_ENDPOINT_PORT: "4317"
    OTEL_EXPORTER_OTLP_ENDPOINT: otlp://midaz-otel-lgtm:4317
    ENABLE_TELEMETRY: "true"
    # MIDAZ ONBOARDING
    DATASOURCE_ONBOARDING_CONFIG_NAME: midaz_onboarding
    DATASOURCE_ONBOARDING_HOST: midaz-postgresql-replication.midaz.svc.cluster.local
    DATASOURCE_ONBOARDING_PORT: "5432"
    DATASOURCE_ONBOARDING_USER: midaz
    DATASOURCE_ONBOARDING_DATABASE: onboarding
    DATASOURCE_ONBOARDING_TYPE: postgresql
    DATASOURCE_ONBOARDING_SSLMODE: disable
    DATASOURCE_ONBOARDING_SSLROOTCERT: ""
    # EXTERNAL DATABASE WITH MULTIPLE SCHEMAS
    # Use DATASOURCE__SCHEMAS to specify which schemas to query (comma-separated)
    # If not set, defaults to "public" schema only
    # In templates, use explicit schema syntax: external_db:sales.orders
    #DATASOURCE_EXTERNAL_CONFIG_NAME: external_db
    #DATASOURCE_EXTERNAL_HOST: external-postgres
    #DATASOURCE_EXTERNAL_PORT: 5432
    #DATASOURCE_EXTERNAL_USER: db_user
    #DATASOURCE_EXTERNAL_DATABASE: external_database
    #DATASOURCE_EXTERNAL_TYPE: postgresql
    #DATASOURCE_EXTERNAL_SSLMODE: disable
    #DATASOURCE_EXTERNAL_SSLROOTCERT:
    #DATASOURCE_EXTERNAL_DB_SCHEMAS: sales,inventory,reporting
  # NOTE(review): plaintext credentials committed to VCS — intended as local-development
  # defaults; override via useExistingSecret/existingSecretName in real environments.
  secrets:
    MONGO_PASSWORD: lerian
    REDIS_PASSWORD: "lerian"
    RABBITMQ_DEFAULT_USER: plugin
    RABBITMQ_DEFAULT_PASS: Lerian@123
    DATASOURCE_ONBOARDING_PASSWORD: lerian
    #DATASOURCE_EXTERNAL_PASSWORD: db_password
    OBJECT_STORAGE_ACCESS_KEY_ID: "any"
    OBJECT_STORAGE_SECRET_KEY: "any"

seaweedfs:
  enabled: true
  master:
    enabled: true
    replicas: 1
    extraArgs:
      - "master"
      - "-port=9333"
      - "-mdir=/data"
    service:
      type: ClusterIP
      ports:
        http: 9333
        ui: 9334
    # Persistent storage for the master node
    data:
      type: "persistentVolumeClaim"
      size: "5Gi"
      storageClass: null # Use cluster default or set a specific StorageClass
  volume:
    enabled: true
    replicas: 1
    extraArgs:
      - "volume"
      - "-port=9080"
      - "-mserver=seaweedfs-master:9333"
      - "-dir=/data"
      - "-max=0"
    service:
      type: ClusterIP
      ports:
        http: 9080
    # Persistent storage for volume servers
    dataDirs:
      - name: data
        type: "persistentVolumeClaim"
        size: "10Gi"
        storageClass: null # Use default or specify your StorageClass
        maxVolumes: 0 # 0 = let SeaweedFS decide, or set a limit
  filer:
    enabled: true
    extraArgs:
      - "filer"
      - "-port=8888"
      - "-master=seaweedfs-master:9333"
    service:
      type: ClusterIP
      ports:
        http: 8888
    # Persistent storage for filer
    data:
      type: "persistentVolumeClaim"
      size: "25Gi"
      storageClass: null
  s3:
    enabled: true
    port: 8333
    grpcPort: 18333
    # Allow empty access key for development (disable auth)
    # In production, use Kubernetes secrets for credentials
    allowEmptyAccessKey: true
    # Enable path-style URLs (required for most S3 clients with non-AWS endpoints)
    enableAuth: false
    resources:
      limits:
        cpu: 200m
        memory: 256Mi
      requests:
        cpu: 100m
        memory: 128Mi
    logs:
      type: "emptyDir"

# KEDA configuration
keda:
  enabled: true
  # Set to true when using an externally installed KEDA operator (e.g., installed via separate helm chart)
  # When external is true, the chart will still create ScaledJob and TriggerAuthentication resources
  # but will not install the KEDA operator itself
  external: false
  # TriggerAuthentication configuration for RabbitMQ credentials
  triggerAuthentication:
    # -- Secret name containing RabbitMQ credentials (defaults to reporter-manager secret)
    secretName: ""
    # -- Key in secret for RabbitMQ username
    usernameKey: "RABBITMQ_DEFAULT_USER"
    # -- Key in secret for RabbitMQ password
    passwordKey: "RABBITMQ_DEFAULT_PASS"
  crds:
    install: true
  webhookCerts:
    generate: true
  serviceAccount:
    create: true
    automountServiceAccountToken: true
  operator:
    logLevel: info
    logEncoder: console
    resources:
      limits:
        cpu: 150m
        memory: 256Mi
      requests:
        cpu: 100m
        memory: 128Mi
  webhooks:
    logLevel: info
    logEncoder: console
    resources:
      limits:
        cpu: 150m
        memory: 256Mi
      requests:
        cpu: 100m
        memory: 128Mi
  metricsApiServer:
    logLevel: info
    logEncoder: console
    resources:
      limits:
        cpu: 150m
        memory: 256Mi
      requests:
        cpu: 100m
        memory: 128Mi

mongodb:
  # MongoDB's flexibility and scalability make it the perfect choice for managing evolving and less structured data.
  enabled: true
  global:
    security:
      allowInsecureImages: true
  image:
    repository: bitnamisecure/mongodb
    # NOTE(review): "latest" is not reproducible — consider pinning a version tag.
    tag: "latest"
  external: false
  auth:
    enabled: true
    rootUser: reporter
    rootPassword: lerian
  persistence:
    size: 8Gi
  resources:
    limits:
      cpu: 500m
      memory: 512Mi
    requests:
      cpu: 500m
      memory: 512Mi
  resourcesPreset: small
  service:
    type: ClusterIP
    port: 27017

externalRabbitmqDefinitions:
  # -- Enable or disable the RabbitMQ bootstrap job
  enabled: false
  # -- RabbitMQ connection settings
  connection:
    # -- RabbitMQ protocol (http or https)
    protocol: "http"
    # -- RabbitMQ host (management API endpoint)
    host: "reporter-rabbitmq"
    # -- RabbitMQ HTTP management port
    port: "15672"
    # -- RabbitMQ AMQP port (for connectivity check)
    portAmqp: "5672"
  # -- Admin credentials for RabbitMQ management API
  rabbitmqAdminLogin:
    useExistingSecret:
      # -- Name of existing secret containing RABBITMQ_ADMIN_USER and RABBITMQ_ADMIN_PASS keys
      name: ""
    # -- Admin username (ignored if useExistingSecret.name is set)
    username: "midaz"
    # -- Admin password (ignored if useExistingSecret.name is set)
    password: "lerian"
  # -- Credentials for the plugin application user created by the job
  appCredentials:
    useExistingSecret:
      # -- Name of existing secret containing RABBITMQ_DEFAULT_PASS key
      name: ""
    # -- Password for plugin user (ignored if useExistingSecret.name is set)
    pluginPassword: "Lerian@123"

rabbitmq:
  enabled: true
  image:
    tag: "3.13.6"
  persistence:
    size: 8Gi
  resources:
    requests:
      cpu: 250m
      memory: 512Mi
    limits:
      cpu: "1"
      memory: 1Gi
  podSecurityContext:
    runAsUser: 1001
    runAsGroup: 1001
    fsGroup: 1001
    fsGroupChangePolicy: "OnRootMismatch"
    seccompProfile:
      type: RuntimeDefault
  containerSecurityContext:
    allowPrivilegeEscalation: false
    readOnlyRootFilesystem: true
    capabilities:
      drop: ["ALL"]
  authentication:
    user:
      value: "midaz"
    password:
      value: "lerian"
    erlangCookie:
      value: "b2a717550ac09676c545fe9d986c7651f7237b2691292961"
  extraSecrets:
    - name: "reporter-manager-load-definitions"
      mountPath: /etc/rabbitmq/definitions
  customConfig: |
    management.load_definitions = /etc/rabbitmq/definitions/load_definition.json

valkey:
  # Redis is used to handle scenarios where real-time performance and fast data retrieval are essential.
  # This component is responsible for providing an in-memory data store
  # For more details, refer to the documentation:
  # https://docs.lerian.studio/docs/midaz-components#why-redis
  enabled: true
  auth:
    enabled: false

otel-collector-lerian:
  enabled: true

# -- AWS Authentication - IAM Roles Anywhere for non-AWS clusters (e.g. Clotilde/Proxmox)
aws:
  rolesAnywhere:
    enabled: false
    trustAnchorArn: ""
    profileArn: ""
    roleArn: ""
    region: "us-east-2"
    sessionDuration: 3600
    certificateSecretName: "reporter-iam-tls"
    sidecar:
      image:
        repository: public.ecr.aws/rolesanywhere/credential-helper
        tag: "latest-amd64"
        pullPolicy: IfNotPresent
      port: 9911
      resources:
        limits:
          cpu: 100m
          memory: 128Mi
        requests:
          cpu: 10m
          memory: 64Mi