# -- Parameters used globally across all service helm charts.
global:
  # -- Add custom normal and secret envs to the service.
  # Envs defined in global.usrEnvs will be globally available to all services
  usrEnvs:
    # -- Add custom normal envs to the service.
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service.
    # variable1: value1
    secret: {}
  istio:
    # -- Boolean flag that enables using istio sidecars with Gluu services.
    ingress: false
    # -- Boolean flag that enables using istio gateway for Gluu. This assumes istio ingress is installed and hence the LB is available.
    enabled: false
    # -- The namespace istio is deployed in. This is normally istio-system.
    namespace: istio-system
    # -- Additional labels that will be added across the gateway in the format of {mylabel: "myapp"}
    additionalLabels: { }
    # -- Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"}
    additionalAnnotations: { }
  alb:
    ingress:
      enabled: false
      # -- Enable Admin UI endpoints /identity
      adminUiEnabled: true
      # -- Enable endpoint /.well-known/openid-configuration
      openidConfigEnabled: true
      # -- Enable endpoint /.well-known/uma2-configuration
      uma2ConfigEnabled: true
      # -- Enable endpoint /.well-known/webfinger
      webfingerEnabled: true
      # -- Enable endpoint /.well-known/simple-web-discovery
      webdiscoveryEnabled: true
      # -- Enable endpoint /.well-known/scim-configuration
      scimConfigEnabled: false
      # -- Enable SCIM endpoints /scim
      scimEnabled: false
      # -- Enable endpoint /.well-known/fido-configuration
      u2fConfigEnabled: true
      # -- Enable all fido2 endpoints /fido2
      fido2Enabled: false
      # -- Enable endpoint /.well-known/fido2-configuration
      fido2ConfigEnabled: false
      # -- Enable Auth server endpoints /oxauth
      authServerEnabled: true
      # -- Enable casa endpoints /casa
      casaEnabled: false
      # -- Enable passport endpoints /passport
      passportEnabled: false
      # -- Enable oxshibboleth endpoints /idp
      shibEnabled: false
      # -- Additional labels that will be added across all ingress definitions in the format of {mylabel: "myapp"}
      additionalLabels: { }
      # -- Additional annotations that will be added across all ingress definitions in the format of {cert-manager.io/issuer: "letsencrypt-prod"}
      additionalAnnotations:
        kubernetes.io/ingress.class: alb
        alb.ingress.kubernetes.io/scheme: internet-facing
        alb.ingress.kubernetes.io/certificate-arn: arn:aws:acm:us-west-2:xxxx:certificate/xxxxxx
        alb.ingress.kubernetes.io/auth-session-cookie: custom-cookie
  cloud:
    # -- Boolean flag that, if enabled, will strip resources requests and limits from all services.
    testEnviroment: false
  upgrade:
    # -- Boolean flag used when upgrading between versions.
    enabled: false
    image:
      # -- Image to use for deploying.
      repository: gluufederation/upgrade
      # -- Image tag to use for deploying.
      tag: 4.4.2-2
    # -- Source version currently running. This is normally one minor version down.
    # The step should only be one minor version per upgrade
    sourceVersion: "4.4"
    # -- Target version to upgrade to. This is normally one minor version up.
    # The step should only be one minor version per upgrade
    targetVersion: "4.4"
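  # For illustration only (commented out): a minimal sketch of the upgrade block above when moving up
  # one minor version, e.g. from 4.3 to 4.4. The versions shown are placeholders; adjust them and the
  # upgrade image tag to the versions actually in use before enabling.
  # upgrade:
  #   enabled: true
  #   sourceVersion: "4.3"
  #   targetVersion: "4.4"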
  # -- StorageClass section for Jackrabbit and OpenDJ charts. This is not currently used by the openbanking distribution. You may specify custom parameters as needed.
  storageClass:
    allowVolumeExpansion: true
    allowedTopologies: []
    mountOptions:
      - debug
    # -- parameters:
    #fsType: ""
    #kind: ""
    #pool: ""
    #storageAccountType: ""
    #type: ""
    parameters: {}
    provisioner: microk8s.io/hostpath
    reclaimPolicy: Retain
    volumeBindingMode: WaitForFirstConsumer
  # -- GCE storage kind if using Google disks
  gcePdStorageType: pd-standard
  # -- Volume storage type if using Azure disks.
  azureStorageAccountType: Standard_LRS
  # -- Azure storage kind if using Azure disks
  azureStorageKind: Managed
  # -- The LoadBalancer IP created by nginx or istio on clouds that provide static IPs. This is not needed if `global.domain` is globally resolvable.
  lbIp: "22.22.22.22"
  # -- Fully qualified domain name to be used for Gluu installation. This address will be used to reach Gluu services.
  domain: demoexample.gluu.org
  # -- Boolean flag to enable mapping global.lbIp to global.domain inside pods on clouds that provide static IPs for load balancers. On clouds that provide only addresses for the LB, this flag will enable a script that actively scans config.configmap.lbAddr and updates the hosts file inside the pods automatically.
  isDomainRegistered: "false"
  # -- Name of the OpenDJ service. Please keep it as default.
  ldapServiceName: opendj
  # -- Persistence backend to run Gluu with ldap|couchbase|hybrid|sql|spanner.
  gluuPersistenceType: couchbase
  # -- Boolean flag that, if enabled, runs Jackrabbit in cluster mode with Postgres.
  gluuJackrabbitCluster: "true"
  # -- The config backend adapter that will hold Gluu configuration layer. google|kubernetes
  configAdapterName: kubernetes
  # -- The config backend adapter that will hold Gluu secret layer. google|kubernetes
  configSecretAdapter: kubernetes
  # -- Validate that the certificate is downloaded from the given domain. If set to true (defaults to false), an error is raised if the cert is not downloaded. Note that the flag is ignored if mounted SSL cert and key files exist.
  sslCertFromDomain: "false"
  # -- Mount path of the Google service account JSON file inside the pods. The service account must have roles/secretmanager.admin to use Google secrets and roles/spanner.databaseUser to use Spanner.
  cnGoogleApplicationCredentials: /etc/gluu/conf/google-credentials.json
  oxauth:
    # -- Boolean flag to enable/disable oxauth chart. You should never set this to false.
    enabled: true
    # -- App loggers can be configured to define where the logs will be redirected and the level at which each should be displayed.
    # Log levels are "OFF", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"
    # Targets are "STDOUT" and "FILE"
    appLoggers:
      # -- oxauth.log target
      authLogTarget: "STDOUT"
      # -- oxauth.log level
      authLogLevel: "INFO"
      # -- http_request_response.log target
      httpLogTarget: "FILE"
      # -- http_request_response.log level
      httpLogLevel: "INFO"
      # -- oxauth_persistence.log target
      persistenceLogTarget: "FILE"
      # -- oxauth_persistence.log level
      persistenceLogLevel: "INFO"
      # -- oxauth_persistence_duration.log target
      persistenceDurationLogTarget: "FILE"
      # -- oxauth_persistence_duration.log level
      persistenceDurationLogLevel: "INFO"
      # -- oxauth_persistence_ldap_statistics.log target
      ldapStatsLogTarget: "FILE"
      # -- oxauth_persistence_ldap_statistics.log level
      ldapStatsLogLevel: "INFO"
      # -- oxauth_script.log target
      scriptLogTarget: "FILE"
      # -- oxauth_script.log level
      scriptLogLevel: "INFO"
      # -- oxauth_audit.log target
      auditStatsLogTarget: "FILE"
      # -- oxauth_audit.log level
      auditStatsLogLevel: "INFO"
      # -- cleaner log target
      cleanerLogTarget: "FILE"
      # -- cleaner log level
      cleanerLogLevel: "INFO"
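    # For illustration only (commented out): sending the oxauth script log above to the pod's STDOUT
    # at DEBUG, using the documented targets and levels. Override only the loggers you need.
    # appLoggers:
    #   scriptLogTarget: "STDOUT"
    #   scriptLogLevel: "DEBUG"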
  fido2:
    # -- Boolean flag to enable/disable the fido2 chart.
    enabled: false
    # -- App loggers can be configured to define where the logs will be redirected and the level at which each should be displayed.
    # Log levels are "OFF", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"
    # Targets are "STDOUT" and "FILE"
    appLoggers:
      # -- fido2.log target
      fido2LogTarget: "STDOUT"
      # -- fido2.log level
      fido2LogLevel: "INFO"
      # -- fido2_persistence.log target
      persistenceLogTarget: "FILE"
      # -- fido2_persistence.log level
      persistenceLogLevel: "INFO"
  scim:
    # -- Boolean flag to enable/disable the SCIM chart.
    enabled: false
    # -- App loggers can be configured to define where the logs will be redirected and the level at which each should be displayed.
    # Log levels are "OFF", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"
    # Targets are "STDOUT" and "FILE"
    appLoggers:
      # -- scim.log target
      scimLogTarget: "STDOUT"
      # -- scim.log level
      scimLogLevel: "INFO"
      # -- scim_persistence.log target
      persistenceLogTarget: "FILE"
      # -- scim_persistence.log level
      persistenceLogLevel: "INFO"
      # -- scim_persistence_duration.log target
      persistenceDurationLogTarget: "FILE"
      # -- scim_persistence_duration.log level
      persistenceDurationLogLevel: "INFO"
      # -- scim_script.log target
      scriptLogTarget: "FILE"
      # -- scim_script.log level
      scriptLogLevel: "INFO"
  config:
    # -- Boolean flag to enable/disable the configuration chart. This should normally never be false.
    enabled: true
    # -- https://kubernetes.io/docs/concepts/workloads/controllers/ttlafterfinished/
    jobTtlSecondsAfterFinished: 300
  jackrabbit:
    # -- Boolean flag to enable/disable the jackrabbit chart. For more information on how it is used inside Gluu, see https://gluu.org/docs/gluu-server/4.2/installation-guide/install-kubernetes/#working-with-jackrabbit. If disabled, oxShibboleth cannot be run.
    enabled: true
  persistence:
    # -- Boolean flag to enable/disable the persistence chart.
    enabled: true
  oxtrust:
    # -- Boolean flag to enable/disable the oxtrust chart.
    enabled: true
    # -- App loggers can be configured to define where the logs will be redirected and the level at which each should be displayed.
    # Log levels are "OFF", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"
    # Targets are "STDOUT" and "FILE"
    appLoggers:
      # -- oxtrust.log target
      oxtrustLogTarget: "STDOUT"
      # -- oxtrust.log level
      oxtrustLogLevel: "INFO"
      # -- http_request_response.log target
      httpLogTarget: "FILE"
      # -- http_request_response.log level
      httpLogLevel: "INFO"
      # -- oxtrust_persistence.log target
      persistenceLogTarget: "FILE"
      # -- oxtrust_persistence.log level
      persistenceLogLevel: "INFO"
      # -- oxtrust_persistence_duration.log target
      persistenceDurationLogTarget: "FILE"
      # -- oxtrust_persistence_duration.log level
      persistenceDurationLogLevel: "INFO"
      # -- oxtrust_persistence_ldap_statistics.log target
      ldapStatsLogTarget: "FILE"
      # -- oxtrust_persistence_ldap_statistics.log level
      ldapStatsLogLevel: "INFO"
      # -- oxtrust_script.log target
      scriptLogTarget: "FILE"
      # -- oxtrust_script.log level
      scriptLogLevel: "INFO"
      # -- oxtrust_audit.log target
      auditStatsLogTarget: "FILE"
      # -- oxtrust_audit.log level
      auditStatsLogLevel: "INFO"
      # -- cleaner log target
      cleanerLogTarget: "FILE"
      # -- cleaner log level
      cleanerLogLevel: "INFO"
      # -- velocity log level
      velocityLogLevel: "INFO"
      # -- velocity log target
      velocityLogTarget: "FILE"
      # -- cache refresh log level
      cacheRefreshLogLevel: "INFO"
      # -- cache refresh log target
      cacheRefreshLogTarget: "FILE"
      # -- cache refresh python log level
      cacheRefreshPythonLogLevel: "INFO"
      # -- cache refresh python log target
      cacheRefreshPythonLogTarget: "FILE"
      # -- apachehc log level
      apachehcLogLevel: "INFO"
      # -- apachehc log target
      apachehcLogTarget: "FILE"
  opendj:
    # -- Boolean flag to enable/disable the OpenDJ chart.
    enabled: true
  oxshibboleth:
    # -- Boolean flag to enable/disable the oxShibboleth chart.
    enabled: false
    # -- App loggers can be configured to define where the logs will be redirected and the level at which each should be displayed.
    # Log levels are "OFF", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"
    # Targets are "STDOUT" and "FILE"
    appLoggers:
      # -- idp-process.log target
      idpLogTarget: "STDOUT"
      # -- idp-process.log level
      idpLogLevel: "INFO"
      # -- idp-script.log target
      scriptLogTarget: "FILE"
      # -- idp-script.log level
      scriptLogLevel: "INFO"
      # -- idp-audit.log target
      auditStatsLogTarget: "FILE"
      # -- idp-audit.log level
      auditStatsLogLevel: "INFO"
      # -- idp-consent-audit.log target
      consentAuditLogTarget: "FILE"
      # -- idp-consent-audit.log level
      consentAuditLogLevel: "INFO"
      # -- https://github.com/GluuFederation/docker-oxshibboleth#additional-logger-configuration
      # The loggers below are very noisy and are best left untouched
      ldapLogLevel: ""
      messagesLogLevel: ""
      encryptionLogLevel: ""
      opensamlLogLevel: ""
      propsLogLevel: ""
      httpclientLogLevel: ""
      springLogLevel: ""
      containerLogLevel: ""
      xmlsecLogLevel: ""
  oxd-server:
    # -- Boolean flag to enable/disable the oxd-server chart.
    enabled: false
    # -- App loggers can be configured to define where the logs will be redirected and the level at which each should be displayed.
    # Log levels are "OFF", "FATAL", "ERROR", "WARN", "INFO", "DEBUG", "TRACE"
    # Targets are "STDOUT" and "FILE"
    appLoggers:
      # -- oxd-server.log target
      oxdServerLogTarget: "STDOUT"
      # -- oxd-server.log level
      oxdServerLogLevel: "INFO"
  nginx-ingress:
    # -- Boolean flag to enable/disable the nginx-ingress definitions chart.
    enabled: true
  oxauth-key-rotation:
    # -- Boolean flag to enable/disable the oxauth-server-key rotation cronjob chart.
    enabled: false
  cr-rotate:
    # -- Boolean flag to enable/disable the cr-rotate chart.
    enabled: false
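# For illustration only (commented out): a sketch of switching the persistence backend from the
# Couchbase default to the bundled OpenDJ, using keys documented in this file (global.gluuPersistenceType,
# global.opendj, config.ldapPass). Review the remaining Couchbase/LDAP settings before applying such a change.
# global:
#   gluuPersistenceType: ldap
#   opendj:
#     enabled: true
# config:
#   ldapPass: <strong-password>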
# -- Configuration parameters for setup and initial configuration secret and config layers used by Gluu services.
config:
  # -- Add custom normal and secret envs to the service.
  usrEnvs:
    # -- Add custom normal envs to the service.
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service.
    # variable1: value1
    secret: {}
  # -- Organization name. Used for certificate creation.
  orgName: Gluu
  # -- Email address of the administrator, usually. Used for certificate creation.
  email: support@gluu.com
  # -- Admin password to log in to the UI.
  adminPass: P@ssw0rd
  # -- LDAP admin password if OpenDJ is used for persistence.
  ldapPass: P@ssw0rd
  # -- Redis admin password if `config.configmap.gluuCacheType` is set to `REDIS`.
  redisPass: P@assw0rd
  # -- Country code. Used for certificate creation.
  countryCode: US
  # -- State code. Used for certificate creation.
  state: TX
  # -- City. Used for certificate creation.
  city: Austin
  # -- Salt. Used for encoding/decoding sensitive data. If omitted or set to an empty string, the value will be self-generated. Otherwise, a 24-character alphanumeric string is expected as its value.
  salt: ""
  configmap:
    # -- SQL database dialect. `mysql` or `pgsql`
    cnSqlDbDialect: mysql
    # -- SQL database host uri.
    cnSqlDbHost: my-release-mysql.default.svc.cluster.local
    # -- SQL database port.
    cnSqlDbPort: 3306
    # -- SQL database name.
    cnSqlDbName: gluu
    # -- SQL database username.
    cnSqlDbUser: gluu
    # -- SQL database timezone.
    cnSqlDbTimezone: UTC
    # -- SQL password file holding the password from config.configmap.cnSqldbUserPassword.
    cnSqlPasswordFile: /etc/gluu/conf/sql_password
    # -- SQL password injected into config.configmap.cnSqlPasswordFile.
    cnSqldbUserPassword: Test1234#
    # -- OXD server OAuth client application certificate common name. This should be left as the default value oxd-server.
    gluuOxdApplicationCertCn: oxd-server
    # -- OXD server OAuth client admin certificate common name. This should be left as the default value oxd-server.
    gluuOxdAdminCertCn: oxd-server
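    # For illustration only (commented out): a sketch of pointing Gluu at an external PostgreSQL
    # instead of the MySQL defaults above. Host, database, and credentials are placeholders, and
    # global.gluuPersistenceType would be expected to be sql in that case.
    # cnSqlDbDialect: pgsql
    # cnSqlDbHost: my-postgres.default.svc.cluster.local
    # cnSqlDbPort: 5432
    # cnSqlDbName: gluu
    # cnSqlDbUser: gluu
    # cnSqldbUserPassword: <strong-password>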
    # -- Couchbase certificate authority string. This must be encoded using base64. This can also be found in your Couchbase UI Security > Root Certificate. In mTLS setups this is not required.
    gluuCouchbaseCrt: LS0tLS1CRUdJTiBDRVJUSUZJQ0FURS0tLS0tCk1JSURlakNDQW1LZ0F3SUJBZ0lKQUwyem5UWlREUHFNTUEwR0NTcUdTSWIzRFFFQkN3VUFNQzB4S3pBcEJnTlYKQkFNTUlpb3VZMkpuYkhWMUxtUmxabUYxYkhRdWMzWmpMbU5zZFhOMFpYSXViRzlqWVd3d0hoY05NakF3TWpBMQpNRGt4T1RVeFdoY05NekF3TWpBeU1Ea3hPVFV4V2pBdE1Tc3dLUVlEVlFRRERDSXFMbU5pWjJ4MWRTNWtaV1poCmRXeDBMbk4yWXk1amJIVnpkR1Z5TG14dlkyRnNNSUlCSWpBTkJna3Foa2lHOXcwQkFRRUZBQU9DQVE4QU1JSUIKQ2dLQ0FRRUFycmQ5T3lvSnRsVzhnNW5nWlJtL2FKWjJ2eUtubGU3dVFIUEw4Q2RJa1RNdjB0eHZhR1B5UkNQQgo3RE00RTFkLzhMaU5takdZZk41QjZjWjlRUmNCaG1VNmFyUDRKZUZ3c0x0cTFGT3MxaDlmWGo3d3NzcTYrYmlkCjV6Umw3UEE0YmdvOXVkUVRzU1UrWDJUUVRDc0dxVVVPWExrZ3NCMjI0RDNsdkFCbmZOeHcvYnFQa2ZCQTFxVzYKVXpxellMdHN6WE5GY0dQMFhtU3c4WjJuaFhhUGlva2pPT2dyMkMrbVFZK0htQ2xGUWRpd2g2ZjBYR0V0STMrKwoyMStTejdXRkF6RlFBVUp2MHIvZnk4TDRXZzh1YysvalgwTGQrc2NoQTlNQjh3YmJORUp2ZjNMOGZ5QjZ0cTd2CjF4b0FnL0g0S1dJaHdqSEN0dFVnWU1oU0xWV3UrUUlEQVFBQm80R2NNSUdaTUIwR0ExVWREZ1FXQkJTWmQxWU0KVGNIRVZjSENNUmp6ejczZitEVmxxREJkQmdOVkhTTUVWakJVZ0JTWmQxWU1UY0hFVmNIQ01Sanp6NzNmK0RWbApxS0V4cEM4d0xURXJNQ2tHQTFVRUF3d2lLaTVqWW1kc2RYVXVaR1ZtWVhWc2RDNXpkbU11WTJ4MWMzUmxjaTVzCmIyTmhiSUlKQUwyem5UWlREUHFNTUF3R0ExVWRFd1FGTUFNQkFmOHdDd1lEVlIwUEJBUURBZ0VHTUEwR0NTcUcKU0liM0RRRUJDd1VBQTRJQkFRQk9meTVWSHlKZCtWUTBXaUQ1aSs2cmhidGNpSmtFN0YwWVVVZnJ6UFN2YWVFWQp2NElVWStWOC9UNnE4Mk9vVWU1eCtvS2dzbFBsL01nZEg2SW9CRnVtaUFqek14RTdUYUhHcXJ5dk13Qk5IKzB5CnhadG9mSnFXQzhGeUlwTVFHTEs0RVBGd3VHRlJnazZMRGR2ZEN5NVdxWW1MQWdBZVh5VWNaNnlHYkdMTjRPUDUKZTFiaEFiLzRXWXRxRHVydFJrWjNEejlZcis4VWNCVTRLT005OHBZN05aaXFmKzlCZVkvOEhZaVQ2Q0RRWWgyTgoyK0VWRFBHcFE4UkVsRThhN1ZLL29MemlOaXFyRjllNDV1OU1KdjM1ZktmNUJjK2FKdWduTGcwaUZUYmNaT1prCkpuYkUvUENIUDZFWmxLaEFiZUdnendtS1dDbTZTL3g0TklRK2JtMmoKLS0tLS1FTkQgQ0VSVElGSUNBVEUtLS0tLQo=
    # -- Couchbase password for the restricted user config.configmap.gluuCouchbaseUser that is often used inside the services. The password must contain one digit, one uppercase letter, one lowercase letter, and one symbol.
    gluuCouchbasePass: P@ssw0rd
    # -- Couchbase password for the super user config.configmap.gluuCouchbaseSuperUser that is used during the initialization process. The password must contain one digit, one uppercase letter, one lowercase letter, and one symbol.
    gluuCouchbaseSuperUserPass: P@ssw0rd
    # -- The Couchbase super user (admin) user name. This user is used during initialization only.
    gluuCouchbaseSuperUser: admin
    # -- Couchbase URL. Used only when global.gluuPersistenceType is hybrid or couchbase. This should be in FQDN format for either remote or local Couchbase clusters. The address can be an internal address inside the kubernetes cluster.
    gluuCouchbaseUrl: cbgluu.default.svc.cluster.local
    # -- The prefix of Couchbase buckets. This helps with separation between different environments and allows the same Couchbase cluster to be used by different setups of Gluu.
    gluuCouchbaseBucketPrefix: gluu
    # -- Couchbase restricted user. Used only when global.gluuPersistenceType is hybrid or couchbase.
    gluuCouchbaseUser: gluu
    # -- The number of replicas per index created. Please note that the number of index nodes must be one greater than the number of index replicas. That means if your Couchbase cluster only has 2 index nodes, you cannot set the number of replicas higher than 1.
    gluuCouchbaseIndexNumReplica: 0
    # -- The location of the Couchbase restricted user config.configmap.gluuCouchbaseUser password. The file path must end with couchbase_password.
    gluuCouchbasePassFile: /etc/gluu/conf/couchbase_password
    # -- The location of the Couchbase super user config.configmap.gluuCouchbaseSuperUser password. The file path must end with couchbase_superuser_password.
    gluuCouchbaseSuperUserPassFile: /etc/gluu/conf/couchbase_superuser_password
    # -- Location of `couchbase.crt` used by the Couchbase SDK for TLS termination. The file path must end with couchbase.crt. In mTLS setups this is not required.
    gluuCouchbaseCertFile: /etc/certs/couchbase.crt
    # -- Specify data that should be saved in LDAP (one of default, user, cache, site, token, or session; defaults to default). Note this environment variable only takes effect when `global.gluuPersistenceType` is set to `hybrid`.
    gluuPersistenceLdapMapping: default
    # -- Cache type. `NATIVE_PERSISTENCE`, `REDIS`, or `IN_MEMORY`. Defaults to `NATIVE_PERSISTENCE`.
    gluuCacheType: NATIVE_PERSISTENCE
    # -- Activate manual Shib files sync - deprecated
    gluuSyncShibManifests: false
    # -- Activate manual Casa files sync - deprecated
    gluuSyncCasaManifests: false
    # -- Value passed to Java option -XX:MaxRAMPercentage
    gluuMaxRamPercent: "75.0"
    containerMetadataName: kubernetes
    # -- Redis URL and port number in the format `<url>:<port>`. Can be used when `config.configmap.gluuCacheType` is set to `REDIS`.
    gluuRedisUrl: redis:6379
    # -- Boolean to use SSL in Redis. Can be used when `config.configmap.gluuCacheType` is set to `REDIS`.
    gluuRedisUseSsl: "false"
    # -- Redis service type. `STANDALONE` or `CLUSTER`. Can be used when `config.configmap.gluuCacheType` is set to `REDIS`.
    gluuRedisType: STANDALONE
    # -- Redis SSL truststore. Optional. Can be used when `config.configmap.gluuCacheType` is set to `REDIS`.
    gluuRedisSslTruststore: ""
    # -- Redis Sentinel Group. Often set when `config.configmap.gluuRedisType` is set to `SENTINEL`. Can be used when `config.configmap.gluuCacheType` is set to `REDIS`.
    gluuRedisSentinelGroup: ""
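    # For illustration only (commented out): a sketch of switching the cache from NATIVE_PERSISTENCE
    # to an external Redis, using the keys above plus config.redisPass. The Redis address is a placeholder.
    # gluuCacheType: REDIS
    # gluuRedisType: STANDALONE
    # gluuRedisUrl: my-redis.redis.svc.cluster.local:6379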
    # -- Whether to generate oxShibboleth configuration or not (defaults to true).
    gluuOxtrustConfigGeneration: true
    # -- oxTrust internal address. Leave as default.
    gluuOxtrustBackend: oxtrust:8080
    # -- oxAuth internal address. Leave as default.
    gluuOxauthBackend: oxauth:8080
    # -- OXD server OAuth client address. This should be left intact in kubernetes as it uses the internal address format.
    gluuOxdServerUrl: oxd-server:8443
    # -- OXD server bind address. This limits which IP ranges can access the client API. This should be left as * and controlled by a NetworkPolicy.
    gluuOxdBindIpAddresses: "*"
    # -- OpenDJ internal address. Leave as default. Used when `global.gluuPersistenceType` is set to `ldap`.
    gluuLdapUrl: opendj:1636
    # -- Jackrabbit Postgres uid
    gluuJackrabbitPostgresUser: jackrabbit
    # -- The location of the Jackrabbit Postgres password file jackrabbit.secrets.gluuJackrabbitPostgresPassword. The file path must end with postgres_password.
    gluuJackrabbitPostgresPasswordFile: /etc/gluu/conf/postgres_password
    # -- Jackrabbit Postgres database name.
    gluuJackrabbitPostgresDatabaseName: jackrabbit
    # -- Postgres url
    gluuJackrabbitPostgresHost: postgresql.postgres.svc.cluster.local
    # -- Jackrabbit Postgres port
    gluuJackrabbitPostgresPort: 5432
    # -- Jackrabbit admin uid.
    gluuJackrabbitAdminId: admin
    # -- The location of the Jackrabbit admin password jackrabbit.secrets.gluuJackrabbitAdminPassword. The file path must end with jackrabbit_admin_password.
    gluuJackrabbitAdminPassFile: /etc/gluu/conf/jackrabbit_admin_password
    # -- Interval between file syncs (defaults to 300 seconds).
    gluuJackrabbitSyncInterval: 300
    # -- Jackrabbit internal url. Normally left as default.
    gluuJackrabbitUrl: http://jackrabbit:8080
    # -- The location of the Jackrabbit admin uid config.gluuJackrabbitAdminId. The file path must end with jackrabbit_admin_id.
    gluuJackrabbitAdminIdFile: /etc/gluu/conf/jackrabbit_admin_id
    # -- Document store type to use for Shibboleth files: JCA or LOCAL. Note that if JCA is selected, Apache Jackrabbit will be used. Jackrabbit also enables loading custom files across all services easily.
    gluuDocumentStoreType: JCA
    # [google_envs] Envs related to using Google
    # -- Base64 encoded string of a service account with the role roles/secretmanager.admin. This is often used inside the services to reach the configuration layer. Used only when global.configAdapterName and global.configSecretAdapter is set to google.
    cnGoogleServiceAccount: SWFtTm90YVNlcnZpY2VBY2NvdW50Q2hhbmdlTWV0b09uZQo=
    # -- Project id of the Google project the secret manager and/or spanner instance belongs to. Used only when global.configAdapterName and global.configSecretAdapter is set to google.
    cnGoogleProjectId: google-project-to-save-config-and-secrets-to
    # -- Google Spanner ID. Used only when global.gluuPersistenceType is spanner.
    cnGoogleSpannerInstanceId: ""
    # -- Google Spanner Database ID. Used only when global.gluuPersistenceType is spanner.
    cnGoogleSpannerDatabaseId: ""
    # -- Google Spanner Emulator Host. Used only when global.gluuPersistenceType is spanner and during testing if needed.
    cnGoogleSpannerEmulatorHost: ""
    # [google_spanner_envs] END
    # [google_secret_manager_envs] Envs related to using Google Secret Manager to store config and secret layer
    # -- Secret version to be used for secret configuration. Defaults to latest and should normally always stay that way. Used only when global.configAdapterName and global.configSecretAdapter is set to google.
    cnSecretGoogleSecretVersionId: "latest"
    # -- Prefix for Gluu secret in Google Secret Manager. Defaults to gluu. If left intact, a secret named gluu-secret will be created. Used only when global.configAdapterName and global.configSecretAdapter is set to google.
    cnSecretGoogleSecretNamePrefix: gluu
    # -- Passphrase for Gluu secret in Google Secret Manager. This is used for encrypting and decrypting data from the Google Secret Manager. Used only when global.configAdapterName and global.configSecretAdapter is set to google.
    cnGoogleSecretManagerPassPhrase: Test1234#
    # -- Secret version to be used for configuration. Defaults to latest and should normally always stay that way. Used only when global.configAdapterName and global.configSecretAdapter is set to google.
    cnConfigGoogleSecretVersionId: "latest"
    # -- Prefix for Gluu configuration secret in Google Secret Manager. Defaults to gluu. If left intact, a secret named gluu-configuration will be created. Used only when global.configAdapterName and global.configSecretAdapter is set to google.
    cnConfigGoogleSecretNamePrefix: gluu
    # [google_secret_manager_envs] END
    # [google_envs] END
    # -- Loadbalancer address for AWS if the FQDN is not registered.
    lbAddr: ""
    # -- Enable oxTrust API
    gluuOxtrustApiEnabled: false
    # -- Enable oxTrust API test mode
    gluuOxtrustApiTestMode: false
    # -- SCIM protection mode OAUTH|TEST|UMA
    gluuScimProtectionMode: "OAUTH"
    # -- Boolean flag to enable/disable the passport chart
    gluuPassportEnabled: false
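    # For illustration only (commented out): a sketch of storing the Gluu config and secret layers in
    # Google Secret Manager instead of Kubernetes, combining the keys above with the global adapters.
    # The project id and encoded service account are placeholders.
    # global:
    #   configAdapterName: google
    #   configSecretAdapter: google
    # config:
    #   configmap:
    #     cnGoogleProjectId: my-gcp-project
    #     cnGoogleServiceAccount: <base64-encoded-service-account-json>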
    # -- TEMP KEY TO BE REMOVED IN 4.4 which allows the passport failure redirect url to be specified.
    gluuPassportFailureRedirectUrl: ""
    # -- Enable Casa flag.
    gluuCasaEnabled: false
    # -- Enable SAML-related features; UI menu, etc.
    gluuSamlEnabled: false
  image:
    # -- Image to use for deploying.
    repository: gluufederation/config-init
    # -- Image tag to use for deploying.
    tag: 4.4.2-6
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  # -- CE to CN Migration section
  migration:
    # -- Boolean flag to enable migration from CE
    enabled: false
    # -- Directory holding all migration files
    migrationDir: /ce-migration
    # -- Migration data format depending on the persistence backend.
    # Supported data formats are ldif, couchbase+json, spanner+avro, postgresql+json, and mysql+json.
    migrationDataFormat: ldif
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 300m
      # -- Memory limit.
      memory: 300Mi
    requests:
      # -- CPU request.
      cpu: 300m
      # -- Memory request.
      memory: 300Mi
  # -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
  additionalAnnotations: { }
# -- Nginx ingress definitions chart
nginx-ingress:
  certManager:
    # Enable deploying a certificate that uses the dns01 challenge instead of passing an annotation in nginx-ingress.ingress.additionalAnnotations for the nginx http01 challenge.
    certificate:
      enabled: false
      issuerKind: ClusterIssuer
      # Issuer name which you will create manually. Can be letsencrypt-production.
      issuerName: ""
      issuerGroup: cert-manager.io
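  # For illustration only (commented out): a sketch of letting cert-manager issue the TLS certificate
  # through a dns01 challenge, assuming a ClusterIssuer named letsencrypt-production has already been
  # created manually as noted above.
  # certManager:
  #   certificate:
  #     enabled: true
  #     issuerKind: ClusterIssuer
  #     issuerName: letsencrypt-production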
  ingress:
    enabled: true
    # -- Enable use of legacy API version networking.k8s.io/v1beta1 to support kubernetes 1.18. This flag should be removed in the next version release along with nginx-ingress/templates/ingress-legacy.yaml.
    legacy: false
    path: /
    # -- Enable Admin UI endpoints /identity
    adminUiEnabled: true
    # -- Admin UI ingress resource labels. key app is taken.
    adminUiLabels: { }
    # -- Admin UI ingress resource additional annotations.
    adminUiAdditionalAnnotations: { }
    # -- Enable endpoint /.well-known/openid-configuration
    openidConfigEnabled: true
    # -- openid-configuration ingress resource labels. key app is taken
    openidConfigLabels: { }
    # -- openid-configuration ingress resource additional annotations.
    openidAdditionalAnnotations: { }
    # -- Enable endpoint /device-code
    deviceCodeEnabled: true
    # -- device-code ingress resource labels. key app is taken
    deviceCodeLabels: { }
    # -- device-code ingress resource additional annotations.
    deviceCodeAdditionalAnnotations: { }
    # -- Enable endpoint /firebase-messaging-sw.js
    firebaseMessagingEnabled: true
    # -- Firebase Messaging ingress resource labels. key app is taken
    firebaseMessagingLabels: { }
    # -- Firebase Messaging ingress resource additional annotations.
    firebaseMessagingAdditionalAnnotations: { }
    # -- Enable endpoint /.well-known/uma2-configuration
    uma2ConfigEnabled: true
    # -- uma2 config ingress resource labels. key app is taken
    uma2ConfigLabels: { }
    # -- uma2 config ingress resource additional annotations.
    uma2AdditionalAnnotations: { }
    # -- Enable endpoint /.well-known/webfinger
    webfingerEnabled: true
    # -- webfinger ingress resource labels. key app is taken
    webfingerLabels: { }
    # -- webfinger ingress resource additional annotations.
    webfingerAdditionalAnnotations: { }
    # -- Enable endpoint /.well-known/simple-web-discovery
    webdiscoveryEnabled: true
    # -- webdiscovery ingress resource labels. key app is taken
    webdiscoveryLabels: { }
    # -- webdiscovery ingress resource additional annotations.
    webdiscoveryAdditionalAnnotations: { }
    # -- Enable endpoint /.well-known/scim-configuration
    scimConfigEnabled: false
    # -- SCIM config ingress resource labels. key app is taken
    scimConfigLabels: { }
    # -- SCIM config ingress resource additional annotations.
    scimConfigAdditionalAnnotations: { }
    # -- Enable SCIM endpoints /scim
    scimEnabled: false
    # -- SCIM ingress resource labels. key app is taken
    scimLabels: { }
    # -- SCIM ingress resource additional annotations.
    scimAdditionalAnnotations: { }
    # -- Enable endpoint /.well-known/fido-configuration
    u2fConfigEnabled: true
    # -- u2f config ingress resource labels. key app is taken
    u2fConfigLabels: { }
    # -- u2f config ingress resource additional annotations.
    u2fAdditionalAnnotations: { }
    # -- Enable endpoint /.well-known/fido2-configuration
    fido2ConfigEnabled: false
    # -- fido2 config ingress resource labels. key app is taken
    fido2ConfigLabels: { }
    # -- fido2 config ingress resource additional annotations.
    fido2ConfigAdditionalAnnotations: { }
    # -- Enable all fido2 endpoints
    fido2Enabled: false
    # -- fido2 ingress resource labels. key app is taken
    fido2Labels: { }
    # -- Enable Auth server endpoints /oxauth
    authServerEnabled: true
    # -- Auth server config ingress resource labels. key app is taken
    authServerLabels: { }
    # -- Auth server ingress resource additional annotations.
    authServerAdditionalAnnotations: { }
    # -- Enable casa endpoints /casa
    casaEnabled: false
    # -- Casa ingress resource labels. key app is taken
    casaLabels: { }
    # -- Casa ingress resource additional annotations.
    casaAdditionalAnnotations: { }
    # -- Enable passport endpoints /passport
    passportEnabled: false
    # -- passport ingress resource labels. key app is taken.
    passportLabels: { }
    # -- passport ingress resource additional annotations.
    passportAdditionalAnnotations: { }
    # -- Enable shibboleth endpoints /idp
    shibEnabled: false
    # -- shibboleth ingress resource labels. key app is taken.
    shibLabels: { }
    # -- shibboleth ingress resource additional annotations.
    shibAdditionalAnnotations: { }
    # -- Additional labels that will be added across all ingress definitions in the format of {mylabel: "myapp"}
    additionalLabels: { }
    # -- Additional annotations that will be added across all ingress definitions in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
    # Enable client certificate authentication
    # nginx.ingress.kubernetes.io/auth-tls-verify-client: "optional"
    # Create the secret containing the trusted ca certificates
    # nginx.ingress.kubernetes.io/auth-tls-secret: "gluu/tls-certificate"
    # Specify the verification depth in the client certificates chain
    # nginx.ingress.kubernetes.io/auth-tls-verify-depth: "1"
    # Specify if certificates are passed to upstream server
    # nginx.ingress.kubernetes.io/auth-tls-pass-certificate-to-upstream: "true"
    additionalAnnotations:
      # -- Required annotation below. Use kubernetes.io/ingress.class: "public" for microk8s.
      kubernetes.io/ingress.class: "nginx"
    hosts:
      - demoexample.gluu.org
    tls:
      - secretName: tls-certificate # DON'T change
        hosts:
          - demoexample.gluu.org
# -- Jackrabbit Oak is a complementary implementation of the JCR specification. It is an effort to implement a scalable and performant hierarchical content repository for use as the foundation of modern world-class web sites and other demanding content applications.
# https://jackrabbit.apache.org/jcr/index.html
jackrabbit:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
  #  maxSkew: 1
  #  minDomains: 1 # optional; beta since v1.25
  #  topologyKey: kubernetes.io/hostname
  #  whenUnsatisfiable: DoNotSchedule
  #  matchLabelKeys: [] # optional; alpha since v1.25
  #  nodeAffinityPolicy: [] # optional; alpha since v1.25
  #  nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
  #  maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: 1
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
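  # For illustration only (commented out): custom autoscaling settings for the hpa block above,
  # assuming the chart passes metrics and behavior through to the HorizontalPodAutoscaler using the
  # upstream autoscaling/v2 format.
  # hpa:
  #   enabled: true
  #   metrics:
  #     - type: Resource
  #       resource:
  #         name: memory
  #         target:
  #           type: Utilization
  #           averageUtilization: 60
  #   behavior:
  #     scaleDown:
  #       stabilizationWindowSeconds: 300
  #       policies:
  #         - type: Pods
  #           value: 1
  #           periodSeconds: 60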
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/jackrabbit
    # -- Image tag to use for deploying.
    tag: 4.4.2-1
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 1500m
      # -- Memory limit.
      memory: 1000Mi
    requests:
      # -- CPU request.
      cpu: 1500m
      # -- Memory request.
      memory: 1000Mi
  secrets:
    # -- Jackrabbit admin uid password
    gluuJackrabbitAdminPass: Test1234#
    # -- Jackrabbit Postgres uid password
    gluuJackrabbitPostgresPass: P@ssw0rd
  service:
    # -- Name of the Jackrabbit service. Please keep it as default.
    jackRabbitServiceName: jackrabbit
    # -- The name of the jackrabbit port within the jackrabbit service. Please keep it as default.
    name: http-jackrabbit
    # -- Port of the jackrabbit service. Please keep it as default.
    port: 8080
  # -- This id needs to be unique to each kubernetes cluster in a multi cluster setup:
  # west, east, south, north, region ...etc. If left empty it will be randomly generated.
  clusterId: ""
  storage:
    # -- Jackrabbit volume size
    size: 5Gi
  # -- Configure the liveness healthcheck for Jackrabbit if needed.
  livenessProbe:
    # -- Executes tcp healthcheck.
    tcpSocket:
      port: http-jackrabbit
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
  # -- Configure the readiness healthcheck for Jackrabbit if needed.
  readinessProbe:
    # -- Executes tcp healthcheck.
    tcpSocket:
      port: http-jackrabbit
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across the gateway in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across the gateway in the format of {cert-manager.io/issuer: "letsencrypt-prod"}
  additionalAnnotations: { }
# -- OpenDJ is a directory server which implements a wide range of Lightweight Directory Access Protocol and related standards, including full compliance with LDAPv3 but also support for Directory Service Markup Language (DSMLv2). Written in Java, OpenDJ offers multi-master replication, access control, and many extensions.
opendj:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
  #  maxSkew: 1
  #  minDomains: 1 # optional; beta since v1.25
  #  topologyKey: kubernetes.io/hostname
  #  whenUnsatisfiable: DoNotSchedule
  #  matchLabelKeys: [] # optional; alpha since v1.25
  #  nodeAffinityPolicy: [] # optional; alpha since v1.25
  #  nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
  #  maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: 1
  # -- Configure ldap backup cronjob
  backup:
    enabled: true
    cronJobSchedule: "*/59 * * * *"
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/opendj
    # -- Image tag to use for deploying.
    tag: 4.4.2-1
    # -- Image Pull Secrets
    pullSecrets: [ ]
  persistence:
    # -- OpenDJ volume size
    size: 5Gi
  # -- servicePorts values used in StatefulSet container
  ports:
    tcp-admin:
      nodePort: ""
      port: 4444
      protocol: TCP
      targetPort: 4444
    tcp-ldap:
      nodePort: ""
      port: 1389
      protocol: TCP
      targetPort: 1389
    tcp-ldaps:
      nodePort: ""
      port: 1636
      protocol: TCP
      targetPort: 1636
    tcp-repl:
      nodePort: ""
      port: 8989
      protocol: TCP
      targetPort: 8989
    tcp-serf:
      nodePort: ""
      port: 7946
      protocol: TCP
      targetPort: 7946
    udp-serf:
      nodePort: ""
      port: 7946
      protocol: UDP
      targetPort: 7946
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 1500m
      # -- Memory limit.
      memory: 2000Mi
    requests:
      # -- CPU request.
      cpu: 1500m
      # -- Memory request.
      memory: 2000Mi
  # -- Configure the liveness healthcheck for OpenDJ if needed.
  # https://github.com/GluuFederation/docker-opendj/blob/4.4/scripts/healthcheck.py
  livenessProbe:
    # -- Executes the python3 healthcheck.
    exec:
      command:
        - python3
        - /app/scripts/healthcheck.py
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
    failureThreshold: 20
  # -- Configure the readiness healthcheck for OpenDJ if needed.
  # https://github.com/GluuFederation/docker-opendj/blob/4.4/scripts/healthcheck.py
  readinessProbe:
    tcpSocket:
      port: 1636
    initialDelaySeconds: 60
    timeoutSeconds: 5
    periodSeconds: 25
    failureThreshold: 20
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
  additionalAnnotations: { }
# -- Job to generate data and initial config for Gluu Server persistence layer.
persistence:
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/persistence
    # -- Image tag to use for deploying.
    tag: 4.4.2-2
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 300m
      # -- Memory limit.
      memory: 300Mi
    requests:
      # -- CPU request.
      cpu: 300m
      # -- Memory request.
      memory: 300Mi
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
  additionalAnnotations: { }
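# For illustration only (commented out): the volumes/volumeMounts pairs exposed by each chart are
# assumed to be passed through to the pod spec and containers as-is, so a custom file could be
# mounted like this (the ConfigMap name and mount path are placeholders):
# volumes:
#   - name: custom-files
#     configMap:
#       name: my-custom-files
# volumeMounts:
#   - name: custom-files
#     mountPath: /etc/gluu/custom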
# -- OAuth Authorization Server, the OpenID Connect Provider, the UMA Authorization Server--this is the main Internet facing component of Gluu. It's the service that returns tokens, JWTs and identity assertions. This service must be Internet facing.
oxauth:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
  #  maxSkew: 1
  #  minDomains: 1 # optional; beta since v1.25
  #  topologyKey: kubernetes.io/hostname
  #  whenUnsatisfiable: DoNotSchedule
  #  matchLabelKeys: [] # optional; alpha since v1.25
  #  nodeAffinityPolicy: [] # optional; alpha since v1.25
  #  nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
  #  maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: "90%"
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/oxauth
    # -- Image tag to use for deploying.
    tag: 4.4.2-4
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 2500m
      # -- Memory limit.
      memory: 2500Mi
    requests:
      # -- CPU request.
      cpu: 2500m
      # -- Memory request.
      memory: 2500Mi
  service:
    # -- Name of the oxauth service. Please keep it as default.
    oxAuthServiceName: oxauth
    # -- The name of the oxauth port within the oxauth service. Please keep it as default.
    name: http-oxauth
    # -- Port of the oxauth service. Please keep it as default.
    port: 8080
  # -- Configure the liveness healthcheck for the auth server if needed.
  livenessProbe:
    # -- Executes the python3 healthcheck.
    # https://github.com/GluuFederation/docker-oxauth/blob/4.4/scripts/healthcheck.py
    exec:
      command:
        - python3
        - /app/scripts/healthcheck.py
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
  # -- Configure the readiness healthcheck for the auth server if needed.
  # https://github.com/GluuFederation/docker-oxauth/blob/4.4/scripts/healthcheck.py
  readinessProbe:
    exec:
      command:
        - python3
        - /app/scripts/healthcheck.py
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
  additionalAnnotations: { }
# -- Gluu Admin UI. This shouldn't be internet facing.
oxtrust:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
  #  maxSkew: 1
  #  minDomains: 1 # optional; beta since v1.25
  #  topologyKey: kubernetes.io/hostname
  #  whenUnsatisfiable: DoNotSchedule
  #  matchLabelKeys: [] # optional; alpha since v1.25
  #  nodeAffinityPolicy: [] # optional; alpha since v1.25
  #  nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
  #  maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: 1
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/oxtrust
    # -- Image tag to use for deploying.
    tag: 4.4.2-2
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 2500m
      # -- Memory limit.
      memory: 2500Mi
    requests:
      # -- CPU request.
      cpu: 2500m
      # -- Memory request.
      memory: 2500Mi
  service:
    # -- The name of the oxtrust port within the oxtrust service. Please keep it as default.
    name: http-oxtrust
    # -- Port of the oxtrust service. Please keep it as default.
    port: 8080
    clusterIp: None
    # -- Name of the oxtrust service. Please keep it as default.
    oxTrustServiceName: oxtrust
  # -- Configure the liveness healthcheck for oxtrust if needed.
  livenessProbe:
    # -- Executes the python3 healthcheck.
    # https://github.com/GluuFederation/docker-oxauth/blob/4.4/scripts/healthcheck.py
    exec:
      command:
        - python3
        - /app/scripts/healthcheck.py
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
  # -- Configure the readiness healthcheck for oxtrust if needed.
  # https://github.com/GluuFederation/docker-oxauth/blob/4.4/scripts/healthcheck.py
  readinessProbe:
    exec:
      command:
        - python3
        - /app/scripts/healthcheck.py
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
  additionalAnnotations: { }
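# For illustration only (commented out): each chart exposes dnsPolicy and dnsConfig, which are assumed
# to map to the standard pod spec fields; custom resolution could be sketched with placeholder values as:
# dnsPolicy: "None"
# dnsConfig:
#   nameservers:
#     - 10.96.0.10
#   options:
#     - name: ndots
#       value: "1"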
# -- FIDO 2.0 (FIDO2) is an open authentication standard that enables leveraging common devices to authenticate to online services in both mobile and desktop environments.
fido2:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
  #  maxSkew: 1
  #  minDomains: 1 # optional; beta since v1.25
  #  topologyKey: kubernetes.io/hostname
  #  whenUnsatisfiable: DoNotSchedule
  #  matchLabelKeys: [] # optional; alpha since v1.25
  #  nodeAffinityPolicy: [] # optional; alpha since v1.25
  #  nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
  #  maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: "90%"
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/fido2
    # -- Image tag to use for deploying.
    tag: 4.4.2-1
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 500m
      # -- Memory limit.
      memory: 500Mi
    requests:
      # -- CPU request.
      cpu: 500m
      # -- Memory request.
      memory: 500Mi
  service:
    # -- Name of the fido2 service. Please keep it as default.
    fido2ServiceName: fido2
    # -- The name of the fido2 port within the fido2 service. Please keep it as default.
    name: http-fido2
    # -- Port of the fido2 service. Please keep it as default.
    port: 8080
  # -- Configure the liveness healthcheck for fido2 if needed.
  livenessProbe:
    # -- http liveness probe endpoint
    httpGet:
      path: /fido2/restv1/fido2/configuration
      port: http-fido2
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
  # -- Configure the readiness healthcheck for fido2 if needed.
  readinessProbe:
    httpGet:
      path: /fido2/restv1/fido2/configuration
      port: http-fido2
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
  additionalAnnotations: { }
# -- System for Cross-domain Identity Management (SCIM) version 2.0
scim:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
  #  maxSkew: 1
  #  minDomains: 1 # optional; beta since v1.25
  #  topologyKey: kubernetes.io/hostname
  #  whenUnsatisfiable: DoNotSchedule
  #  matchLabelKeys: [] # optional; alpha since v1.25
  #  nodeAffinityPolicy: [] # optional; alpha since v1.25
  #  nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
  #  maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: "90%"
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/scim
    # -- Image tag to use for deploying.
    tag: 4.4.2-1
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  resources:
    limits:
      # -- CPU limit.
      cpu: 1000m
      # -- Memory limit.
      memory: 1000Mi
    requests:
      # -- CPU request.
      cpu: 1000m
      # -- Memory request.
      memory: 1000Mi
  service:
    # -- Name of the scim service. Please keep it as default.
    scimServiceName: scim
    # -- The name of the scim port within the scim service. Please keep it as default.
    name: http-scim
    # -- Port of the scim service. Please keep it as default.
    port: 8080
  # -- Configure the liveness healthcheck for SCIM if needed.
  livenessProbe:
    httpGet:
      # -- http liveness probe endpoint
      path: /scim/restv1/scim/v2/ServiceProviderConfig
      port: 8080
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
  # -- Configure the readiness healthcheck for SCIM if needed.
  readinessProbe:
    httpGet:
      # -- http readiness probe endpoint
      path: /scim/restv1/scim/v2/ServiceProviderConfig
      port: 8080
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
  additionalAnnotations: { }
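# For illustration only (commented out): the preStop hook exposed by each chart (see the linked
# discussion referenced above) is assumed to take a standard container lifecycle handler, e.g. a short
# drain delay before shutdown:
# preStop:
#   exec:
#     command: ["/bin/sh", "-c", "sleep 10"]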
# -- Middleware API to help application developers call an OAuth, OpenID or UMA server. You may wonder why this is necessary. It makes it easier for client developers to use OpenID signing and encryption features, without becoming crypto experts. This API provides some high level endpoints to do some of the heavy lifting.
oxd-server:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
  #  maxSkew: 1
  #  minDomains: 1 # optional; beta since v1.25
  #  topologyKey: kubernetes.io/hostname
  #  whenUnsatisfiable: DoNotSchedule
  #  matchLabelKeys: [] # optional; alpha since v1.25
  #  nodeAffinityPolicy: [] # optional; alpha since v1.25
  #  nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
  #  maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: "90%"
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/oxd-server
    # -- Image tag to use for deploying.
    tag: 4.4.2-1
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 1000m
      # -- Memory limit.
      memory: 400Mi
    requests:
      # -- CPU request.
      cpu: 1000m
      # -- Memory request.
      memory: 400Mi
  service:
    # -- Name of the OXD server service. This must match config.configmap.gluuOxdApplicationCertCn. Please keep it as default.
    oxdServerServiceName: oxd-server
  # -- Configure the liveness healthcheck for oxd-server if needed.
  livenessProbe:
    # -- Executes a curl healthcheck against the oxd-server health-check endpoint.
    exec:
      command:
        - curl
        - -k
        - https://localhost:8443/health-check
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
  # -- Configure the readiness healthcheck for oxd-server if needed.
  readinessProbe:
    exec:
      command:
        - curl
        - -k
        - https://localhost:8443/health-check
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volumeMounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resources definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. key app is taken
  additionalAnnotations: { }
# -- Gluu Casa ("Casa") is a self-service web portal for end-users to manage authentication and authorization preferences for their account in a Gluu Server.
casa:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
# -- Gluu Casa ("Casa") is a self-service web portal for end-users to manage authentication and authorization preferences for their account in a Gluu Server.
casa:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
    # maxSkew: 1
    # minDomains: 1 # optional; beta since v1.25
    # topologyKey: kubernetes.io/hostname
    # whenUnsatisfiable: DoNotSchedule
    # matchLabelKeys: [] # optional; alpha since v1.25
    # nodeAffinityPolicy: [] # optional; alpha since v1.25
    # nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
    #maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: "90%"
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/casa
    # -- Image tag to use for deploying.
    tag: 4.4.2-1
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 500m
      # -- Memory limit.
      memory: 500Mi
    requests:
      # -- CPU request.
      cpu: 500m
      # -- Memory request.
      memory: 500Mi
  service:
    # -- Name of the casa service. Please keep it as default.
    casaServiceName: casa
    # -- Port of the casa service. Please keep it as default.
    port: 8080
    # -- The name of the casa port within the casa service. Please keep it as default.
    name: http-casa
  # -- Configure the liveness healthcheck for casa if needed.
  livenessProbe:
    httpGet:
      # -- http liveness probe endpoint
      path: /casa/health-check
      port: http-casa
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
  # -- Configure the readiness healthcheck for casa if needed.
  readinessProbe:
    httpGet:
      # -- http readiness probe endpoint
      path: /casa/health-check
      port: http-casa
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volume mounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resource definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. The key `app` is reserved by the chart.
  additionalAnnotations: { }
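# -- Example (commented out, illustrative only): `usrEnvs.normal` and `usrEnvs.secret` under `casa` above are
# plain key/value maps, following the `variable1: value1` hint in the comments; normal entries end up as
# regular container environment variables, while secret entries are meant to be sourced from a Secret.
# The variable names and values below are hypothetical.
# casa:
#   usrEnvs:
#     normal:
#       CUSTOM_CASA_FLAG: "true"
#     secret:
#       CUSTOM_API_TOKEN: "changeme"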
# -- Gluu interface to Passport.js to support social login and inbound identity.
oxpassport:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
    # maxSkew: 1
    # minDomains: 1 # optional; beta since v1.25
    # topologyKey: kubernetes.io/hostname
    # whenUnsatisfiable: DoNotSchedule
    # matchLabelKeys: [] # optional; alpha since v1.25
    # nodeAffinityPolicy: [] # optional; alpha since v1.25
    # nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
    #maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: "90%"
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/oxpassport
    # -- Image tag to use for deploying.
    tag: 4.4.2-1
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 700m
      # -- Memory limit.
      memory: 900Mi
    requests:
      # -- CPU request.
      cpu: 700m
      # -- Memory request.
      memory: 900Mi
  service:
    # -- Name of the oxPassport service. Please keep it as default.
    oxPassportServiceName: oxpassport
    # -- Port of the oxPassport service. Please keep it as default.
    port: 8090
    # -- The name of the oxPassport port within the oxPassport service. Please keep it as default.
    name: http-passport
  # -- Configure the liveness healthcheck for oxPassport if needed.
  livenessProbe:
    httpGet:
      # -- http liveness probe endpoint
      path: /passport/health-check
      port: http-passport
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
    failureThreshold: 20
  # -- Configure the readiness healthcheck for oxPassport if needed.
  readinessProbe:
    httpGet:
      # -- http readiness probe endpoint
      path: /passport/health-check
      port: http-passport
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
    failureThreshold: 20
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volume mounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resource definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. The key `app` is reserved by the chart.
  additionalAnnotations: { }
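# -- Example (commented out, illustrative only): `volumes` and `volumeMounts` under `oxpassport` above accept
# the standard Kubernetes pod volume and container volumeMount entries. A sketch that mounts an existing
# ConfigMap into the oxpassport container; the ConfigMap name and mount path are placeholders.
# oxpassport:
#   volumes:
#     - name: custom-passport-config
#       configMap:
#         name: my-passport-config
#   volumeMounts:
#     - name: custom-passport-config
#       mountPath: /etc/custom-passport-config
#       readOnly: true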
# -- Shibboleth project for the Gluu Server's SAML IDP functionality.
oxshibboleth:
  # -- Configure the topology spread constraints. Notice this is a map NOT a list as in the upstream API
  # https://kubernetes.io/docs/concepts/scheduling-eviction/topology-spread-constraints/
  topologySpreadConstraints: {}
  # -- Define below as many constraints as needed. The key name should follow the structure tsc1, tsc2...etc.
  # Do not enter the key labelSelector in the entry/entries below as that is automatically injected by the chart
  #tsc1:
    # maxSkew: 1
    # minDomains: 1 # optional; beta since v1.25
    # topologyKey: kubernetes.io/hostname
    # whenUnsatisfiable: DoNotSchedule
    # matchLabelKeys: [] # optional; alpha since v1.25
    # nodeAffinityPolicy: [] # optional; alpha since v1.25
    # nodeTaintsPolicy: [] # optional; alpha since v1.25
  #tsc2:
    #maxSkew: 1
  # -- Configure the PodDisruptionBudget
  pdb:
    enabled: true
    maxUnavailable: 1
  # -- Configure the HorizontalPodAutoscaler
  hpa:
    enabled: true
    minReplicas: 1
    maxReplicas: 10
    targetCPUUtilizationPercentage: 50
    # -- metrics if targetCPUUtilizationPercentage is not set
    metrics: []
    # -- Scaling Policies
    behavior: {}
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/oxshibboleth
    # -- Image tag to use for deploying.
    tag: 4.4.2-2
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Service replica number.
  replicas: 1
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 1000m
      # -- Memory limit.
      memory: 1000Mi
    requests:
      # -- CPU request.
      cpu: 1000m
      # -- Memory request.
      memory: 1000Mi
  service:
    # -- Port of the oxShibboleth service. Please keep it as default.
    port: 8080
    # -- Name of the oxShibboleth service. Please keep it as default.
    oxShibbolethServiceName: oxshibboleth
    # -- The name of the oxShibboleth port within the oxShibboleth service. Please keep it as default.
    name: http-oxshib
  # -- Configure the liveness healthcheck for oxShibboleth if needed.
  livenessProbe:
    httpGet:
      # -- http liveness probe endpoint
      path: /idp
      port: http-oxshib
    initialDelaySeconds: 30
    periodSeconds: 30
    timeoutSeconds: 5
  # -- Configure the readiness healthcheck for oxShibboleth if needed.
  readinessProbe:
    httpGet:
      # -- http readiness probe endpoint
      path: /idp
      port: http-oxshib
    initialDelaySeconds: 25
    periodSeconds: 25
    timeoutSeconds: 5
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volume mounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resource definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. The key `app` is reserved by the chart.
  additionalAnnotations: { }
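# -- Example (commented out, illustrative only): `preStop` under `oxshibboleth` above is documented via the
# linked discussion (GluuFederation/cloud-native-edition discussion 516). Assuming it takes the body of a
# Kubernetes lifecycle preStop handler, a sketch that briefly delays shutdown so in-flight requests can drain
# could look like the following; confirm the exact structure against the linked discussion before using it.
# oxshibboleth:
#   preStop:
#     exec:
#       command: ["sh", "-c", "sleep 10"]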
# -- CacheRefreshRotation is a special container to monitor cache refresh on oxTrust containers. This may be deprecated.
cr-rotate:
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/cr-rotate
    # -- Image tag to use for deploying.
    tag: 4.4.2-1
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 200m
      # -- Memory limit.
      memory: 200Mi
    requests:
      # -- CPU request.
      cpu: 200m
      # -- Memory request.
      memory: 200Mi
  service:
    # -- Name of the cr-rotate service. Please keep it as default.
    crRotateServiceName: cr-rotate
    # -- Port of the cr-rotate service. Please keep it as default.
    port: 8084
    # -- The name of the cr-rotate port within the cr-rotate service. Please keep it as default.
    name: http-cr-rotate
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volume mounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resource definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. The key `app` is reserved by the chart.
  additionalAnnotations: { }
# -- Responsible for regenerating auth server keys every x hours.
oxauth-key-rotation:
  # -- Add custom normal and secret envs to the service
  usrEnvs:
    # -- Add custom normal envs to the service
    # variable1: value1
    normal: {}
    # -- Add custom secret envs to the service
    # variable1: value1
    secret: {}
  # -- Add custom preStop
  # https://github.com/GluuFederation/cloud-native-edition/discussions/516
  preStop: {}
  # -- Add custom dns policy
  dnsPolicy: ""
  # -- Add custom dns config
  dnsConfig: {}
  image:
    # -- Image pullPolicy to use for deploying.
    pullPolicy: IfNotPresent
    # -- Image to use for deploying.
    repository: gluufederation/certmanager
    # -- Image tag to use for deploying.
    tag: 4.4.2-2
    # -- Image Pull Secrets
    pullSecrets: [ ]
  # -- Auth server key rotation keys life in hours
  keysLife: 48
  # -- Set key selection strategy used by Auth server
  keysStrategy: NEWER
  # -- Delay (in seconds) before pushing private keys to Auth server
  keysPushDelay: 0
  # -- Set key selection strategy after pushing private keys to Auth server (only takes effect when keysPushDelay value is greater than 0)
  keysPushStrategy: NEWER
  # -- Resource specs.
  resources:
    limits:
      # -- CPU limit.
      cpu: 300m
      # -- Memory limit.
      memory: 300Mi
    requests:
      # -- CPU request.
      cpu: 300m
      # -- Memory request.
      memory: 300Mi
  # -- Configure any additional volumes that need to be attached to the pod
  volumes: []
  # -- Configure any additional volume mounts that need to be attached to the containers
  volumeMounts: []
  # -- Additional labels that will be added across all resource definitions in the format of {mylabel: "myapp"}
  additionalLabels: { }
  # -- Additional annotations that will be added across all resources in the format of {cert-manager.io/issuer: "letsencrypt-prod"}. The key `app` is reserved by the chart.
  additionalAnnotations: { }
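# -- Example (commented out, illustrative only): with the defaults above, oxauth-key-rotation regenerates
# auth server keys every 48 hours (`keysLife`). A sketch that rotates daily and waits two minutes before
# pushing the freshly generated private keys, so that `keysPushStrategy` takes effect; the numbers are
# placeholders, not recommendations.
# oxauth-key-rotation:
#   keysLife: 24
#   keysPushDelay: 120
#   keysPushStrategy: NEWER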