#####################################################
# OpsMx AutoPilot and Integrated Spinnaker Delivery
# This file contains configurations for AP as well as Spinnaker
#####################################################

## Info related to the registry where OES images are stored
imageCredentials:
  repoUrl: https://quay.io/
  # Update this if using a private repo such as ACR, ECR, GCR, JFrog, etc.
  registry: quay.io/opsmxpublic

####################################################
## Option to skip installation of OpsMx Spinnaker
## Setting this to true installs the custom ubi8 images of Spinnaker
installSpinnaker: true

####################################################
## Redis configuration
## Set it to false only if OES needs to be configured with an external redis
installRedis: true

####################################################
# Kubernetes services can be exposed to a web UI in one of 3 modes:
# ClusterIP (requires Ingress or some other mechanism), LoadBalancer or NodePort
# Service type for OES UI, OES Gate, Spinnaker Deck and Spinnaker Gate
k8sServiceType: ClusterIP

#extraEnvVars:
#  - name: JAVA_OPTS
#    value: -Dhttps.proxyHost=proxy.server.local -Dhttps.proxyPort=3128

# Global variables can be accessed across all the charts including sub-charts
global:
  # Custom images registry where all the OSS and customized images used in the helm chart are stored
  # Only update this if using a private repo such as ACR, ECR, GCR, JFrog, etc.
  customImages:
    registry: quay.io/opsmxpublic
  # When this flag is set to false, the UI will be accessible over http instead of https
  ssl:
    enabled: false
  # If cert-manager is installed, TLS secrets will be created automatically, as an 'Issuer' will be created
  # Else, the TLS secrets will need to be created manually
  certManager:
    installed: false
  customCerts:
    enabled: false # Set to true if your organization requires custom TLS certs
    secretName: oes-cacerts # Please do not change this
  # Common gate for both spin and oes services. Change ONLY if required
  commonGate:
    enabled: true
  spinnakerRBAC: false # Set to true if RBAC is enabled in Spinnaker
  spinnakerKayanta: false
  ## Set to true to expose oes-ui, oes-gate services over ingress
  createIngress: false
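  ## Illustrative (non-default) combination for exposing the OES UI over HTTPS through an
  ## ingress controller; this is only a sketch, assuming cert-manager is present and the
  ## hostnames under oesUI/oesGate below have been set to real DNS names:
  #ssl:
  #  enabled: true
  #certManager:
  #  installed: true
  #createIngress: true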
  ## Node selection rules for all the OES components (db, Autopilot, Sapor, Dashboard, Visibility, Audit, Datascience, Platform, Ui, Forwarder, Gate...)
  nodeSelector: {}
  affinity: {}
  tolerations: []
  ## Set to false only if GitOps style is used and the repo should not be created automatically in github.com
  githubcreationHook: true
  gitea:
    enabled: true
    standardrepoBranch: 3.12
    admin:
      existingSecret: gitea-secret
      username: opsmx
      password: opsmxadmin123
      email: "support@opsmx.com"
  # Spinnaker Deck URL configuration; URL over which Spinnaker Deck will be accessed
  spinDeck:
    protocol: https
    host: spin.example.ops.com
    #port: 31464
    serviceAnnotations: {}
    ingress:
      annotations:
        ingress.kubernetes.io/ssl-redirect: "true"
        kubernetes.io/ingress.class: nginx
      tls:
        secretName: spin-deck-ingress
  # Spinnaker Gate URL configuration; URL over which Spinnaker Gate will be accessed
  spinGate:
    protocol: https
    host: spin-gate.example.ops.com
    #port: 31465
    serviceAnnotations: {}
    ingress:
      annotations:
        ingress.kubernetes.io/ssl-redirect: "true"
        kubernetes.io/ingress.class: nginx
      tls:
        secretName: spin-gate-ingress
  ## OES-UI URL configuration
  oesUI:
    protocol: http
    host: oes.example.ops.com
    # Use the below port when the hostname above is an external IP instead of a hostname
    #port: 31466
    ingress:
      annotations:
        kubernetes.io/ingress.class: nginx
      tls:
        secretName: oes-ui-ingress
  ## OES-Gate URL configuration
  oesGate:
    protocol: https
    host: oes-gate.example.ops.com
    # Use the below port when the hostname above is an external IP instead of a hostname
    #port: 31467
    ingress:
      annotations:
        kubernetes.io/ingress.class: nginx
      tls:
        secretName: oes-gate-ingress

  ###############################################################################
  # A trial LDAP is installed by default, with users admin, user1, user2, user3 and passwords user1password, user2pa...
  # Set it to false if your own LDAP is to be configured
  installOpenLdap: true
  ## LDAP configuration used in oes-gate, oes-platform and spinnaker gate for authentication and authorization
  ldap:
    enabled: true
    url: ldap://{{ .Release.Name }}-openldap:389
    managerDn: cn=admin,dc=example,dc=org
    managerPassword: opsmxadmin123
    groupSearchBase: ou=groups,dc=example,dc=org
    groupSearchFilter: member={0}
    groupRoleAttributes: cn
    userDnPattern: cn={0},dc=example,dc=org
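  ## A minimal sketch of verifying the trial LDAP above after installation, assuming user1's
  ## password is user1password (per the comment above); the release name and namespace are
  ## placeholders to be adjusted for your install:
  #
  #   kubectl port-forward svc/<release-name>-openldap 3389:389 -n <namespace>
  #   ldapsearch -x -H ldap://localhost:3389 -D "cn=user1,dc=example,dc=org" -w user1password \
  #     -b "ou=groups,dc=example,dc=org" "(member=cn=user1,dc=example,dc=org)" cn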
###############################################################################
# Best left unchanged
# Use the below flag to exclude pre-delete helm hooks from the rendered manifests
preDeleteHelmHooks: true

# Minio access/secret keys for the in-cluster S3 usage
# Minio is not exposed publicly
minio:
  enabled: true
  image:
    repository: quay.io/opsmxpublic/minio
    tag: RELEASE.2020-01-03T19-12-21Z
  mcImage:
    repository: quay.io/opsmxpublic/minio-mc
    tag: RELEASE.2020-11-25T23-04-07Z
  service:
    type: ClusterIP
  accessKey: spinnakeradmin
  secretKey: spinnakeradmin
  region: us-east-1
  securityContext:
    enabled: true
    runAsUser: 1000
    runAsGroup: 1000
    fsGroup: 1000
  buckets:
    - name: "spinnaker"
      policy: none
      purge: false
    - name: "autopilot"
      policy: none
      purge: false
  defaultBucket:
    enabled: true
    name: "spinnaker"
  nodeSelector: {}
  affinity: {}
  tolerations: []
  persistence:
    enabled: true
    size: 10Gi

###############################################################################
## This option enables OES to be configured automatically
## Load Balancer IPs will be automatically replaced in the
## configuration files of oes-gate, oes-ui
autoConfiguration:
  # Set it to false if OES is being installed in a restricted environment;
  # autoconfiguration assumes a Load Balancer is available for oes-gate, oes-ui
  # and spin-deck and configures accordingly
  enabled: false
  initContainer:
    # Image for the init container that automatically configures oes components during startup
    image: quay.io/opsmxpublic/oes-init:v4
    pullPolicy: IfNotPresent
  # Max time (in secs) that an init container of oes-ui should wait
  # to fetch the External Load Balancer IP of oes-gate and vice versa
  externalIpCheckDelay: 180

#####################################################
# Centralized Monitoring
#####################################################
enableCentralMonitoring: false

###############################################################################
# AutoPilot, Sapor, Platform, Visibility and DS services use a PostgreSQL DB for the backend. A DB is automatically installed
# Values of OES Database
db:
  ## Set it to false if an external database is to be used
  enabled: true
  ## Change the default configuration when the above option is set to false
  ## The below url and credentials are used by Autopilot & Sapor
  url: jdbc:postgresql://oes-db:5432
  username: postgres
  password: networks123
  ## Image specific details
  image:
    repository: ubi8-oes-db
    tag: v3.0.0
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: isd
  serviceAnnotations: {}
  ## Strategy to roll out statefulset pods
  podManagementPolicy: OrderedReady
  ## Default group to which the default user of a pod belongs
  securityContext:
    fsGroup: 1000
  ## storageMountSize is the size with which a PVC is to be created
  storageMountSize: 8Gi
  ## storageClass for the DB persistent volume claim (PVC)
  #storageClassName: default
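## A minimal sketch of pointing OES at an external PostgreSQL instead of the in-cluster DB,
## per the comments above (the hostname and credentials below are placeholders, not defaults):
#db:
#  enabled: false
#  url: jdbc:postgresql://postgres.example.internal:5432
#  username: oes_user
#  password: <external-db-password>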
###############################################################################
##
## Values of OES Autopilot
##
autopilot:
  ## Image specific details
  image:
    repository: ubi8-oes-autopilot
    tag: v3.12.4
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "8090"
    prometheus.io/scrape: "true"
  serviceAnnotations: {}
  resources: {}
  #  requests:
  #    memory: 2Gi
  #    cpu: 1
  #  limits:
  #    memory: 8Gi
  #    cpu: 2
  config:
    ## Build Analysis
    buildAnalysis:
      enabled: false

###############################################################################
##
## Values of OES auditservice
##
audit:
  ## Image specific details
  image:
    repository: ubi8-oes-audit-service
    tag: v3.12.4
    pullPolicy: IfNotPresent
  serviceAnnotations: {}
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "8097"
    prometheus.io/scrape: "false"
  resources: {}
  config: {}

###############################################################################
##
## Values of OES auditclient
##
auditClient:
  ## Image specific details
  image:
    repository: ubi8-oes-audit-client
    tag: v3.12.4
    pullPolicy: IfNotPresent
  serviceAnnotations: {}
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "8098"
    prometheus.io/scrape: "true"
  resources: {}
  config: {}

###############################################################################
##
## Values of OES datascience
##
datascience:
  ## Image specific details
  image:
    repository: ubi8-oes-datascience
    tag: v3.12.4
    pullPolicy: IfNotPresent
  serviceAnnotations: {}
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "5005"
    prometheus.io/scrape: "false"
  resources: {}
  config: {}

###############################################################################
## Values of OES Dashboard
dashboard:
  ## Image specific details
  image:
    repository: ubi8-oes-dashboard
    tag: v3.12.4
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "8094"
    prometheus.io/scrape: "true"
  serviceAnnotations: {}
  resources: {}
  #  requests:
  #    memory: 500Mi
  #    cpu: 500m
  #  limits:
  #    memory: 1000Mi
  #    cpu: 1500m
  config:
    app:
      sync:
        enabled: true
    ## By default spinnakerLink is {{ .Values.global.spinDeck.protocol }}://{{ .Values.global.spinDeck.host }}
    ## If spinnaker is exposed on a Load Balancer instead of ingress, set this value to the external IP of the Spinnaker UI
    #spinnakerLink: http://spinnaker.domain.com

#####################################################
# AP supports agent based deployments to remote clusters
# This should NOT be set to false; only externalName needs to be changed in most cases
# (see the commented example after this section)
forwarder:
  externalName: controller.exampleopsmx.net # This hostname should reach the agent-grpc service from the agent
  enabled: true
  agent:
    image: quay.io/opsmxpublic/forwarder-agent:v3.12.0
    serviceType: LoadBalancer
  # Value is also used in the sapor configuration for kubernetes.agent.serverHostName
  host: opsmx-controller-controller1
  image:
    repository: quay.io/opsmxpublic/forwarder-controller
    tag: v3.12.0
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: isd
  serviceAnnotations: {}
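## A minimal sketch of finding a value for forwarder.externalName above, assuming the
## agent-grpc service is exposed as a LoadBalancer in this release (the exact service name
## and namespace are assumptions; adjust for your install):
#
#   kubectl get svc -n <namespace> | grep agent-grpc
#
## Use the resulting external hostname/IP as externalName so remote agents can reach the controller.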
###############################################################################
##
## Values of OES Gate
##
gate:
  ## Image specific details
  image:
    repository: ubi8-gate
    tag: v3.12.4
    pullPolicy: IfNotPresent
  serviceAnnotations: {}
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "8084"
    prometheus.io/scrape: "false"
  resources: {}
  #  requests:
  #    memory: 500Mi
  #    cpu: 500m
  #  limits:
  #    memory: 1500Mi
  #    cpu: 1500m
  config:
    ## Set it to true to disable LDAP authentication and enable file based authentication
    ## Reach out to support@opsmx.com for pre-configured user credentials
    fileBasedAuthentication:
      enabled: false
    server:
      session:
        timeoutInSeconds: 7200
    agentAPI: false
    webhooks: true

    #####################################################
    # SAML Authentication
    #####################################################
    # keytool -genkey -v -keystore oessaml.jks -alias saml -keyalg RSA -keysize 2048 -validity 10000
    # oessaml.jks and oesmetadata.xml are mounted as secrets; create them as follows:
    #   kubectl create secret generic oesmetadataxml --from-file=oesmetadata.xml -n <namespace>
    #   kubectl create secret generic oessamljks --from-file=oessaml.jks -n <namespace>
    #   kubectl create secret generic samljks-password --from-literal password=changeit -n <namespace>
    saml:
      enabled: false
      userSource: gate # Groups will be obtained from SAML
      keyStore: /opt/spinnaker/saml/oessaml.jks # The key in this secret must be oessaml.jks
      keyStorePassword: changeit
      keyStoreAliasName: saml
      metadataUrl: /opt/spinnaker/saml/oesmetadata.xml # The key in this secret must be oesmetadata.xml
      redirectProtocol: https
      redirectHostname: oes-gate.ryzon7-gitops.opsmx.org # OES gate host name
      redirectBasePath: /
      issuerId: ryzonoesgate
      jksSecretName: oessamljks
      metadataSecretName: oesmetadataxml

    #####################################################
    # OAUTH2 Authentication for GitHub
    #####################################################
    oauth2:
      enabled: false
      client:
        clientId: #CLIENT_ID
        clientSecret: #CLIENT_SECRET_ID
        accessTokenUri: https://github.com/login/oauth/access_token
        userAuthorizationUri: https://github.com/login/oauth/authorize
        scope: user-email
      resource:
        userInfoUri: https://api.github.com/user
      userInfoMapping:
        email: email
        firstName: firstname
        lastName: name
        username: login
      provider: GITHUB

###############################################################################
## Values of OES Platform
platform:
  ## Image specific details
  image:
    repository: ubi8-oes-platform
    tag: v3.12.4
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "8095"
    prometheus.io/scrape: "true"
  serviceAnnotations: {}
  resources: {}
  #  requests:
  #    memory: 500Mi
  #    cpu: 500m
  #  limits:
  #    memory: 1500Mi
  #    cpu: 1500m
  config:
    ## Groups defined here will have superAdmin privileges in AP
    adminGroups: admin
    ## Source of groups for authorization
    ## Supported sources: ldap, file, gate. In general, use "gate" for SAML
    userSource: ldap
    ## List of features to be supported by OES, please do not change
    supportedFeatures:
      - deployment_verification
      - sapor
      - visibility
    app:
      dashboard:
        adminuser: admin # mandatory to provide an admin user name to run the admin jobs that compute dashboard counts

###############################################################################
## Details of the rabbitmq message bus image for OES
##
rabbitmq:
  ## rabbitmq endpoint used by oes-gate and oes-platform for messaging;
  ## change this to a custom URL if an external rabbitmq is to be used
  ## url: rabbitmq-service, default port: 5672
  url: rabbitmq-service
  port: 5672
  username: rabbitmq
  password: Networks123
  image:
    registry: quay.io/opsmxpublic/rabbitmq
    repository: 3-management
  annotations:
    moniker.spinnaker.io/application: isd
  serviceAnnotations: {}

###############################################################################
## Values of OES Sapor (Security Audit Policy Onboarding & Release)
sapor:
  ## Image specific details
  image:
    repository: ubi8-oes-sapor
    tag: v3.12.4
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "8085"
    prometheus.io/scrape: "true"
  serviceAnnotations: {}
  resources: {}
  #  requests:
  #    memory: 100Mi
  #    cpu: 500m
  #  limits:
  #    memory: 2000Mi
  #    cpu: 1500m
  config:
    ## Use OSS if sapor needs to be connected to Open Source Spinnaker
    ## If sapor needs to be connected to the ubi8 based Spinnaker images, use OPSMX
    spinnakerImages: OPSMX
    spinnaker:
      ## Spinnaker configuration
      ## Necessary details needed by Sapor to integrate with Spinnaker
      ## Set it to true if authentication is enabled in Spinnaker
      authnEnabled: true # This is mandatory; unauthenticated Spinnaker is not supported
      ## LDAP
      #ldap:
      #  ldapEnabled: true
      #  ldapUsername: admin
      #  ldapPassword: opsmxadmin123
      #  ldapAdminLoginEnabled: false
      #  ldapAdminUsername: admin
      #  ldapAdminPassword: admin
      ## X509
      x509:
        enabled: false
        client:
          password: changeit
    # Encryption key needed for sapor to start up from 3.9
    encrypt:
      enabled: false
      # This key should match the encryption key specified in .hal/default/profiles/spinnakerConfig.yaml
      key: Q7udUkHPuA3VnNlOtksSgQ
    # Set the below field to true if datasource configurations come from platform; please don't change
    datasources:
      platform: true
    sync:
      permission:
        enabled: true
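## For sapor.config.encrypt above: a minimal sketch of generating a key of the same shape as the
## default value (this assumes any random base64 string is acceptable; whatever is used here must
## match the encryption key in .hal/default/profiles/spinnakerConfig.yaml):
#
#   openssl rand -base64 16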
###############################################################################
## Use this SAPOR GATE configuration to enable Basic Authentication for OES SAPOR to communicate
## with spinnaker instead of x509
## Values of SAPOR OES Gate
##
saporgate:
  ## Image specific details
  ##
  enabled: true
  image:
    repository: ubi8-oes-spin-gate
    tag: v3.12.0-saporgate
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: "isd"
  serviceAnnotations: {}
  resources: {}
  #  requests:
  #    memory: 500Mi
  #    cpu: 500m
  #  limits:
  #    memory: 1500Mi
  #    cpu: 1500m
  config:
    username: admin # User name with admin permissions, belonging to the admin groups defined in the platform service
    password: saporadmin # Any generic string, need not be the real password

###############################################################################
## Values of OES UI
ui:
  ## Image specific details
  image:
    repository: ubi8-oes-ui
    tag: v3.12.4
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: "isd"
  serviceAnnotations: {}
  config:
    ## Interval, in milliseconds, at which the UI refreshes the application dashboard
    setApplicationRefreshInterval: 300000

###################################################################################
## Values of OES Visibility
visibility:
  ## Image specific details
  image:
    repository: ubi8-oes-visibility
    tag: v3.12.4
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: isd
    prometheus_io_path: /mgmt/prometheus
    prometheus_io_port: "8096"
    prometheus.io/scrape: "true"
  serviceAnnotations: {}
  resources: {}
  #  requests:
  #    memory: 500Mi
  #    cpu: 500m
  #  limits:
  #    memory: 1000Mi
  #    cpu: 1500m
  config:
    ## Autopilot integration options
    configuredConnectors: JIRA,GIT,AUTOPILOT,SONARQUBE,JENKINS,AQUAWAVE
    ## Logger level
    logLevel: ERROR

###############################################################################
## Details of the redis-master image for OES
redis:
  ## Redis endpoint that is used by oes-gate and oes-platform for caching;
  ## change this to a custom URL if installRedis is set to false
  ## url: redis://{{ .Release.Name }}-redis-master:6379
  url: redis://:password@{{ .Release.Name }}-redis-master
  port: 6379
  image:
    registry: quay.io/opsmxpublic
    repository: bitnami-redis
  password: password
  cluster:
    enabled: false
  # The external Redis option takes effect when the in-cluster redis is disabled
  external:
    host: ""
    port: 6379
    # password: ""
  nodeSelector: {}
  master:
    affinity: {}
    tolerations: []
    podAnnotations:
      moniker.spinnaker.io/application: spin
  ## Redis config file
  ## ref: https://redis.io/topics/config
  ##
  configmap: |-
    # Enable AOF https://redis.io/topics/persistence#append-only-file
    appendonly no
    # Disable RDB persistence, AOF persistence already enabled.
    save 60 1000
  # Uncomment if you don't want to create a PVC for redis
  # master:
  #   persistence:
  #     enabled: false
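## A minimal sketch of using an external redis when installRedis is set to false at the top of
## this file (the hostname and password below are placeholders, not defaults):
#redis:
#  url: redis://:<redis-password>@redis.example.internal
#  port: 6379
#  external:
#    host: redis.example.internal
#    port: 6379
#    # password: <redis-password>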
#####################################################
# Spinnaker instance configuration
#####################################################
spinnaker:
  autoInstallSampleApps: true # Set this to FALSE if upgrading
  # Initialize gitOps style Halyard
  gitopsHalyard:
    enabled: true
    secretName: opsmx-gitops-auth # No need to change this
    repo:
      type: git # git, s3, stash (bitbucket server); use a different sample values file for s3/bitbucket-stash
      configArgs: "http.sslVerify=false"
      ## Configure the below fields only if repo type is git/stash; skip to the s3 section if type is s3
      ## (a commented sketch of a complete git repo block appears after the mTLS setting below)
      baseUrlHostName: github.com # Specify the base FQDN of your repository without the protocol
      organization: OpsMx
      projectName: "" # Specify a project name only if the repo is under a project
      repository: standard-gitops-repo # Repo name for GitOps Halyard (sample reference: https://github.com/OpsMx/sample-gitops-repo.git)
      halConfigBranch: master # Branch under which the halyard config is present in the above repository
      halConfigPath: / # Relative path from the repository root folder
      dynamicAccRepository: standard-gitops-repo # Repo name of the GitOps dynamic accounts directory. Can be same as the Hal repo.
      dynAccntConfigPath: / # Relative path from the repository root folder
      username: git/stash_username # Username to authenticate with the git/stash repo
      token: git/stash_token # Token corresponding to the above username
      usermailid: user@mail.com # Enter a valid email address for the above repo
      sshkeysecret: false
      sshsecretName: ssh-secret # Create a secret with the ssh private key and the known hosts for github/gitlab/bitbucket.com
      ## kubectl create secret generic ssh-secret --from-file=ssh --from-file=known_hosts -n <namespace>
      ## Make sure the rsa key is in a file called ssh and the known hosts for github.com are in a file called known_hosts
      ## ssh-keyscan github.com >> ~/.ssh/known_hosts
      ## Configure the below fields only if repo type is s3
      s3accesskey: AWS_ACCESS_KEY_ID
      s3secretkey: AWS_SECRET_ACCESS_KEY
      s3bucket: bucket-name # e.g. testbucket
      s3region: regionofbucket

    # Promote applications and pipelines from one environment to another or take a backup
    pipelinePromotion: # GitHub only; not supported on S3 or Stash
      enabled: false
      type: git # git, s3, stash
      gitConfig: "git config --global http.sslVerify false"
      organization: project_name # Also called "project" in some repos
      repository: repo_name # Bucket name in case of S3
      rootFolder: pipeline/
      ##### ONLY in case of S3
      AWS_ACCESS_KEY_ID: access_key
      AWS_SECRET_ACCESS_KEY: secret_key
      ##### S3 config for pipelinePromotion is complete
      ##### For non-S3 repos
      baseUrl: example.repo.com # "git_url"
      username: username
      token: token
      ## SSH key based communication: the sshkey is passed in the secret; after helm installation, edit the secret "git-token" and set git_secret_sshkey to a BASE64 encoded id_rsa
      ## Make sure "git_secret_token" has an empty value in the secret if NOT using SSH
      branch: samplerepo
      usermail: krish@company.com
      #password="K438" ### Token is preferred; a password might also work
      # Instead of username and token/password, an sshkey can be provided
      # API
      #apiUrl: "https://api.bitbucket.org/2.0/repositories/" # bitbucket
      apiUrl: "https://api.github.com/repos" # github
      #apiUrl: "https://api.gitlab.com/repos" # gitlab
      #apiUrl: "https://bbq.opsmx.com/api" # bitbucket-server (stash)

    ###############################################################################################
    ###############################################################################################
    #### The values below this line are typically not required to be changed for Spinnaker
    ###############################################################################################
    ###############################################################################################
    createPR: false
    autoApprovePR: false
    targetBranch: master # Can be any branch to which the PR is to be raised
    approvingUser: approver_user ### User who is going to auto-merge
    approverToken: token ## Token for the user to auto-merge

  ## x509 authentication for Spinnaker Gate
  gatex509:
    enabled: false
    host: spingate-x509.domain.com
  # Max time (in secs) that an init container of halyard should wait
  # to fetch the External Load Balancer IP of spin-deck and spin-gate
  spinnakerLBCheckDelay: 1 # This is only used if not using ingress
  mTLS:
    enabled: false # Enable mTLS for Spinnaker services and SSL for Deck and Gate
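  ## A minimal sketch of pointing gitopsHalyard at your own git repo (all names below are
  ## placeholders; with these values the chart would presumably clone
  ## https://github.com/my-org/my-gitops-repo.git, following the sample reference above):
  #gitopsHalyard:
  #  enabled: true
  #  repo:
  #    type: git
  #    baseUrlHostName: github.com
  #    organization: my-org
  #    projectName: ""
  #    repository: my-gitops-repo
  #    halConfigBranch: main
  #    halConfigPath: /
  #    username: my-git-user
  #    token: <personal-access-token>
  #    usermailid: me@example.com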
  ###############################################################################################
  #### The values below this line apply mainly to non-gitOps Spinnaker, best not changed
  ###############################################################################################
  halyard:
    spinnakerVersion: 1.26.6
    image:
      repository: quay.io/opsmxpublic/ubi8-spin-halyard
      tag: opsmx-1.40.0
    # Set to false to disable the persistence data volume for halyard
    persistence:
      enabled: true
    # Provide a config map with Hal commands that will be run after the core config (storage)
    # The config map should contain a script in the config.sh key
    # (see the commented example after the profile config maps below)
    additionalScripts:
      enabled: false
      configMapName: my-halyard-config
      configMapKey: config.sh
      # If you'd rather do an inline script, set create to true and put the content in the data dict like you would for a configmap
      # The content will be passed through `tpl`, so value interpolation is supported.
      create: false
      data: {}
    additionalSecrets:
      create: false
      data: {}
      ## Uncomment if you want to use a pre-created secret rather than feeding data in via helm.
      # name:
    additionalConfigMaps:
      create: false
      data: {}
      ## Uncomment if you want to use a pre-created ConfigMap rather than feeding data in via helm.
      # name:
    additionalProfileConfigMaps:
      data:
        ## If you're running spinnaker behind a reverse proxy such as a GCE ingress
        ## you may need the following profile settings for the gate profile.
        ## See https://github.com/spinnaker/spinnaker/issues/1630
        ## Otherwise it is harmless and will likely become default behavior in the future,
        ## according to the linked github issue.
        gate-local.yml:
          server:
            tomcat:
              httpsServerPort: X-Forwarded-Port
              internalProxies: .*
              protocolHeader: X-Forwarded-Proto
              remoteIpHeader: X-Forwarded-For
          spinnaker:
            extensibility:
              plugins:
                deck-proxy:
                  enabled: true
                  plugins:
                    Opsmx.VerificationGatePlugin:
                      enabled: true
                      version: 1.0.1
                    Opsmx.TestVerificationGatePlugin:
                      enabled: true
                      version: 1.0.1
                    Opsmx.PolicyGatePlugin:
                      enabled: true
                      version: 1.0.1
                    Opsmx.CustomStagePlugin:
                      enabled: true
                      version: 1.0.1
                    Opsmx.VisibilityApprovalPlugin:
                      enabled: true
                      version: 1.0.1
              repositories:
                opsmx-repo:
                  url: https://raw.githubusercontent.com/opsmx/spinnakerPluginRepository/v3.9.0/plugins.json
        ## If opa is installed and enabled and spinnaker is installed,
        ## then policy can be enabled in spinnaker through front50-local.yml.
        ## If you don't want to configure it, set it to false, or
        ## if you have a different opa server, mention that url here
        front50-local.yml:
          policy:
            opa:
              enabled: true
              url: http://oes-sapor.{{ .Release.Namespace }}:8085
        fiat-local.yml:
          auth:
            groupMembership:
              service: ldap
              ldap:
                url: ldap://RELEASE_NAME-openldap:389
                managerDn: cn=admin,dc=example,dc=org
                managerPassword: opsmxadmin123
                groupSearchBase: ou=groups,dc=example,dc=org
                groupSearchFilter: member={0}
                groupRoleAttributes: cn
                userDnPattern: cn={0},dc=example,dc=org
        echo-local.yml:
          rest:
            enabled: true
            endpoints:
              - wrap: false
                url: http://oes-audit-service:8097/auditservice/v1/echo/events/data
              - wrap: false
                url: http://oes-sapor:8085/oes/echo # Note: this url should be the SAPOR url
          microsoftteams:
            enabled: true
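    ## For additionalScripts above: a minimal sketch of a config map carrying extra hal commands
    ## (the config map name matches configMapName above; the hal command shown is only an example):
    #
    #   kubectl create configmap my-halyard-config --from-file=config.sh -n <namespace>
    #
    ## where config.sh could contain, for instance:
    #
    #   hal config features edit --pipeline-templates true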
    ## Define custom settings for Spinnaker services. Read more for details:
    ## https://www.spinnaker.io/reference/halyard/custom/#custom-service-settings
    ## You can use it to add annotations for pods, override the image, etc.
    additionalServiceSettings:
      ## artifactId to override Spinnaker component images with OpsMx custom images
      gate.yml:
        healthEndpoint: /health
        kubernetes:
          useExecHealthCheck: false
        artifactId: quay.io/opsmxpublic/ubi8-oes-spin-gate:1.22.1
      deck.yml:
        artifactId: quay.io/opsmxpublic/ubi8-oes-deck:3.5.1
      clouddriver.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-clouddriver:8.0.4-1
      clouddriver-caching.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-clouddriver:8.0.4-1
      clouddriver-rw.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-clouddriver:8.0.4-1
      clouddriver-ro.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-clouddriver:8.0.4-1
      clouddriver-ro-deck.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-clouddriver:8.0.4-1
      echo.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-echo:2.17.1
      echo-scheduler.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-echo:2.17.1
      echo-worker.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-echo:2.17.1
      fiat.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-fiat:1.16.0
      front50.yml:
        artifactId: quay.io/opsmxpublic/ubi8-oes-front50:0.27.1-opa
      igor.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-igor:1.16.0
      kayenta.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-kayenta:0.21.0
      orca.yml:
        artifactId: quay.io/opsmxpublic/ubi8-oes-orca:2.20.4
      rosco.yml:
        artifactId: quay.io/opsmxpublic/ubi8-spin-rosco:0.25.0

    ## Uncomment if you want to add extra commands to the init script
    ## run by the init container before halyard is started.
    ## The content will be passed through `tpl`, so value interpolation is supported.
    # additionalInitScript: |-

    ## Uncomment if you want to add annotations on halyard and install-using-hal pods
    annotations:
      moniker.spinnaker.io/application: spin
      # iam.amazonaws.com/role:

    ## Uncomment the following resources definitions to control the cpu and memory
    ## resources allocated for the halyard pod
    resources: {}
    #  requests:
    #    memory: "1Gi"
    #    cpu: "100m"
    #  limits:
    #    memory: "2Gi"
    #    cpu: "200m"

    ## Uncomment if you want to set environment variables on the Halyard pod.
    # env:
    #   - name: JAVA_OPTS
    #     value: -Dhttp.proxyHost=proxy.example.com

    customCerts:
      ## Enable to override the default cacerts with your own one
      enabled: false
      secretName: custom-cacerts

  # Define which registries and repositories you want available in your
  # Spinnaker pipeline definitions
  # For more info visit:
  #   https://www.spinnaker.io/setup/providers/docker-registry/
  # Configure your Docker registries here
  dockerRegistries:
    - name: dockerhub
      address: index.docker.io
      repositories:
        - library/alpine
        - library/ubuntu
        - library/centos
        - library/nginx
    # - name: gcr
    #   address: https://gcr.io
    #   username: _json_key
    #   password: ''
    #   email: 1234@5678.com

  # If you don't want to put your passwords into a values file
  # you can use a pre-created secret instead of putting passwords
  # (specify the secret name in `dockerRegistryAccountSecret` below)
  # per account above, with data in the format:
  #   <registry name>: <password>
  # dockerRegistryAccountSecret: myregistry-secrets

  kubeConfig:
    # Use this when you want to register arbitrary clusters with Spinnaker
    # Upload your ~/.kube/config to a secret
    enabled: false
    secretName: my-kubeconfig
    secretKey: config
    # List of contexts from the kubeconfig to make available to Spinnaker
    contexts:
      - default
    deploymentContext: default
    omittedNameSpaces:
      - kube-system
      - kube-public
    onlySpinnakerManaged:
      enabled: true
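  ## A minimal sketch of creating the kubeconfig secret referenced above (secretName: my-kubeconfig,
  ## secretKey: config); the namespace is a placeholder:
  #
  #   kubectl create secret generic my-kubeconfig --from-file=config=$HOME/.kube/config -n <namespace>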
  # spinnakerFeatureFlags is a list of Spinnaker feature flags to enable
  # Ref: https://www.spinnaker.io/reference/halyard/commands/#hal-config-features-edit
  # spinnakerFeatureFlags:
  #   - artifacts
  #   - pipeline-templates
  spinnakerFeatureFlags: []

  # Node labels for pod assignment
  # Ref: https://kubernetes.io/docs/user-guide/node-selection/
  # nodeSelector to provide to each of the Spinnaker components
  nodeSelector: {}
  affinity: {}
  tolerations: []

  # Google Cloud Storage
  gcs:
    enabled: false
    project: my-project-name
    bucket: ""
    ## if jsonKey is set, a secret containing it will be created
    jsonKey: ""
    ## Override the name of the secret to use for jsonKey. If `jsonKey`
    ## is empty, no secret is created, assuming you are creating one
    ## external to the chart. The key for that secret should be `key.json`.
    secretName:

  # AWS Simple Storage Service
  s3:
    enabled: false
    bucket: ""
    # rootFolder: "front50"
    # region: "us-east-1"
    # endpoint: ""
    # accessKey: ""
    # secretKey: ""

  # Azure Storage Account
  azs:
    enabled: false
    # storageAccountName: ""
    # accessKey: ""
    # containerName: "spinnaker"

  rbac:
    # Specifies whether RBAC resources should be created
    create: true
  serviceAccount:
    # Specifies whether a ServiceAccount should be created
    create: true
    # The name of the ServiceAccounts to use.
    # If left blank it is auto-generated from the fullname of the release
    halyardName:
    spinnakerName:
  securityContext:
    # Specifies permissions to write for user/group
    runAsUser: 1000
    fsGroup: 1000

  ## Option to enable HA in Spinnaker; enabled by default
  enableHA: true

  ## Auth mechanism and credentials to be used by spin-cli for creating the sample pipelines
  ## Basic LDAP auth is used by default; everything under spinCli will be pasted into ~/.spin/config
  spinCli:
    gate:
      endpoint: http://sapor-gate:8084 # URL used to talk to the Spinnaker gate and create the sample pipelines
    auth:
      enabled: true
      basic:
        username: admin # Use credentials corresponding to saporgate.config.username
        password: saporadmin # Use credentials corresponding to saporgate.config.password

###############################################################################################
###############################################################################################
#### The values below this line are best not changed except in rare situations
###############################################################################################
###############################################################################################
## Name of the secret for pulling images from a container registry.
## Use it when the images are stored in a private registry.
## Create this secret before installing the chart (see the commented example after the vault section below)
imagePullSecret: ""

## Installation mode; available modes: OES-AP, None (do not install AP (previously called OES))
installationMode: OES-AP
oesAutoConfiguration: true # Attempt to configure OES with some basic integrations based on a best guess

secretStore: db # Valid values: db, vault. Used for storing account and integration secrets. Vault, if used, needs to be installed and configured separately
vault:
  enterpriseEdition: false
  ## Namespace is mandatory when the enterpriseEdition flag is set to true
  namespace: admin/isd-platform
  address: https://server.vaultint.opsmx.net # Vault Address URL
  token: 123132 # Vault Root token
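## For imagePullSecret above: a minimal sketch of creating a docker-registry pull secret
## (the secret name oes-pull-secret, registry, credentials and namespace are all placeholders):
#
#   kubectl create secret docker-registry oes-pull-secret \
#     --docker-server=<private-registry> --docker-username=<user> \
#     --docker-password=<password> -n <namespace>
#
## and then set imagePullSecret: "oes-pull-secret"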
# Spinnaker needs it. Please do not change it, even if you do not plan to use RBAC.
rbac:
  create: true

#####################################################
# AP integrates with OPA and installs an OPA server
# Leave it as true, even if not using OPA
opa:
  enabled: true
  image:
    repository: openpolicyagent/opa
    tag: latest
    pullPolicy: IfNotPresent
  annotations:
    moniker.spinnaker.io/application: isd

####################################################
# AP installs a trial LDAP for POCs and testing purposes
# OpenLDAP custom configuration; will override the default configuration of the openldap helm chart
openldap:
  # Password for the admin user; by default it is set to admin
  adminPassword: opsmxadmin123
  configPassword: opsmxconfig123
  omitClusterIP: true
  affinity: {}
  tolerations: []
  podAnnotations:
    moniker.spinnaker.io/application: spin
  persistence:
    enabled: false
  env:
    LDAP_REMOVE_CONFIG_AFTER_SETUP: "false"
  customLdifFiles:
    01-memberof.ldif: |-
      dn: cn=module,cn=config
      cn: module
      objectClass: olcModuleList
      olcModuleLoad: memberof.la
      olcModulePath: /usr/lib/ldap

      dn: olcOverlay={0}memberof,olcDatabase={1}hdb,cn=config
      objectClass: olcConfig
      objectClass: olcMemberOf
      objectClass: olcOverlayConfig
      objectClass: top
      olcOverlay: memberof
      olcMemberOfDangling: ignore
      olcMemberOfRefInt: TRUE
      olcMemberOfGroupOC: groupOfNames
      olcMemberOfMemberAD: member
      olcMemberOfMemberOfAD: memberOf
    02-refint1.ldif: |-
      dn: cn=module{1},cn=config
      changetype: modify
      add: olcmoduleload
      olcmoduleload: refint.la
    03-refint2.ldif: |-
      dn: olcOverlay={1}refint,olcDatabase={1}hdb,cn=config
      objectClass: olcConfig
      objectClass: olcOverlayConfig
      objectClass: olcRefintConfig
      objectClass: top
      olcOverlay: {1}refint
      olcRefintAttribute: memberof member manager owner
    04-add_ou.ldif: |-
      dn: ou=groups,dc=example,dc=org
      objectClass: organizationalUnit
      ou: Groups
    05-admin.ldif: |-
      dn: cn=admin,ou=groups,dc=example,dc=org
      objectClass: groupofnames
      cn: admin
      description: read write and execute group
      member: cn=admin,dc=example,dc=org
    06-developer.ldif: |-
      dn: cn=developers,ou=groups,dc=example,dc=org
      objectClass: groupofnames
      cn: developers
      description: read only users
      member: cn=admin,dc=example,dc=org
      member: cn=developer,dc=example,dc=org
    07-qa.ldif: |-
      dn: cn=QA,ou=groups,dc=example,dc=org
      objectClass: groupofnames
      cn: QA
      description: read only users
      member: cn=admin,dc=example,dc=org
      member: cn=qa,dc=example,dc=org
    08-manager.ldif: |-
      dn: cn=managers,ou=groups,dc=example,dc=org
      objectClass: groupofnames
      cn: managers
      description: read and execute group
      member: cn=admin,dc=example,dc=org
      member: cn=manager,dc=example,dc=org
    09-IT-manager.ldif: |-
      dn: cn=ITManagers,ou=groups,dc=example,dc=org
      objectClass: groupofnames
      cn: ITManagers
      description: read and execute group
      member: cn=admin,dc=example,dc=org
      member: cn=ITManager,dc=example,dc=org
    10-users.ldif: |-
      dn: cn=user1,dc=example,dc=org
      objectClass: simpleSecurityObject
      objectClass: organizationalRole
      cn: user1
      userpassword: {SSHA}Y9L4AsYL16WLK10qDZ62pTScFnaWb0nz

      dn: cn=user2,dc=example,dc=org
      objectClass: simpleSecurityObject
      objectClass: organizationalRole
      cn: user2
      userpassword: {SSHA}DasTBI0eut1F83Bh1F1HXmDT8juJj3pY

      dn: cn=user3,dc=example,dc=org
      objectClass: simpleSecurityObject
      objectClass: organizationalRole
      cn: user3
      userpassword: {SSHA}Qu1FW7BdLMndwM/Gf+zc3a8VIMAymbuv

      dn: cn=developers,ou=groups,dc=example,dc=org
      changetype: modify
      add: member
      member: cn=user1,dc=example,dc=org
      member: cn=user3,dc=example,dc=org

      dn: cn=QA,ou=groups,dc=example,dc=org
      changetype: modify
      add: member
      member: cn=user2,dc=example,dc=org
      member: cn=user3,dc=example,dc=org
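## A minimal sketch of adding another trial user to customLdifFiles above: generate an {SSHA}
## password hash with slappasswd (from the openldap client tools) and add an entry following
## the same pattern as 10-users.ldif (the user name below is only an example):
#
#   slappasswd -h {SSHA} -s 'user4password'
#
#    11-users.ldif: |-
#      dn: cn=user4,dc=example,dc=org
#      objectClass: simpleSecurityObject
#      objectClass: organizationalRole
#      cn: user4
#      userpassword: {SSHA}<output-of-slappasswd>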
#####################################################
# Centralized Logging Configuration
####################################################
# Set to true to install Elasticsearch and Kibana
enableCentralLogging: false

elasticsearch:
  replicas: 1
  minimumMasterNodes: 1
  resources:
    requests:
      cpu: "100m"
      memory: "1Gi"

kibana:
  service:
    type: LoadBalancer
  resources:
    requests:
      cpu: "100m"
      memory: "250Mi"
  lifecycle:
    postStart:
      exec:
        command:
          - bash
          - -c
          - >
            until curl localhost:5601; do echo "Waiting for Kibana to be available..."; sleep 5; done;
            until curl elasticsearch-master:9200; do echo "Waiting for Elasticsearch to be available..."; sleep 5; done;
            sleep 60;
            curl https://raw.githubusercontent.com/OpsMx/enterprise-spinnaker/master/scripts/kibana/kibana_objects.ndjson > /tmp/kibana_objects.ndjson;
            curl -X POST "localhost:5601/api/saved_objects/_import?overwrite=true" -H "kbn-xsrf: true" --form file=@/tmp/kibana_objects.ndjson 2>&1 1> /tmp/postStart.out
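## Once enableCentralLogging is true, a sketch of finding the Kibana endpoint exposed by the
## LoadBalancer service above (the exact service name depends on the kibana chart in use):
#
#   kubectl get svc -n <namespace> | grep kibana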