# Default values for siterm-frontend.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.

# Deployment Type. Available options are:
#   Deployment  - Deploy on one specific node. Uses kind Deployment.
#   StatefulSet - Use a StatefulSet (preferred if the PVC is ReadWriteOnce).
deploymentType: StatefulSet

# Namespace to deploy into.
namespace: sense

# Image to run. Helm charts run the same image as the helm chart version by default:
# if the installed helm chart is 0.1.46, it will use the latest-1.4.6 image.
# Please consult the SENSE team if any changes are needed, but usually none are.
# During an upgrade, the new helm chart will take the new image of the specified helm version.
#   latest - Latest stable version (might not be from the same helm release and may be missing Helm features)
#   dev    - Development release (use with caution)
#   pullPolicy: Always - Always pull the image from the repository (the default). Overwrite if needed.
#image:
#  pullPolicy: Always
#  image: latest-20240614

# Resources for the Frontend. This should not require any changes for controlling a single network device;
# if you have more devices, increase the resources, e.g. for 4 network devices use 4 CPU and 8Gi memory.
# Defaults are below (no need to uncomment; the helm chart uses them automatically).
# Change them only if needed.
#cpuLimit: 4
#memoryLimit: 8Gi
#cpuRequest: 2
#memoryRequest: 4Gi

# Priority Class Name for your deployment. Uncomment and set it if you have a priority class defined in your cluster.
# See https://kubernetes.io/docs/concepts/scheduling-eviction/pod-priority-preemption/ for more details.
#priorityClassName: "somename"

# Persistent volume claim for the Frontend.
# This should not require any changes if you run ceph; no other backends are supported yet.
# If you have another storage class, change it (no guarantee it will work; it might need manual configuration).
# If you set enabled to false, persistent storage is not guaranteed (data might be lost at pod restart).
storage:
  enabled: true
  storageClassName: rook-ceph-block
  capacity: 10Gi
  # volname: Optional; better left unset to avoid conflicts. By default it uses
  # pvc-<.Chart.Name>-<.Values.sitename>-<.Values.md5>, which is unique per deployment.

# Persistent volume claim for Frontend logs.
# This should not require any changes if you run ceph; no other backends are supported yet.
# If you have another storage class, change it (no guarantee it will work; it might need manual configuration).
# If you set enabled to false, persistent storage is not guaranteed (data might be lost at pod restart).
logstorage:
  enabled: true
  storageClassName: rook-ceph-block
  capacity: 5Gi
  # volname: Optional; better left unset to avoid conflicts. By default it uses
  # pvclog-<.Chart.Name>-<.Values.sitename>-<.Values.md5>, which is unique per deployment.

# Tolerations for your deployment. Include as many as you need and they will
# be added to your deployment file. A combined example follows the nodeSelector section below.
# tolerations:
#  - key: "key"
#    operator: "Equal|Exists"
#    value: "value"
#    effect: "NoSchedule|PreferNoSchedule|NoExecute(1.6 only)"

# Node Selector for your deployment. Required if deploymentType is Deployment:
nodeSelector: {}
#nodeSelector:
#  kubernetes.io/hostname: node-2-7.sdsc.optiputer.net
# In case of a DaemonSet, it is ignored.
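# A minimal sketch of pinning the Frontend to a dedicated node, combining the
# deploymentType, nodeSelector, and tolerations settings above. The node name
# and the sense=frontend:NoSchedule taint are hypothetical placeholders;
# adjust them to your cluster before uncommenting.
#deploymentType: Deployment
#nodeSelector:
#  kubernetes.io/hostname: node-2-7.sdsc.optiputer.net
#tolerations:
#  - key: "sense"
#    operator: "Equal"
#    value: "frontend"
#    effect: "NoSchedule"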
# Affinity for your deployment. Include as many rules as you need and they will
# be added to your deployment file.
affinity: {}
#affinity:
#  nodeAffinity:
#    requiredDuringSchedulingIgnoredDuringExecution:
#      nodeSelectorTerms:
#        - matchExpressions:
#            - key: kubernetes.io/nodepool
#              operator: In
#              values:
#                - nodepool1

# Liveness and readiness probes for the deployment.

# Liveness probe for your deployment. It checks that the agent is running and healthy.
# Enabled by default, and added to the deployment.
liveness:
  enabled: true
  initialDelaySeconds: 30
  periodSeconds: 60
  failureThreshold: 5
  successThreshold: 1
  timeoutSeconds: 30

# Readiness probe for your deployment. It checks that the agent is ready to accept connections.
# Enabled by default, and added to the deployment.
readiness:
  enabled: true
  initialDelaySeconds: 30
  periodSeconds: 60
  failureThreshold: 5
  successThreshold: 1
  timeoutSeconds: 30

# Ingress for the Frontend. The Ingress can also issue a certificate, e.g. from Let's Encrypt.
# If you prefer to use cert-manager, set certmanager to true and configure certmanager_params.
# If you prefer to use files, set usehostcert to true and fill in the hostcert and hostkey pem values (see Option 3 below).
ingress:
  enabled: true
  host:
  # annotations:
  #   kubernetes.io/ingress.class: haproxy
  #   kubernetes.io/ssl-passthrough: "true"
  #   cert-manager.io/cluster-issuer: letsencrypt-prod

# ===============================================================
# Configuration parameters for the Frontend.
# These must be defined and configured here: https://github.com/sdn-sense/rm-configs
sitename:
gitrepo: "sdn-sense/rm-configs"
branch: master
md5:

# Sadly, helm does not allow including files from outside the chart,
# so local files cannot be used here. Modify the following
# parameters to the correct values.
# Spacing is important: keep it at 2 spaces, which marks the beginning of a line inside the file (as this is a YAML file).
# If the site is RAW (no network configuration), set ansible_conf to inventory: {} (see the sketch after the block below).
# For everything else, see the format and supported devices here: https://sdn-sense.github.io/NetControlAnsible.html
ansible_conf: |
  # See https://sdn-sense.github.io/NetControlAnsible.html for the format
  inventory:
    dellos9_s0:
      network_os: sense.dellos9.dellos9
      host: 1.2.3.4
      user: admin123
      pass: admin123
      become: 0
      snmp_params:
        session_vars:
          community: comm_test
          hostname: 1.2.3.4
          version: 2
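# The RAW (no network device) form of ansible_conf mentioned above would look
# like this; a sketch, keeping the required 2-space indentation inside the block:
#ansible_conf: |
#  inventory: {}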
# Environment variables for the Frontend (mainly modify only MARIA_DB_PASSWORD).
# Spacing is important: keep it at 2 spaces, which marks the beginning of a line inside the file (as this is a YAML file).
environment_file: |
  # Environment variables for MariaDB
  MARIA_DB_PASSWORD=
  MARIA_DB_USER=root
  MARIA_DB_HOST=localhost
  MARIA_DB_PORT=3306
  MARIA_DB_DATABASE=sitefe

# SSH keys for communication with network devices.
# If ansible is configured to use an ssh key, add the path to the private key here.
#sshkeys:
#  frrsshkey: files/sense-cern-fabric.nrp-nautilus.io.yaml/frrsshkey

# SSL Certificate parameters:
# A certificate can come from cert-manager or
# from the files directory inside this chart.
# Below are the 3 options supported by the SiteRM Helm Charts.
# Cert-manager availability depends on your Kubernetes cluster and its supported options.
# Please check with your Kubernetes cluster administrator which option is supported
# (ClusterIssuer, Issuer, or none - in the latter case use Option 3).
# ===================================================================================
# Option 1: CERT MANAGER + ClusterIssuer
# A ClusterIssuer is a cluster-scoped resource, usable across all namespaces.
#certmanager: true
#certmanager_params:
#  commonName: "hostname-replace-me"
#  dnsNames:
#    - "hostname-replace-me"
#  issuerRef:
#    group: cert-manager.io
#    kind: ClusterIssuer
#    name: incommon-v2-prod  # REPLACE based on your ClusterIssuer name.
#  privateKey:
#    algorithm: RSA
#    encoding: PKCS1
#    size: 4096
# ===================================================================================
# Option 2: CERT MANAGER + Issuer
# An Issuer is a namespaced resource, usable only within the namespace it is defined in.
# Set the correct location of hostcert and hostkey inside the files directory.
# When using cert-manager, set certmanager to true
# and all certmanager-related parameters.
#certmanager: true
#certmanager_params:
#  commonName: "hostname-replace-me-if-deployment"
#  dnsNames:
#    - "hostname-replace-me-if-deployment"
#  duration: 720h0m0s
#  issuerRef:
#    group: cert-manager.io
#    kind: Issuer
#    name: sense-issuer-agent  # Name of the issuer must match issuer_params:name
#  renewBefore: 120h0m0s
#  privateKey:
#    algorithm: RSA
#    encoding: PKCS1
#    size: 4096
# The issuer name must match certmanager_params:issuerRef:name.
# The email address is used for registering with the server (e.g. Let's Encrypt requires it).
#issuer: true
#issuer_params:
#  name: sense-issuer-agent  # Name of the issuer must match certmanager_params:issuerRef:name
#  email: "myemailaddress@somedomain.com"
#  server: "https://acme-v02.api.letsencrypt.org/directory"
# ===================================================================================
# Option 3: Manual Certificates
# If you plan to provide your own hostcert and hostkey, set certmanager: false,
# set usehostcert: true,
# and update the hostcert and hostkey values.
# IMPORTANT!!!!
# Spacing is important: keep it at 2 spaces, which marks the beginning of a line inside the file (as this is a YAML file and needs structure).
#certmanager: false
#usehostcert: true
#hostcert: |
#  -----BEGIN CERTIFICATE-----
#  MIIITTCCBrWgAwIBAgIRAIQCECprMd/bWm5I5oKFx+owDQYJKoZIhvcNAQEMBQAw
#  ....
#  -----END CERTIFICATE-----
#hostkey: |
#  -----BEGIN RSA PRIVATE KEY-----
#  MIIJKQIBAAKCAgEApJNvz1WUk/iW1VBkX3UsCWdoxRpH7nYvMCrqyRbiKUI54CHu
#  ...
#  -----END RSA PRIVATE KEY-----
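# For Option 3, a self-signed pair for local testing could be generated with a
# command like the following (a sketch only, not an official recommendation;
# production deployments should use a CA-signed certificate):
#   openssl req -x509 -newkey rsa:4096 -nodes -days 365 \
#     -keyout hostkey.pem -out hostcert.pem \
#     -subj "/CN=hostname-replace-me"
# Then paste the pem contents under hostcert/hostkey above, indented by 2 spaces.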