---
apiVersion: v1
kind: ConfigMap
metadata:
  name: cilium-config
  namespace: kube-system
data:
  # This etcd-config contains the etcd endpoints of your cluster. If you use
  # TLS please make sure you follow the tutorial in https://cilium.link/etcd-config
  etcd-config: |-
    ---
    endpoints:
    - https://cilium-etcd-client.kube-system.svc:2379
    #
    # In case you want to use TLS in etcd, uncomment the 'ca-file' line
    # and create a kubernetes secret by following the tutorial in
    # https://cilium.link/etcd-config
    ca-file: '/var/lib/etcd-secrets/etcd-client-ca.crt'
    #
    # In case you want client to server authentication, uncomment the following
    # lines and create a kubernetes secret by following the tutorial in
    # https://cilium.link/etcd-config
    key-file: '/var/lib/etcd-secrets/etcd-client.key'
    cert-file: '/var/lib/etcd-secrets/etcd-client.crt'

  # If you want to run cilium in debug mode change this value to true
  debug: "false"

  # If you want metrics enabled in all of your Cilium agents, set the port for
  # which the Cilium agents will have their metrics exposed.
  # This option deprecates the "prometheus-serve-addr" in the
  # "cilium-metrics-config" ConfigMap.
  # NOTE that this will open the port on ALL nodes where Cilium pods are
  # scheduled.
  # prometheus-serve-addr: ":9090"

  # Enable IPv4 addressing. If enabled, all endpoints are allocated an IPv4
  # address.
  enable-ipv4: "true"

  # Enable IPv6 addressing. If enabled, all endpoints are allocated an IPv6
  # address.
  enable-ipv6: "false"

  # If a serious issue occurs during Cilium startup, this
  # invasive option may be set to true to remove all persistent
  # state. Endpoints will not be restored using knowledge from a
  # prior Cilium run, so they may receive new IP addresses upon
  # restart. This also triggers clean-cilium-bpf-state.
  clean-cilium-state: "false"

  # If you want to clean cilium BPF state, set this to true;
  # it removes all BPF maps from the filesystem. Upon restart,
  # endpoints are restored with the same IP addresses, however
  # any ongoing connections may be disrupted briefly.
  # Loadbalancing decisions will be reset, so any ongoing
  # connections via a service may be loadbalanced to a different
  # backend after restart.
  clean-cilium-bpf-state: "false"

  # If you want cilium monitor to aggregate tracing for packets, set this level
  # to "low", "medium", or "maximum". The higher the level, the fewer packets
  # will be seen in monitor output.
  monitor-aggregation: "none"

  # bpf-ct-global-*-max specifies the maximum number of connections
  # supported across all endpoints, split by protocol: tcp or other. One pair
  # of maps uses these values for IPv4 connections, and another pair of maps
  # uses these values for IPv6 connections.
  #
  # If these values are modified, then during the next Cilium startup the
  # tracking of ongoing connections may be disrupted. This may lead to brief
  # policy drops or a change in loadbalancing decisions for a connection.
  #
  # For users upgrading from Cilium 1.2 or earlier, to minimize disruption
  # during the upgrade process, comment out these options.
  bpf-ct-global-tcp-max: "524288"
  bpf-ct-global-any-max: "262144"

  # Pre-allocation of map entries allows per-packet latency to be reduced, at
  # the expense of up-front memory allocation for the entries in the maps. The
  # default value below will minimize memory usage in the default installation;
  # users who are sensitive to latency may consider setting this to "true".
  #
  # This option was introduced in Cilium 1.4. Cilium 1.3 and earlier ignore
  # this option and behave as though it is set to "true".
  #
  # If this value is modified, then during the next Cilium startup the restore
  # of existing endpoints and tracking of ongoing connections may be disrupted.
  # This may lead to policy drops or a change in loadbalancing decisions for a
  # connection for some time. Endpoints may need to be recreated to restore
  # connectivity.
  #
  # If this option is set to "false" during an upgrade from 1.3 or earlier to
  # 1.4 or later, then it may cause one-time disruptions during the upgrade.
  preallocate-bpf-maps: "false"

  # Regular expression matching compatible Istio sidecar istio-proxy
  # container image names
  sidecar-istio-proxy-image: "cilium/istio_proxy"

  # Encapsulation mode for communication between nodes
  # Possible values:
  #   - disabled
  #   - vxlan (default)
  #   - geneve
  tunnel: "vxlan"

  # Name of the cluster. Only relevant when building a mesh of clusters.
  cluster-name: default

  # Unique ID of the cluster. Must be unique across all connected clusters and
  # in the range of 1 to 255. Only relevant when building a mesh of clusters.
  #cluster-id: 1

  # Interface to be used when running Cilium on top of a CNI plugin.
  # For flannel, use "cni0"
  flannel-master-device: ""

  # When running Cilium with policy enforcement enabled on top of a CNI plugin,
  # the BPF programs will be installed on the network interface specified in
  # 'flannel-master-device' and on all network interfaces belonging to
  # a container. When the Cilium DaemonSet is removed, the BPF programs will
  # be kept on the interfaces unless this option is set to "true".
  flannel-uninstall-on-exit: "false"

  # Installs a BPF program to allow for policy enforcement in already running
  # containers managed by Flannel.
  # NOTE: This requires the Cilium DaemonSet to be running with hostPID.
  # To run in this mode in Kubernetes, change the value of hostPID from
  # false to true. It can be found under the path `spec.template.spec.hostPID`.
  flannel-manage-existing-containers: "false"

  # DNS Polling periodically issues a DNS lookup for each `matchName` from
  # cilium-agent. The result is used to regenerate endpoint policy.
  # DNS lookups are repeated with an interval of 5 seconds, and are made for
  # A (IPv4) and AAAA (IPv6) addresses. Should a lookup fail, the most recent IP
  # data is used instead. An IP change will trigger a regeneration of the Cilium
  # policy for each endpoint and increment the per cilium-agent policy
  # repository revision.
  #
  # This option is disabled by default starting from version 1.4.x in favor
  # of a more powerful DNS proxy-based implementation, see [0] for details.
  # Enable this option if you want to use FQDN policies but do not want to use
  # the DNS proxy.
  #
  # To ease upgrade, users may opt to set this option to "true".
  # Otherwise please refer to the Upgrade Guide [1] which explains how to
  # prepare policy rules for upgrade.
  #
  # [0] http://docs.cilium.io/en/stable/policy/language/#dns-based
  # [1] http://docs.cilium.io/en/stable/install/upgrade/#changes-that-may-require-action
  tofqdns-enable-poller: "false"

  # wait-bpf-mount makes the init container wait until the bpf filesystem is mounted
  wait-bpf-mount: "false"

  # Enable legacy services (prior to v1.5) to prevent terminating existing
  # connections to services when upgrading Cilium from < v1.5 to v1.5.
  enable-legacy-services: "false"
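# NOTE: The 'ca-file', 'key-file' and 'cert-file' paths referenced in the
# etcd-config above are served from the optional 'cilium-etcd-secrets' Secret,
# which the DaemonSet below mounts at /var/lib/etcd-secrets. As a rough sketch
# (the certificate values are placeholders; follow
# https://cilium.link/etcd-config for the authoritative steps), such a Secret
# could look like:
#
#   apiVersion: v1
#   kind: Secret
#   metadata:
#     name: cilium-etcd-secrets
#     namespace: kube-system
#   data:
#     etcd-client-ca.crt: <base64-encoded CA certificate>
#     etcd-client.key: <base64-encoded client key>
#     etcd-client.crt: <base64-encoded client certificate>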
---
apiVersion: apps/v1
kind: DaemonSet
metadata:
  labels:
    k8s-app: cilium
    kubernetes.io/cluster-service: "true"
  name: cilium
  namespace: kube-system
spec:
  selector:
    matchLabels:
      k8s-app: cilium
      kubernetes.io/cluster-service: "true"
  template:
    metadata:
      annotations:
        prometheus.io/port: "9090"
        prometheus.io/scrape: "true"
        # This annotation plus the CriticalAddonsOnly toleration marks
        # cilium as a critical pod in the cluster, which ensures cilium
        # gets priority scheduling.
        # https://kubernetes.io/docs/tasks/administer-cluster/guaranteed-scheduling-critical-addon-pods/
        scheduler.alpha.kubernetes.io/critical-pod: ""
        scheduler.alpha.kubernetes.io/tolerations: '[{"key":"dedicated","operator":"Equal","value":"master","effect":"NoSchedule"}]'
      labels:
        k8s-app: cilium
        kubernetes.io/cluster-service: "true"
    spec:
      containers:
      - args:
        - --kvstore=etcd
        - --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
        - --container-runtime=crio
        - --config-dir=/tmp/cilium/config-map
        command:
        - cilium-agent
        env:
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_K8S_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_FLANNEL_MASTER_DEVICE
          valueFrom:
            configMapKeyRef:
              key: flannel-master-device
              name: cilium-config
              optional: true
        - name: CILIUM_FLANNEL_UNINSTALL_ON_EXIT
          valueFrom:
            configMapKeyRef:
              key: flannel-uninstall-on-exit
              name: cilium-config
              optional: true
        # To be removed in Cilium 1.6, use prometheus-serve-addr in the
        # cilium-config ConfigMap
        - name: CILIUM_PROMETHEUS_SERVE_ADDR
          valueFrom:
            configMapKeyRef:
              key: prometheus-serve-addr
              name: cilium-metrics-config
              optional: true
        - name: CILIUM_CLUSTERMESH_CONFIG
          value: /var/lib/cilium/clustermesh/
        image: docker.io/cilium/cilium:v1.5.0
        imagePullPolicy: IfNotPresent
        lifecycle:
          postStart:
            exec:
              command:
              - /cni-install.sh
          preStop:
            exec:
              command:
              - /cni-uninstall.sh
        livenessProbe:
          exec:
            command:
            - cilium
            - status
            - --brief
          failureThreshold: 10
          # The initial delay for the liveness probe is intentionally large to
          # avoid an endless kill & restart cycle in the event that the initial
          # bootstrapping takes longer than expected.
          initialDelaySeconds: 120
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 5
        name: cilium-agent
        ports:
        - containerPort: 9090
          hostPort: 9090
          name: prometheus
          protocol: TCP
        readinessProbe:
          exec:
            command:
            - cilium
            - status
            - --brief
          failureThreshold: 3
          initialDelaySeconds: 5
          periodSeconds: 30
          successThreshold: 1
          timeoutSeconds: 5
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
            - SYS_MODULE
          privileged: true
        volumeMounts:
        - mountPath: /var/run/cilium
          name: cilium-run
        - mountPath: /host/opt/cni/bin
          name: cni-path
        - mountPath: /host/etc/cni/net.d
          name: etc-cni-netd
        - mountPath: /var/run/crio/crio.sock
          name: crio-socket
          readOnly: true
        - mountPath: /var/lib/etcd-config
          name: etcd-config-path
          readOnly: true
        - mountPath: /var/lib/etcd-secrets
          name: etcd-secrets
          readOnly: true
        - mountPath: /var/lib/cilium/clustermesh
          name: clustermesh-secrets
          readOnly: true
        - mountPath: /tmp/cilium/config-map
          name: cilium-config-path
          readOnly: true
        # Needed to be able to load kernel modules
        - mountPath: /lib/modules
          name: lib-modules
          readOnly: true
      dnsPolicy: ClusterFirstWithHostNet
      hostNetwork: true
      hostPID: false
      initContainers:
      - command:
        - /init-container.sh
        env:
        - name: CLEAN_CILIUM_STATE
          valueFrom:
            configMapKeyRef:
              key: clean-cilium-state
              name: cilium-config
              optional: true
        - name: CLEAN_CILIUM_BPF_STATE
          valueFrom:
            configMapKeyRef:
              key: clean-cilium-bpf-state
              name: cilium-config
              optional: true
        image: docker.io/cilium/cilium-init:2019-04-05
        imagePullPolicy: IfNotPresent
        name: clean-cilium-state
        securityContext:
          capabilities:
            add:
            - NET_ADMIN
          privileged: true
        volumeMounts:
        - mountPath: /var/run/cilium
          name: cilium-run
      priorityClassName: system-node-critical
      restartPolicy: Always
      serviceAccount: cilium
      serviceAccountName: cilium
      terminationGracePeriodSeconds: 1
      tolerations:
      - operator: Exists
      volumes:
      # To keep state between restarts / upgrades
      - hostPath:
          path: /var/run/cilium
          type: DirectoryOrCreate
        name: cilium-run
      # To read labels from CRI-O containers running on the host
      - hostPath:
          path: /var/run/crio/crio.sock
          type: Socket
        name: crio-socket
      # To install the cilium cni plugin on the host
      - hostPath:
          path: /opt/cni/bin
          type: DirectoryOrCreate
        name: cni-path
      # To install the cilium cni configuration on the host
      - hostPath:
          path: /etc/cni/net.d
          type: DirectoryOrCreate
        name: etc-cni-netd
      # To be able to load kernel modules
      - hostPath:
          path: /lib/modules
        name: lib-modules
      # To read the etcd config stored in config maps
      - configMap:
          defaultMode: 420
          items:
          - key: etcd-config
            path: etcd.config
          name: cilium-config
        name: etcd-config-path
      # To read the k8s etcd secrets in case the user might want to use TLS
      - name: etcd-secrets
        secret:
          defaultMode: 420
          optional: true
          secretName: cilium-etcd-secrets
      # To read the clustermesh configuration
      - name: clustermesh-secrets
        secret:
          defaultMode: 420
          optional: true
          secretName: cilium-clustermesh
      # To read the configuration from the config map
      - configMap:
          name: cilium-config
        name: cilium-config-path
  updateStrategy:
    rollingUpdate:
      # Specifies the maximum number of Pods that can be unavailable during the update process.
      maxUnavailable: 2
    type: RollingUpdate
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    io.cilium/app: operator
    name: cilium-operator
  name: cilium-operator
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      io.cilium/app: operator
      name: cilium-operator
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        io.cilium/app: operator
        name: cilium-operator
    spec:
      containers:
      - args:
        - --debug=$(CILIUM_DEBUG)
        - --kvstore=etcd
        - --kvstore-opt=etcd.config=/var/lib/etcd-config/etcd.config
        command:
        - cilium-operator
        env:
        - name: POD_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: K8S_NODE_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: spec.nodeName
        - name: CILIUM_DEBUG
          valueFrom:
            configMapKeyRef:
              key: debug
              name: cilium-config
              optional: true
        - name: CILIUM_CLUSTER_NAME
          valueFrom:
            configMapKeyRef:
              key: cluster-name
              name: cilium-config
              optional: true
        - name: CILIUM_CLUSTER_ID
          valueFrom:
            configMapKeyRef:
              key: cluster-id
              name: cilium-config
              optional: true
        - name: CILIUM_DISABLE_ENDPOINT_CRD
          valueFrom:
            configMapKeyRef:
              key: disable-endpoint-crd
              name: cilium-config
              optional: true
        - name: AWS_ACCESS_KEY_ID
          valueFrom:
            secretKeyRef:
              key: AWS_ACCESS_KEY_ID
              name: cilium-aws
              optional: true
        - name: AWS_SECRET_ACCESS_KEY
          valueFrom:
            secretKeyRef:
              key: AWS_SECRET_ACCESS_KEY
              name: cilium-aws
              optional: true
        - name: AWS_DEFAULT_REGION
          valueFrom:
            secretKeyRef:
              key: AWS_DEFAULT_REGION
              name: cilium-aws
              optional: true
        image: docker.io/cilium/operator:v1.5.0
        imagePullPolicy: IfNotPresent
        name: cilium-operator
        livenessProbe:
          httpGet:
            path: /healthz
            port: 9234
            scheme: HTTP
          initialDelaySeconds: 60
          periodSeconds: 10
          timeoutSeconds: 3
        volumeMounts:
        - mountPath: /var/lib/etcd-config
          name: etcd-config-path
          readOnly: true
        - mountPath: /var/lib/etcd-secrets
          name: etcd-secrets
          readOnly: true
      dnsPolicy: ClusterFirst
      priorityClassName: system-node-critical
      restartPolicy: Always
      serviceAccount: cilium-operator
      serviceAccountName: cilium-operator
      volumes:
      # To read the etcd config stored in config maps
      - configMap:
          defaultMode: 420
          items:
          - key: etcd-config
            path: etcd.config
          name: cilium-config
        name: etcd-config-path
      # To read the k8s etcd secrets in case the user might want to use TLS
      - name: etcd-secrets
        secret:
          defaultMode: 420
          optional: true
          secretName: cilium-etcd-secrets
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cilium-operator
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-operator
rules:
- apiGroups:
  - ""
  resources:
  # to get k8s version and status
  - componentstatuses
  verbs:
  - get
- apiGroups:
  - ""
  resources:
  # to automatically delete [core|kube]dns pods so that they start being
  # managed by Cilium
  - pods
  verbs:
  - get
  - list
  - watch
  - delete
- apiGroups:
  - ""
  resources:
  # to automatically read from k8s and import the node's pod CIDR to cilium's
  # etcd so all nodes know how to reach another pod running on a different
  # node.
  - nodes
  # to perform the translation of a CNP that contains `ToGroup` to its endpoints
  - services
  - endpoints
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumnetworkpolicies/status
  - ciliumendpoints
  - ciliumendpoints/status
  verbs:
  - '*'
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium-operator
subjects:
- kind: ServiceAccount
  name: cilium-operator
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium-etcd-operator
rules:
- apiGroups:
  - etcd.database.coreos.com
  resources:
  - etcdclusters
  verbs:
  - get
  - delete
  - create
  - update
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - delete
  - get
  - create
- apiGroups:
  - ""
  resources:
  - deployments
  verbs:
  - delete
  - create
  - get
  - update
- apiGroups:
  - ""
  resources:
  - pods
  verbs:
  - list
  - get
  - delete
- apiGroups:
  - apps
  resources:
  - deployments
  verbs:
  - delete
  - create
  - get
  - update
- apiGroups:
  - ""
  resources:
  - componentstatuses
  verbs:
  - get
- apiGroups:
  - extensions
  resources:
  - deployments
  verbs:
  - delete
  - create
  - get
  - update
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
  - create
  - delete
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium-etcd-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium-etcd-operator
subjects:
- kind: ServiceAccount
  name: cilium-etcd-operator
  namespace: kube-system
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: etcd-operator
rules:
- apiGroups:
  - etcd.database.coreos.com
  resources:
  - etcdclusters
  - etcdbackups
  - etcdrestores
  verbs:
  - '*'
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - '*'
- apiGroups:
  - ""
  resources:
  - pods
  - services
  - endpoints
  - persistentvolumeclaims
  - events
  - deployments
  verbs:
  - '*'
- apiGroups:
  - apps
  resources:
  - deployments
  verbs:
  - '*'
- apiGroups:
  - extensions
  resources:
  - deployments
  verbs:
  - create
  - get
  - list
  - patch
  - update
- apiGroups:
  - ""
  resources:
  - secrets
  verbs:
  - get
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: etcd-operator
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: etcd-operator
subjects:
- kind: ServiceAccount
  name: cilium-etcd-sa
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cilium-etcd-operator
  namespace: kube-system
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cilium-etcd-sa
  namespace: kube-system
---
apiVersion: apps/v1
kind: Deployment
metadata:
  labels:
    io.cilium/app: etcd-operator
    name: cilium-etcd-operator
  name: cilium-etcd-operator
  namespace: kube-system
spec:
  replicas: 1
  selector:
    matchLabels:
      io.cilium/app: etcd-operator
      name: cilium-etcd-operator
  strategy:
    rollingUpdate:
      maxSurge: 1
      maxUnavailable: 1
    type: RollingUpdate
  template:
    metadata:
      labels:
        io.cilium/app: etcd-operator
        name: cilium-etcd-operator
    spec:
      containers:
      - args:
        #- --etcd-node-selector=disktype=ssd,cputype=high
        command:
        - /usr/bin/cilium-etcd-operator
        env:
        - name: CILIUM_ETCD_OPERATOR_CLUSTER_DOMAIN
          value: "cluster.local"
        - name: CILIUM_ETCD_OPERATOR_ETCD_CLUSTER_SIZE
          value: "3"
        - name: CILIUM_ETCD_OPERATOR_NAMESPACE
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.namespace
        - name: CILIUM_ETCD_OPERATOR_POD_NAME
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.name
        - name: CILIUM_ETCD_OPERATOR_POD_UID
          valueFrom:
            fieldRef:
              apiVersion: v1
              fieldPath: metadata.uid
        - name: CILIUM_ETCD_META_ETCD_AUTO_COMPACTION_MODE
          value: "revision"
        - name: CILIUM_ETCD_META_ETCD_AUTO_COMPACTION_RETENTION
          value: "25000"
        image: docker.io/cilium/cilium-etcd-operator:v2.0.6
        imagePullPolicy: IfNotPresent
        name: cilium-etcd-operator
      dnsPolicy: ClusterFirst
      hostNetwork: true
      priorityClassName: system-node-critical
      restartPolicy: Always
      serviceAccount: cilium-etcd-operator
      serviceAccountName: cilium-etcd-operator
      tolerations:
      - operator: Exists
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRoleBinding
metadata:
  name: cilium
roleRef:
  apiGroup: rbac.authorization.k8s.io
  kind: ClusterRole
  name: cilium
subjects:
- kind: ServiceAccount
  name: cilium
  namespace: kube-system
- apiGroup: rbac.authorization.k8s.io
  kind: Group
  name: system:nodes
---
apiVersion: rbac.authorization.k8s.io/v1
kind: ClusterRole
metadata:
  name: cilium
rules:
- apiGroups:
  - networking.k8s.io
  resources:
  - networkpolicies
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - namespaces
  - services
  - nodes
  - endpoints
  - componentstatuses
  verbs:
  - get
  - list
  - watch
- apiGroups:
  - ""
  resources:
  - pods
  - nodes
  verbs:
  - get
  - list
  - watch
  - update
- apiGroups:
  - extensions
  resources:
  - ingresses
  verbs:
  - create
  - get
  - list
  - watch
- apiGroups:
  - apiextensions.k8s.io
  resources:
  - customresourcedefinitions
  verbs:
  - create
  - get
  - list
  - watch
  - update
- apiGroups:
  - cilium.io
  resources:
  - ciliumnetworkpolicies
  - ciliumnetworkpolicies/status
  - ciliumendpoints
  - ciliumendpoints/status
  verbs:
  - '*'
---
apiVersion: v1
kind: ServiceAccount
metadata:
  name: cilium
  namespace: kube-system
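# NOTE: The cilium-operator Deployment above optionally reads AWS credentials
# from a 'cilium-aws' Secret in kube-system. All AWS_* env vars are marked
# optional, so the Secret only needs to exist if you want to hand AWS
# credentials to the operator. A minimal sketch with placeholder values:
#
#   apiVersion: v1
#   kind: Secret
#   metadata:
#     name: cilium-aws
#     namespace: kube-system
#   data:
#     AWS_ACCESS_KEY_ID: <base64-encoded access key id>
#     AWS_SECRET_ACCESS_KEY: <base64-encoded secret access key>
#     AWS_DEFAULT_REGION: <base64-encoded region>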