apiVersion: v1
kind: Service
metadata:
  name: "etcd"
  annotations:
    # Create endpoints also if the related pod isn't ready
    service.alpha.kubernetes.io/tolerate-unready-endpoints: "true"
spec:
  ports:
  - port: 2379
    name: client
  - port: 2380
    name: peer
  clusterIP: None
  selector:
    component: "etcd"
---
apiVersion: apps/v1beta1
kind: StatefulSet
metadata:
  name: "etcd"
  labels:
    component: "etcd"
spec:
  serviceName: "etcd"
  # Changing the replicas value requires a manual etcdctl member remove/add
  # command (remove before decreasing, add after increasing); see the example
  # commands after this manifest.
  replicas: 3
  template:
    metadata:
      name: "etcd"
      labels:
        component: "etcd"
    spec:
      containers:
      - name: "etcd"
        image: "quay.io/coreos/etcd:v3.2.3"
        ports:
        - containerPort: 2379
          name: client
        - containerPort: 2380
          name: peer
        env:
        - name: CLUSTER_SIZE
          value: "3"
        - name: SET_NAME
          value: "etcd"
        volumeMounts:
        - name: data
          mountPath: /var/run/etcd
        command:
          - "/bin/sh"
          - "-ecx"
          - |
            IP=$(hostname -i)

            # There's no need to wait: waiting causes problems when restarting
            # an already initialized cluster, because if a pod cannot be
            # scheduled its related endpoint (and so its DNS entry) won't be
            # created. During initialization etcd will fail to resolve the
            # name and retry.
            #
            #for i in $(seq 0 $((${CLUSTER_SIZE} - 1))); do
            #  while true; do
            #    echo "Waiting for ${SET_NAME}-${i}.${SET_NAME} to come up"
            #    ping -W 1 -c 1 ${SET_NAME}-${i}.${SET_NAME} > /dev/null && break
            #    sleep 1s
            #  done
            #done

            PEERS=""
            for i in $(seq 0 $((${CLUSTER_SIZE} - 1))); do
                PEERS="${PEERS}${PEERS:+,}${SET_NAME}-${i}=http://${SET_NAME}-${i}.${SET_NAME}:2380"
            done

            # Start etcd. If the cluster is already initialized, the
            # `--initial-*` options will be ignored.
            exec etcd --name ${HOSTNAME} \
              --listen-peer-urls http://${IP}:2380 \
              --listen-client-urls http://${IP}:2379,http://127.0.0.1:2379 \
              --advertise-client-urls http://${HOSTNAME}.${SET_NAME}:2379 \
              --initial-advertise-peer-urls http://${HOSTNAME}.${SET_NAME}:2380 \
              --initial-cluster-token etcd-cluster-1 \
              --initial-cluster ${PEERS} \
              --initial-cluster-state new \
              --data-dir /var/run/etcd/default.etcd
  ## We are using dynamic PV provisioning with the "standard" storage class, so
  ## this resource can be deployed unchanged on minikube (which defines that
  ## class for its hostpath provisioner). In production, define your own way to
  ## use PV claims; a sketch StorageClass follows this manifest.
  volumeClaimTemplates:
  - metadata:
      name: data
      annotations:
        volume.alpha.kubernetes.io/storage-class: standard
    spec:
      accessModes:
        - "ReadWriteOnce"
      resources:
        requests:
          storage: 1Gi
---
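# ---------------------------------------------------------------------------
# Example: manual membership changes when scaling (a hedged sketch). The
# replicas comment above requires etcdctl member remove/add around any change
# to the replica count. The commands below assume the etcd v3 API and a shell
# with network access to the client port; the member name "etcd-3", its peer
# URL, and the <member-id> placeholder are illustrative, not taken from the
# manifest.
#
#   # Before scaling down (e.g. 4 -> 3), remove the departing member:
#   ETCDCTL_API=3 etcdctl --endpoints=http://etcd-0.etcd:2379 member list
#   ETCDCTL_API=3 etcdctl --endpoints=http://etcd-0.etcd:2379 member remove <member-id>
#
#   # After scaling up (e.g. 3 -> 4), register the new member:
#   ETCDCTL_API=3 etcdctl --endpoints=http://etcd-0.etcd:2379 \
#     member add etcd-3 --peer-urls=http://etcd-3.etcd:2380
# ---------------------------------------------------------------------------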
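# ---------------------------------------------------------------------------
# Example: the value the PEERS loop expands to. With CLUSTER_SIZE=3 and
# SET_NAME=etcd, the loop in the container command above produces one
# name=peer-url pair per ordinal, comma separated, which is passed as
# --initial-cluster:
#
#   etcd-0=http://etcd-0.etcd:2380,etcd-1=http://etcd-1.etcd:2380,etcd-2=http://etcd-2.etcd:2380
# ---------------------------------------------------------------------------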
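# ---------------------------------------------------------------------------
# Example: a production StorageClass (a hedged sketch). The volumeClaimTemplates
# above rely on minikube's "standard" class; in production you could define a
# class like the one below and point the claim's storage-class annotation at it.
# The name "etcd-ssd" is illustrative, and the AWS EBS provisioner and "gp2"
# volume type are assumptions; substitute your cluster's own provisioner and
# parameters.
# ---------------------------------------------------------------------------
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: etcd-ssd
provisioner: kubernetes.io/aws-ebs
parameters:
  type: gp2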