#!/usr/bin/env bash
# justin lim
#
# version 1.0

##################
#
# this script is no longer being maintained. Please use the new script:
#   curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/deploy-elastick8s.sh -o deploy-elastick8s.sh
#
#################

# curl -fsSL https://raw.githubusercontent.com/jlim0930/scripts/master/deploy-eck.sh -o deploy-eck.sh

# NOTES
# eck 1.2+
#   start of elasticsearchRef
#   ES 6.8+ & 7.1+
#   Beats 7.0+
#   entsearch 7.7+
#   all-in-one operator
# eck 1.4+
#   ES 7.17.3+
# eck 1.6+
#   elastic-agent 7.10+
#   elastic map server 7.11+
# eck 1.7+
#   crds.yaml & operator.yaml
#   fleet 7.14
#   sidecar container stack monitoring started with ES 7.14
# eck 1.9+
#   helm 3.2.0
# eck 2.1+
#   kibana configuration became longer and more specific for the fleet server
# eck 2.2
#   ES 8.0+
# Starting with 7.17 the stack container image changed to ubuntu - some fixes are needed due to this

# set WORKDIR
WORKDIR="${HOME}/eckstack"

###############################################################################################################
# colors
red=`tput setaf 1`
green=`tput setaf 2`
blue=`tput setaf 4`
reset=`tput sgr0`

###############################################################################################################
# help
help() {
  echo ""
  echo "${green}This script is limited to ECK Operator 1.4.0+ & Stack 7.10.0+."
  echo "${green} - various commands have additional limitations that are listed below."
  echo ""
  echo "${green}USAGE:${reset} ./`basename $0` command STACKversion ECKversion"
  echo ""
  echo "${blue}COMMANDS:${reset}"
  echo " ${green}operator${reset} - will stand up the ECK operator only and apply a trial license"
  echo " ${green}stack|start|build${reset} - will stand up the ECK Operator, elasticsearch, & kibana with CLUSTER name: ${blue}eck-lab${reset}"
  echo " ${green}dedicated${reset} - will stand up the ECK Operator, elasticsearch, & kibana with CLUSTER name: ${blue}eck-lab${reset} with 3 dedicated masters and 3 dedicated data nodes"
  echo " ${green}beats${reset} - will stand up the basic stack + filebeat, metricbeat, packetbeat, & heartbeat"
  echo " ${green}monitor1${reset} - will stand up the basic stack named ${blue}eck-lab${reset} and a monitoring stack named ${blue}eck-lab-monitor${reset}, with filebeat & metricbeat as PODS reporting stack monitoring to ${blue}eck-lab-monitor${reset}"
  echo " ${green}monitor2${reset} - will be the same as ${blue}monitor1${reset}, however both filebeat & metricbeat will run as sidecar containers inside of the elasticsearch & kibana Pods. Limited to ECK ${blue}1.7.0+${reset} & STACK ${blue}7.14.0+${reset}"
  echo " ${green}fleet${reset} - will stand up the basic stack + Fleet Server & elastic-agent as a DaemonSet on each ECK node."
  echo ""
  echo " ${green}cleanup${reset} - will delete all the resources including the ECK operator"
  echo ""
  echo "${green}EXAMPLE: ${reset}./`basename $0` fleet 8.2.0 2.2.0"
  echo ""
  echo "All yaml files will be stored in ${blue}~/eckstack${reset}"
  echo " ${blue}~/eckstack/notes${reset} will contain all endpoint and password information"
  echo " ${blue}~/eckstack/ca.crt${reset} will be the CA used to sign the public certificate"
  echo ""
} # end of help

###############################################################################################################
# functions

# cleanup
cleanup() {
  # make sure to name all yaml files as .yaml so that they can be picked up during cleanup
  echo ""
  echo "${green}********** Cleaning up **********${reset}"
  echo ""
  for item in `ls -1t ${WORKDIR}/*.yaml 2>/dev/null`
  do
    echo "${green}[DEBUG]${reset} DELETING Resources for: ${blue}${item}${reset}"
    kubectl delete -f ${item} > /dev/null 2>&1
  done
  rm -rf ${WORKDIR} > /dev/null 2>&1
  echo ""
  echo "${green}[DEBUG]${reset} All cleaned up"
  echo ""
} # end of cleanup

createsummary() {
  unset PASSWORD
  while [ "${PASSWORD}" = "" ]
  do
    PASSWORD=$(kubectl get secret ${1}-es-elastic-user -o go-template='{{.data.elastic | base64decode}}')
    echo "${green}[DEBUG]${reset} Grabbing elastic password for ${1}: ${blue}${PASSWORD}${reset}"
  done
  echo "${1} elastic password: ${PASSWORD}" >> notes

  unset ESIP
  while [ "${ESIP}" = "" ]
  do
    ESIP=`kubectl get service | grep ${1}-es-http | awk '{ print $4 }'`
    echo "${green}[DEBUG]${reset} Grabbing elasticsearch endpoint for ${1}: ${blue}https://${ESIP}:9200${reset}"
  done
  echo "${1} elasticsearch endpoint: https://${ESIP}:9200" >> notes

  unset KIBANAIP
  while [ "${KIBANAIP}" = "" -o "${KIBANAIP}" = "<pending>" ]
  do
    KIBANAIP=`kubectl get service | grep ${1}-kb-http | awk '{ print $4 }'`
    echo "${green}[DEBUG]${reset} Grabbing kibana endpoint for ${1}: ${blue}https://${KIBANAIP}:5601${reset}"
    sleep 2
  done
  echo "${1} kibana endpoint: https://${KIBANAIP}:5601" >> notes

  if [ "${1}" = "eck-lab" ]; then
    kubectl get secrets ${1}-es-http-certs-public -o jsonpath="{.data.ca\.crt}" | base64 -d > ca.crt
  fi
  echo ""
}

summary() {
  echo ""
  echo "${green}[SUMMARY]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset}"
  echo ""
  kubectl get all
  echo ""
  echo "${green}[SUMMARY]${reset} STACK INFO:"
  while read line
  do
    string1=`echo $line | awk -F": " '{ print $1 }'`
    string2=`echo $line | awk -F": " '{ print $2 }'`
    echo "${string1}: ${blue}${string2}${reset}"
  done < ${WORKDIR}/notes
  # cat ${WORKDIR}/notes
  #echo "${green}[SUMMARY]${reset} ${1} elastic user password: ${blue}`cat ${WORKDIR}/notes | grep 'ECK-LAB elastic password' | awk '{ print $NF }'`${reset}"
  #echo "${green}[SUMMARY]${reset} ${1} elasticsearch endpoint: ${blue}`cat ${WORKDIR}/notes | grep 'ECK-LAB elasticsearch endpoint' | awk '{ print $NF }'`${reset}"
  #echo "${green}[SUMMARY]${reset} ECK-LAB kibana endpoint: ${blue}`cat ${WORKDIR}/notes | grep 'ECK-LAB kibana endpoint' | awk '{ print $NF }'`${reset}"
  echo ""
  echo "${green}[SUMMARY]${reset} ${blue}ca.crt${reset} is located in ${blue}${WORKDIR}/ca.crt${reset}"
  #echo "${green}[SUMMARY]${reset} EXAMPLE: ${blue}curl --cacert ${WORKDIR}/ca.crt -u \"elastic:${PASSWORD}\" https://${ESIP}:9200${reset}"
  # curl --cacert ${WORKDIR}/ca.crt -u "elastic:${PASSWORD}" https://${ESIP}:9200
  echo ""
  echo "${green}[NOTE]${reset} If you missed the summary it is also in ${blue}${WORKDIR}/notes${reset}"
  echo "${green}[NOTE]${reset} You can start logging into kibana but please give things a few minutes for proper startup and for the components to settle down."
  echo ""
}
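# Illustrative only (not called anywhere): roughly what ${WORKDIR}/notes looks like after
# createsummary() (and fleet()) have appended to it; summary() splits each of these lines
# on ": " to colorize the value. The password and addresses below are placeholders, not
# values this script produces.
example_notes() {
  cat <<'EOF'
eck-lab elastic password: 3x4mpl3p4ssw0rd
eck-lab elasticsearch endpoint: https://10.0.0.1:9200
eck-lab kibana endpoint: https://10.0.0.2:5601
eck-lab Fleet Server endpoint: https://10.0.0.3:8220
EOF
}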
# check jq
checkjq() {
  if ! [ -x "$(command -v jq)" ]; then
    echo "${red}[DEBUG]${reset} jq is not installed. Please install jq and try again"
    exit
  fi
} # end of checkjq

# check kubectl
checkkubectl() {
  if [ `kubectl version 2>/dev/null | grep -c "Client Version"` -lt 1 ]; then
    echo "${red}[DEBUG]${reset} kubectl is not installed. Please install kubectl and try again"
    exit
  fi
  if [ `kubectl version 2>/dev/null | grep -c "Server Version"` -lt 1 ]; then
    echo "${red}[DEBUG]${reset} kubectl is not connecting to any kubernetes environment"
    echo "${red}[DEBUG]${reset} if you have not set up your k8s environment, please configure your kubernetes environment and try again"
    exit
  fi
} # end of checkkubectl

# function used for version checking and comparing
checkversion() {
  echo "$@" | awk -F. '{ printf("%d%03d%03d%03d\n", $1,$2,$3,$4); }'
} # end of checkversion function
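# Illustrative only (not called anywhere): checkversion() pads the minor fields to three
# digits so that two versions can be compared as plain integers, e.g.
#   checkversion "7.10.2"  ->  7010002000
#   checkversion "1.7.0"   ->  1007000000
# which is how the version gates below work, e.g. [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") ]
example_version_compare() {
  if [ $(checkversion "8.2.0") -ge $(checkversion "7.14.0") ]; then
    echo "8.2.0 is the same as or newer than 7.14.0"
  fi
}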
# check directory exist
checkdir() {
  # check to see if the directory exists - it should not, since this is the start
  if [ -d ${WORKDIR} ]; then
    echo "${red}[DEBUG]${reset} Looks like ${WORKDIR} already exists."
    echo "${red}[DEBUG]${reset} Please run ${blue}`basename $0` cleanup${reset} before trying again"
    echo ""
    help
    exit
  fi # end of if to check if WORKDIR exists
  # create directories and files
  mkdir -p ${WORKDIR}
  cd ${WORKDIR}
  mkdir temp
  echo ${VERSION} > VERSION
  echo ${ECKVERSION} > ECKVERSION
} # checkdir

# check health of various things
checkhealth() {
  sleep 3
  while true
  do
    if [ "`kubectl get ${1} | grep "${2} " | awk '{ print $2 }'`" = "green" ]; then
      sleep 2
      echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} is ${green}HEALTHY${reset}"
      echo ""
      kubectl get ${1}
      echo ""
      break
    else
      echo "${red}[DEBUG]${reset} ${1} is starting. Checking again in 20 seconds. If this does not finish in a few minutes something is wrong. CTRL-C please"
      #echo ""
      #kubectl get ${1}
      #echo ""
      #kubectl get pods | grep "${2} "
      #echo ""
      sleep 20
    fi
  done
} # end checkhealth

###############################################################################################################
# operator
operator() {
  echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} OPERATOR **************${reset}"
  echo ""
  # all version checks complete & directory structures created - starting operator
  if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") ]; then # if version is less than 1.7.0
    echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} downloading operator: all-in-one.yaml"
    if curl -sL --fail https://download.elastic.co/downloads/eck/${ECKVERSION}/all-in-one.yaml -o all-in-one.yaml; then # if curl is successful
      kubectl apply -f all-in-one.yaml > /dev/null 2>&1
    else
      echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Failed to get all-in-one.yaml - check network/version?"
      echo ""
      help
      exit
    fi
  else # if eckversion is not less than 1.7.0
    echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} downloading crds: crds.yaml"
    if curl -fsSL https://download.elastic.co/downloads/eck/${ECKVERSION}/crds.yaml -o crds.yaml; then
      kubectl create -f crds.yaml > /dev/null 2>&1
    else
      echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Failed to get crds.yaml - check network/version?"
      echo ""
      help
      exit
    fi
    echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} downloading operator: operator.yaml"
    if curl -fsSL https://download.elastic.co/downloads/eck/${ECKVERSION}/operator.yaml -o operator.yaml; then
      kubectl create -f operator.yaml > /dev/null 2>&1
    else
      echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Failed to get operator.yaml - check network/version?"
      echo ""
      help
      exit
    fi
  fi

  while true
  do
    if [ "`kubectl -n elastic-system get pod | grep elastic-operator | awk '{ print $3 }'`" = "Running" ]; then
      sleep 2
      echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} OPERATOR is ${green}HEALTHY${reset}"
      echo ""
      kubectl -n elastic-system get all
      echo ""
      break
    else
      echo "${red}[DEBUG]${reset} ECK Operator is starting. Checking again in 20 seconds. If the operator does not go to Running status in a few minutes something is wrong. CTRL-C please"
      # kubectl -n elastic-system get pod
      echo ""
      sleep 20
    fi
  done

  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Creating license.yaml"
  # apply trial license
  cat >>license.yaml< /dev/null 2>&1
  # sleep 30
  # kubectl -n elastic-system get configmap elastic-licensing -o json | jq -r '.data'
} # end of operator

###############################################################################################################
# stack
stack() {
  echo ""
  echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${blue}${VERSION}${green} CLUSTER ${blue}${1}${reset} **************${reset}"
  echo ""
  # create elasticsearch.yaml
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating elasticsearch.yaml"
  cat >> elasticsearch-${1}.yaml < /dev/null 2>&1
  # checkeshealth
  checkhealth "elasticsearch" "${1}"
  # create kibana.yaml
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating kibana.yaml"
  cat >> kibana-${1}.yaml < /dev/null 2>&1
  #checkkbhealth
  checkhealth "kibana" "${1}"
  createsummary ${1}
} # end of stack

###############################################################################################################
# dedicated
dedicated() {
  echo ""
  echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${blue}${VERSION}${green} CLUSTER ${blue}${1}${reset} with DEDICATED masters and data nodes **************${reset}"
  echo ""
  # create elasticsearch.yaml
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating elasticsearch.yaml"
  cat >> elasticsearch-${1}.yaml < /dev/null 2>&1
  # checkeshealth
  checkhealth "elasticsearch" "${1}"
  # create kibana.yaml
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating kibana.yaml"
  cat >> kibana-${1}.yaml < /dev/null 2>&1
  #checkkbhealth
  checkhealth "kibana" "${1}"
  createsummary ${1}
} # end of dedicated
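# Illustrative sketch only (not called anywhere): the general shape of the Elasticsearch and
# Kibana resources that the stack()/dedicated() heredocs emit, based on the standard ECK
# quickstart with a LoadBalancer service (the script greps for the external IP above). The
# exact nodeSets, podTemplates, and service settings used by this script may differ; treat
# the values below as assumptions, not the script's real manifest.
example_stack_manifest() {
  cat <<EOF
apiVersion: elasticsearch.k8s.elastic.co/v1
kind: Elasticsearch
metadata:
  name: ${1}
spec:
  version: ${VERSION}
  http:
    service:
      spec:
        type: LoadBalancer
  nodeSets:
  - name: default
    count: 3
    config:
      node.store.allow_mmap: false
---
apiVersion: kibana.k8s.elastic.co/v1
kind: Kibana
metadata:
  name: ${1}
spec:
  version: ${VERSION}
  count: 1
  elasticsearchRef:
    name: ${1}
  http:
    service:
      spec:
        type: LoadBalancer
EOF
}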
###############################################################################################################
# filebeat autodiscover & metricbeat hosts as daemonset onto k8s hosts
beats() {
  echo ""
  echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${blue}${VERSION}${green} with BEATS **************${reset}"
  echo ""
  # Create and apply metricbeat-rbac
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Creating BEATS crds"
  cat >> beats-crds.yaml< /dev/null 2>&1
  # Create and apply metricbeat-rbac
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Creating BEATS"
  cat >> beats.yaml< /dev/null 2>&1
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} filebeat, metricbeat, packetbeat, & heartbeat deployed"
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Please wait a few minutes for the beats to become healthy (they will restart 3-4 times before becoming healthy) & for the data to start showing"
  #sleep 30
  #echo ""
  #kubectl get daemonset
  echo ""
}

###############################################################################################################
# stack monitoring - beats in pods
monitor1() {
  echo ""
  echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${blue}${VERSION}${green} Stack Monitoring with BEATS in Pods **************${reset}"
  echo ""
  # remove labels from eck-lab-monitor pods
  # is this needed?
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Removing scrape label from monitoring pods"
  for item in `kubectl get pods --no-headers -o custom-columns=":metadata.name" | grep eck-lab-monitor`
  do
    kubectl label pod ${item} scrape- > /dev/null 2>&1
  done
  sleep 10
  # Create and apply monitor1.yaml
  cat >> monitor1.yaml< /dev/null 2>&1
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} Stack monitoring with BEATS in PODS deployed"
  #echo ""
  #kubectl get daemonset
  echo ""
}

###############################################################################################################
# stack monitoring - side car
monitor2() {
  echo ""
  echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${blue}${VERSION}${green} Stack Monitoring with BEATS in sidecar containers **************${reset}"
  echo ""
  # create elasticsearch-eck-lab.yaml
  echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} STACK ${blue}${VERSION}${reset} CLUSTER ${blue}${1}${reset} Creating elasticsearch.yaml"
  cat >> monitor2.yaml < /dev/null 2>&1
  #checkkbhealth
  checkhealth "kibana" "${1}"
  createsummary "${1}"
  echo ""
  # notes
  # you can create a normal deployment and patch it using
  #   kubectl patch kibana eck-lab --type merge -p '{"spec":{"monitoring":{"logs":{"elasticsearchRefs":[{"name":"eck-lab-monitor"}]},"metrics":{"elasticsearchRefs":[{"name":"eck-lab-monitor"}]}}}}'
  # to change it to sidecar monitoring
  #
}
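# Illustrative only: the kubectl patch noted in monitor2() above is equivalent to adding this
# stanza to the Elasticsearch/Kibana spec (sidecar stack monitoring, ECK 1.7.0+ / Stack 7.14.0+):
#   spec:
#     monitoring:
#       metrics:
#         elasticsearchRefs:
#         - name: eck-lab-monitor
#       logs:
#         elasticsearchRefs:
#         - name: eck-lab-monitor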
###############################################################################################################
# fleet server
fleet() {
  echo ""
  echo "${green} ********** Deploying ECK ${blue}${ECKVERSION}${green} STACK ${blue}${VERSION}${green} Fleet Server & elastic-agent **************${reset}"
  echo ""
  # patch kibana
  echo "${green}[DEBUG]${reset} Patching kibana to set fleet settings"
  if [ $(checkversion $ECKVERSION) -lt $(checkversion "2.1.0") ]; then
    kubectl patch kibana eck-lab --type merge -p '{"spec":{"config":{"xpack.fleet.agentPolicies":[{"is_default_fleet_server":true,"name":"Default Fleet Server on ECK policy","package_policies":[{"name":"fleet_server-1","package":{"name":"fleet_server"}}]},{"is_default":true,"name":"Default Elastic Agent on ECK policy","package_policies":[{"name":"system-1","package":{"name":"system"}},{"name":"kubernetes-1","package":{"name":"kubernetes"}}],"unenroll_timeout":900}],"xpack.fleet.agents.elasticsearch.host":"https://eck-lab-es-http.default.svc:9200","xpack.fleet.agents.fleet_server.hosts":["https://fleet-server-agent-http.default.svc:8220"],"xpack.fleet.packages":[{"name":"kubernetes","version":"latest"}]}}}'
  elif [ $(checkversion $ECKVERSION) -ge $(checkversion "2.1.0") ]; then
    kubectl patch kibana eck-lab --type merge -p '{"spec":{"config":{"xpack.fleet.agentPolicies":[{"id":"eck-fleet-server","is_default_fleet_server":true,"monitoring_enabled":["logs","metrics"],"name":"Fleet Server on ECK policy","namespace":"default","package_policies":[{"id":"fleet_server-1","name":"fleet_server-1","package":{"name":"fleet_server"}}]},{"id":"eck-agent","is_default":true,"monitoring_enabled":["logs","metrics"],"name":"Elastic Agent on ECK policy","namespace":"default","package_policies":[{"name":"system-1","package":{"name":"system"}},{"name":"kubernetes-1","package":{"name":"kubernetes"}}],"unenroll_timeout":900}],"xpack.fleet.agents.elasticsearch.host":"https://eck-lab-es-http.default.svc:9200","xpack.fleet.agents.fleet_server.hosts":["https://fleet-server-agent-http.default.svc:8220"],"xpack.fleet.packages":[{"name":"system","version":"latest"},{"name":"elastic_agent","version":"latest"},{"name":"fleet_server","version":"latest"},{"name":"kubernetes","version":"0.14.0"}]}}}' > /dev/null 2>&1
  fi
  echo "${green}[DEBUG]${reset} Sleeping for 60 seconds to wait for kibana to be updated with the patch"
  sleep 60 & # no healthchecks on fleet so just going to sleep for 60
  while kill -0 $! >/dev/null 2>&1
  do
    echo -n "."
    sleep 2
  done
  echo ""

  # create fleet-server.yaml
  echo "${green}[DEBUG]${reset} Creating fleet.yaml"
  cat >> fleet.yaml<> fleet.yaml<> fleet.yaml< /dev/null 2>&1
  # checkfleethealth
  checkhealth "agent" "elastic-agent"

  # get fleet url
  unset FLEETIP
  while [ "${FLEETIP}" = "" -o "${FLEETIP}" = "<pending>" ]
  do
    FLEETIP=`kubectl get service | grep fleet-server-agent-http | awk '{ print $4 }'`
    echo "${green}[DEBUG]${reset} Grabbing Fleet Server endpoint (external): ${blue}https://${FLEETIP}:8220${reset}"
    sleep 2
  done
  echo "${1} Fleet Server endpoint: https://${FLEETIP}:8220" >> notes

  ####
  # things needed
  #   fleet ip -> FLEETIP
  #   es ip -> ESIP
  #   fingerprint
  #
  ## fingerprint
  FINGERPRINT=`openssl x509 -fingerprint -sha256 -noout -in ${WORKDIR}/ca.crt | awk -F"=" {' print $2 '} | sed s/://g`

  # for Fleet Server 8.2+ - Add external output with fingerprint and verification_mode
  if [ $(checkversion $VERSION) -ge $(checkversion "8.2.0") ]; then
    echo "${green}[DEBUG]${reset} Waiting 30 seconds for fleet server to calm down to set the external output"
    sleep 30 &
    while kill -0 $! >/dev/null 2>&1
    do
      echo -n "."
      sleep 2
    done
    echo ""
    # need to set fleet server url
    generate_post_data() {
    cat </dev/null 2>&1
    sleep 10
    # generate fingerprint
    FINGERPRINT=`openssl x509 -fingerprint -sha256 -noout -in ${WORKDIR}/ca.crt | awk -F"=" {' print $2 '} | sed s/://g`
    generate_post_data() {
    cat </dev/null 2>&1
    sleep 10
    # Let's go ahead and create an External agent policy
    # get the id for the external output
    EXTID=`curl -s -k -u "elastic:${PASSWORD}" https://${KIBANAIP}:5601/api/fleet/outputs | jq -r '.items[]| select(.name=="external")|.id'`
    generate_post_data() {
    cat </dev/null 2>&1
    sleep 10
    echo "${green}[DEBUG]${reset} Output: external created. You can use this output for elastic-agent from outside of the k8s cluster."
    echo "${green}[DEBUG]${reset} Please create a new agent policy using the external output if you want to use elastic-agent from outside of the k8s cluster."
    echo "${green}[DEBUG]${reset} Please use https://${FLEETIP}:8220 with --insecure to register your elastic-agent if you are coming from outside of the k8s cluster."
    echo ""
  fi # end if for fleet server 8.2+ external output

  # for Fleet Server 8.1 - 1 output - no changes needed
  # for Fleet Server 8.0 - 1 output - sometimes the output is not set correctly, so fix it here
  if [ $(checkversion $VERSION) -ge $(checkversion "8.0.0") ] && [ $(checkversion $VERSION) -lt $(checkversion "8.2.0") ]; then
    echo "${green}[DEBUG]${reset} Waiting 30 seconds for fleet server to calm down to set the output"
    sleep 30 &
    while kill -0 $! >/dev/null 2>&1
    do
      echo -n "."
      sleep 2
    done
    echo ""
    generate_post_data() {
    cat </dev/null 2>&1
  fi # end if for fleet server 8.0-8.1

  # for Fleet Server < 8.0 only 1 output can be set - do not need to do anything
} # end fleet-server
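# Illustrative only - this script does not run it: enrolling an elastic-agent that lives
# outside of the k8s cluster against the exposed Fleet Server, as suggested by the [DEBUG]
# messages above. The URL placeholder is the FLEETIP printed by fleet(); the enrollment
# token placeholder comes from Fleet in kibana.
#   sudo ./elastic-agent install --url=https://<FLEETIP>:8220 --enrollment-token=<token> --insecure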
###############################################################################################################
# enterprisesearch

###############################################################################################################
# maps server

###############################################################################################################
# main script
ckversion() {
  # manually limiting elasticsearch version to 7.10.0 or greater
  if [ $(checkversion $VERSION) -lt $(checkversion "7.10.0") ]; then
    echo "${red}[DEBUG]${reset} Script is limited to stack version 7.10.0 and higher"
    echo ""
    help
    exit
  fi
  # manually limiting eck version to 1.4 or greater
  if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.4.0") ]; then
    echo "${red}[DEBUG]${reset} Script is limited to operator 1.4.0 and higher"
    echo ""
    help
    exit
  else
    if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") ]; then
      if curl -sL --fail https://download.elastic.co/downloads/eck/${ECKVERSION}/all-in-one.yaml -o /dev/null; then
        echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version validated."
      else
        echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version is invalid."
        echo ""
        help
        exit
      fi
    elif [ $(checkversion $ECKVERSION) -ge $(checkversion "1.7.0") ]; then
      if curl -sL --fail https://download.elastic.co/downloads/eck/${ECKVERSION}/crds.yaml -o /dev/null; then
        echo "${green}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version validated."
      else
        echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} version is invalid."
        echo ""
        help
        exit
      fi
    fi
  fi
  if [ $(checkversion $ECKVERSION) -lt $(checkversion "2.2.0") -a $(checkversion $VERSION) -ge $(checkversion "8.0.0") ]; then
    echo "${red}[DEBUG]${reset} ECK ${blue}${ECKVERSION}${reset} Can not run 8.x. Please use operator 2.2.0+"
    echo ""
    help
    exit
  fi
  echo "${green}[DEBUG]${reset} This might take a while. In another window you can ${blue}watch -n2 kubectl get all${reset} or ${blue}kubectl get events -w${reset} to watch the stack being stood up"
  echo ""
} # end ckversion
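# Illustrative only: how the positional parameters feed the case statement below, e.g.
#   ./deploy-eck.sh fleet 8.2.0 2.2.0
# dispatches to the fleet) branch with VERSION=8.2.0 (stack) and ECKVERSION=2.2.0 (operator),
# which then runs: ckversion, checkdir, operator, stack "eck-lab", fleet "eck-lab", summary.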
# preflight checks before creating directories
checkjq
checkkubectl

case ${1} in
  operator)
    ECKVERSION=${2}
    checkdir
    operator
    ;;
  build|start|stack)
    VERSION=${2}
    ECKVERSION=${3}
    ckversion
    checkdir
    operator
    stack "eck-lab"
    summary
    ;;
  dedicated)
    VERSION=${2}
    ECKVERSION=${3}
    ckversion
    checkdir
    operator
    dedicated "eck-lab"
    summary
    ;;
  beats|beat)
    VERSION=${2}
    ECKVERSION=${3}
    ckversion
    checkdir
    operator
    stack "eck-lab"
    beats "eck-lab"
    summary
    ;;
  monitor1)
    VERSION=${2}
    ECKVERSION=${3}
    ckversion
    checkdir
    operator
    stack "eck-lab"
    stack "eck-lab-monitor"
    monitor1 "eck-lab"
    summary
    ;;
  monitor2)
    VERSION=${2}
    ECKVERSION=${3}
    ckversion
    if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") -o $(checkversion $VERSION) -lt $(checkversion "7.14.0") ]; then
      echo "${red}[DEBUG]${reset} Sidecar stack monitoring started with ECK 1.7.0 & STACK 7.14.0. Please run cleanup and re-run with ECK operator 1.7.0+/Stack 7.14.0+"
      echo ""
      help
      exit
    else
      checkdir
      operator
      stack "eck-lab-monitor"
      monitor2 "eck-lab"
      summary
    fi
    ;;
  # snapshot)
  #   snapshot ${2} ${3}
  #   ;;
  fleet)
    VERSION=${2}
    ECKVERSION=${3}
    ckversion
    if [ $(checkversion $ECKVERSION) -lt $(checkversion "1.7.0") -o $(checkversion $VERSION) -lt $(checkversion "7.14.0") ]; then
      echo "${red}[DEBUG]${reset} Fleet server started with ECK 1.7.0 and STACK 7.14.0. Please run cleanup and re-run with ECK operator 1.7.0+/Stack 7.14.0+"
      echo ""
      help
      exit
    else
      checkdir
      operator
      stack "eck-lab"
      fleet "eck-lab"
      summary
    fi
    ;;
  cleanup|clean|teardown|stop)
    cleanup
    exit
    ;;
  info|summary|detail)
    summary
    ;;
  *)
    help
    exit
    ;;
esac