#!/bin/bash
# Copyright 2016 - 2017 Bitnami
#
# Licensed under the Apache License, Version 2.0 (the "License");
# you may not use this file except in compliance with the License.
# You may obtain a copy of the License at
#
# http://www.apache.org/licenses/LICENSE-2.0
#
# Unless required by applicable law or agreed to in writing, software
# distributed under the License is distributed on an "AS IS" BASIS,
# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
# See the License for the specific language governing permissions and
# limitations under the License.

SUDO=$(which sudo)
DOCKER_SERVER_VERSION=$(docker version --format '{{.Server.Version}}')
DOCKER_CLIENT_VERSION=$(docker version --format '{{.Client.Version}}')
DOCKERFILE=${DOCKERFILE:-Dockerfile}

# Not to be confused with VARIANTS_LIST, which lists different images of the same application built with different modules
SUPPORTED_VARIANTS="dev prod onbuild buildpack php"

DOCKER_PROJECT=${DOCKER_PROJECT:-}
QUAY_PROJECT=${QUAY_PROJECT:-}
GCLOUD_PROJECT=${GCLOUD_PROJECT:-}
AZURE_PROJECT=${AZURE_PROJECT:-}

CHART_OUTPUT_DIR=${CHART_OUTPUT_DIR:-/charts}
# Only set CHART_BRANCH to CIRCLE_BRANCH if it did not have a value previously
if [[ -z "${CHART_BRANCH:-}" ]]; then
  CHART_BRANCH=${CIRCLE_BRANCH:-}
fi
if [[ ${CHART_BRANCH} == "master" ]]; then
  CHART_BRANCH="bitnami"
fi

GIT_AUTHOR_NAME=${GIT_AUTHOR_NAME:-Bitnami Containers}
GIT_AUTHOR_EMAIL=${GIT_AUTHOR_EMAIL:-containers@bitnami.com}
GITHUB_TOKEN=${GITHUB_TOKEN:-$GITHUB_PASSWORD} # required by hub
export GITHUB_TOKEN

IMAGE_NAME=${IMAGE_NAME:-}
IMAGE_TAG=${CIRCLE_TAG:-}
ROLLING_IMAGE_TAG="${IMAGE_TAG%-r*}"
CHART_IMAGE_REPOSITORY=${CHART_IMAGE_REPOSITORY:-$DOCKER_PROJECT/$IMAGE_NAME} # eg: bitnami/wordpress
LATEST_TAG_SOURCE=${LATEST_TAG_SOURCE:-LATEST_STABLE}
SKIP_CHART_PULL_REQUEST=${SKIP_CHART_PULL_REQUEST:-0}
SKIP_CHART_APP_VERSION_UPDATE=${SKIP_CHART_APP_VERSION_UPDATE:-0}
BRANCH_AMEND_COMMITS=0

GOOGLE_CLOUD_SDK_VERSION=162.0.1
HUB_VERSION=2.2.9
HELM_VERSION=2.6.2
IBM_CLI_VERSION=0.6.5

# In release mode CircleCI does not define CIRCLE_BRANCH; assume "master" for caching purposes
CIRCLE_BRANCH=${CIRCLE_BRANCH:-master}

log() {
  echo -e "$(date "+%T.%2N") ${@}"
}

info() {
  log "INFO ==> ${@}"
}

warn() {
  log "WARN ==> ${@}"
}

error() {
  log "ERROR ==> ${@}" >&2
}

# 0 if equal
# 1 if $1 < $2
# -1 if $1 > $2
vercmp() {
  if [[ $1 == $2 ]]; then
    echo "0"
  else
    if [[ $( ( echo "$1"; echo "$2" ) | sort -rV | head -n1 ) == $1 ]]; then
      echo "-1"
    else
      echo "1"
    fi
  fi
}
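# Illustrative calls (assuming GNU `sort -V` semantics):
#   vercmp 1.2.3 1.2.3   # prints "0"
#   vercmp 1.13 17.06.0  # prints "1"  (first argument is older)
#   vercmp 2.0.0 1.9.9   # prints "-1" (first argument is newer)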
DEPRECATED_DEFAULT_DISTRO="debian-8"
DEFAULT_DISTRO="debian-9"

is_default_image() {
  local distro=$1
  local variant=$2
  if ! is_default_distro ${distro} || [ -n "${variant}" ]; then
    return 1
  fi
  return 0
}

get_default_distro() {
  if [[ $DISTRIBUTIONS_LIST == *${DEFAULT_DISTRO}* ]]; then
    echo "${DEFAULT_DISTRO}"
  else
    echo "${DEPRECATED_DEFAULT_DISTRO}"
  fi
}

is_default_distro() {
  [[ $1 == "$(get_default_distro)" ]]
}

get_distro() {
  local -a distros
  IFS=',' read -ra distros <<< "${DISTRIBUTIONS_LIST}"
  for distro in "${distros[@]}"; do
    if [[ $1 == *${distro}-r* ]]; then
      echo "${distro}"
      return 0
    fi
  done
  error "Unable to identify target distribution from the current environment:"
  error " Distributions list: ${DISTRIBUTIONS_LIST}"
  error " Default distro: $(get_default_distro)"
  exit 1
}

get_variant() {
  local image_tag=$1
  local distro=$2
  local -a variants
  IFS=',' read -ra variants <<< "${VARIANTS_LIST:-}"
  for variant in "${variants[@]}"; do
    if is_default_distro ${distro} ; then
      if [[ ${image_tag} == *${variant}-r* ]]; then
        echo "${variant}"
        return 0
      fi
    else
      if [[ ${image_tag} == *${variant}-${distro}-r* ]]; then
        echo "${variant}"
        return 0
      fi
    fi
  done
  echo ""
  return 0
}

# This filters the branches from the release series, which may include combinations of ${branch}-${variant}
get_target_branch() {
  local image_tag=$1
  shift
  local -a all_release_series=("${@}")
  local -a result
  for rs in "${all_release_series[@]}"; do
    # Release series with dashes mean it's a variant
    if [[ $rs != *-* && $image_tag == $rs* ]]; then
      result+=($rs)
    fi
  done
  if [[ ${#result[@]} -gt 1 ]]; then
    error "Found several possible release series that match ${image_tag}: ${result[*]}."
    error "Please review the definition of possible release series"
    exit 1
  elif [[ ${#result[@]} == 0 ]]; then
    error "Cannot find branch for ${image_tag} in release series: ${all_release_series[*]}"
    exit 1
  fi
  echo "${result[0]}"
}

is_base_image() {
  [[ "${IS_BASE_IMAGE}" == "1" ]]
}

get_chart_image_tag() {
  local rolling_tag
  local image_tag
  local distro
  distro="$(get_distro "${IMAGE_TAG}")"
  rolling_tag="${ROLLING_IMAGE_TAG}"
  image_tag="${rolling_tag//-${distro}/}"
  echo "${CHART_IMAGE_TAG:-$image_tag}"
  return 0
}

get_chart_image() {
  local image_tag
  local image_repository
  image_tag="$(get_chart_image_tag)"
  image_repository="${CHART_IMAGE_REPOSITORY}:${image_tag}"
  echo "${CHART_IMAGE:-$image_repository}" # eg: bitnami/wordpress:4.7.6
  return 0
}

# NOTE(jotadrilo) keep different function for the chart app version value,
# it might evolve separately from the image tag value
get_chart_app_version() {
  local rolling_tag
  local image_tag
  local distro
  distro="$(get_distro "${IMAGE_TAG}")"
  rolling_tag="${ROLLING_IMAGE_TAG}"
  image_tag="${rolling_tag//-${distro}/}"
  echo "${CHART_IMAGE_TAG:-$image_tag}"
  return 0
}
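# Illustrative walk-through (hypothetical values, assuming DISTRIBUTIONS_LIST=debian-9
# and CHART_IMAGE_REPOSITORY=bitnami/wordpress):
#   IMAGE_TAG=4.9.8-debian-9-r5  ->  ROLLING_IMAGE_TAG=4.9.8-debian-9
#   get_distro "$IMAGE_TAG"      ->  debian-9
#   get_chart_image_tag          ->  4.9.8 (distro suffix stripped)
#   get_chart_image              ->  bitnami/wordpress:4.9.8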
install_google_cloud_sdk() {
  if ! which gcloud >/dev/null ; then
    if ! which python >/dev/null; then
      info "Installing google-cloud-sdk dependencies..."
      if which apt-get >/dev/null; then
        apt-get update
        apt-get install -y python || return 1
      elif which apk >/dev/null; then
        apk add --no-cache python || return 1
      fi
    fi
    info "Downloading google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-x86_64.tar.gz..."
    if ! curl -sSLO https://dl.google.com/dl/cloudsdk/channels/rapid/downloads/google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-x86_64.tar.gz; then
      error "Could not download google-cloud-sdk..."
      return 1
    fi
    info "Extracting google-cloud-sdk..."
    if ! tar -zxf google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-x86_64.tar.gz -C /usr/local/lib/; then
      error "Could not extract google-cloud-sdk-${GOOGLE_CLOUD_SDK_VERSION}-linux-x86_64.tar.gz..."
      return 1
    fi
    info "Installing google-cloud-sdk..."
    if ! /usr/local/lib/google-cloud-sdk/install.sh -q >/dev/null; then
      error "Could not install google-cloud-sdk..."
      return 1
    fi
    export PATH=/usr/local/lib/google-cloud-sdk/bin:$PATH
    if ! gcloud version; then
      return 1
    fi
  fi
}

install_azure_cli() {
  if ! which az >/dev/null ; then
    info "Installing Python3 and its dependencies..."
    apk update
    if ! which python3 >/dev/null; then
      apk add --no-cache build-base libffi-dev openssl-dev python3 python3-dev
      python3 -m ensurepip
      rm -r /usr/lib/python*/ensurepip
      pip3 install --upgrade pip setuptools
      if [ ! -e /usr/bin/pip ]; then
        ln -s pip3 /usr/bin/pip
      fi
      if [ ! -e /usr/bin/python ]; then
        ln -sf /usr/bin/python3 /usr/bin/python
      fi
      rm -r /root/.cache
    fi
    info "Obtaining the azure-cli installation script..."
    apk add --quiet --no-cache bash libcurl curl
    apk upgrade --quiet libcurl curl
    curl -LO https://azurecliprod.blob.core.windows.net/install.py
    chmod 775 install.py
    info "Installing azure cli..."
    if ! yes "" | python3 install.py ; then
      error "Could not install azure cli..."
      return 1
    fi
    export PATH=$PATH:/root/bin
    if ! az -v ; then
      return 1
    fi
  fi
}

install_hub() {
  if ! which hub >/dev/null ; then
    if which apk >/dev/null; then
      echo "http://dl-cdn.alpinelinux.org/alpine/edge/testing" >> /etc/apk/repositories
      apk add --no-cache hub
    else
      info "Downloading hub-linux-amd64-$HUB_VERSION.tgz..."
      if ! curl -sSLO https://github.com/github/hub/releases/download/v$HUB_VERSION/hub-linux-amd64-$HUB_VERSION.tgz; then
        error "Could not download hub..."
        return 1
      fi
      info "Installing hub..."
      if ! tar -zxf hub-linux-amd64-$HUB_VERSION.tgz --strip 2 hub-linux-amd64-$HUB_VERSION/bin/hub; then
        error "Could not install hub..."
        return 1
      fi
      chmod +x hub
      $SUDO mv hub /usr/local/bin/hub
    fi
    if ! hub version; then
      return 1
    fi
  fi
}

install_yq() {
  if ! which yq >/dev/null ; then
    apk update
    apk add jq py-pip
    pip install yq
  fi
}
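# Note: this installs the pip-based yq (a jq wrapper), which is what the
# `yq -r .image.tag values.yaml`-style calls further below expect, e.g.
# (hypothetical file): yq -r .image.tag ./values.yaml  ->  4.9.8-r0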
install_helm() {
  if ! which helm >/dev/null ; then
    log "Downloading helm-v${HELM_VERSION}-linux-amd64.tar.gz..."
    if ! curl -sSLO https://storage.googleapis.com/kubernetes-helm/helm-v${HELM_VERSION}-linux-amd64.tar.gz; then
      log "Could not download helm..."
      return 1
    fi
    log "Installing helm..."
    if ! tar -zxf helm-v${HELM_VERSION}-linux-amd64.tar.gz --strip 1 linux-amd64/helm; then
      log "Could not install helm..."
      return 1
    fi
    chmod +x helm
    $SUDO mv helm /usr/local/bin/helm
    if ! helm version --client; then
      return 1
    fi
    if ! helm init --client-only >/dev/null; then
      return 1
    fi
  fi
  # update repo indices
  if ! helm repo update >/dev/null; then
    return 1
  fi
}

install_s3cmd() {
  if ! which s3cmd >/dev/null ; then
    if ! which pip >/dev/null; then
      info "Installing s3cmd dependencies..."
      if which apt-get >/dev/null; then
        apt-get update
        apt-get install -y python-pip || return 1
      elif which apk >/dev/null; then
        apk add --quiet --no-cache py-pip || return 1
      fi
    fi
    log "Installing s3cmd..."
    pip install --quiet s3cmd
    if ! s3cmd --version; then
      return 1
    fi
  fi
}

install_ibm_cloud_cli() {
  if ! which bx >/dev/null ; then
    log "downloading ibm cloud client..."
    if ! curl -sSLO https://public.dhe.ibm.com/cloud/bluemix/cli/bluemix-cli/${IBM_CLI_VERSION}/IBM_Cloud_CLI_${IBM_CLI_VERSION}_amd64.tar.gz; then
      log "could not download bx..."
      return 1
    fi
    log "installing bx..."
    if ! tar -xzf IBM_Cloud_CLI_${IBM_CLI_VERSION}_amd64.tar.gz; then
      log "could not extract bx..."
      return 1
    fi
    if ! ./Bluemix_CLI/install_bluemix_cli; then
      log "could not install bx..."
      return 1
    fi
    if ! bx --version; then
      return 1
    fi
  fi
}

docker_login() {
  local username=$DOCKER_USER
  local password=$DOCKER_PASS
  local email=$DOCKER_EMAIL
  local registry=${1}
  case "$1" in
    quay.io )
      username=$QUAY_USER
      password=$QUAY_PASS
      email=$QUAY_EMAIL
      ;;
  esac
  info "Authenticating with ${registry:-docker.io}..."
  if [[ $(vercmp 17.06.0 ${DOCKER_CLIENT_VERSION%%-*}) -lt 0 ]]; then
    DOCKER_LOGIN_ARGS="${email:+-e $email}"
  fi
  DOCKER_LOGIN_ARGS+=" -u $username -p $password"
  docker login $DOCKER_LOGIN_ARGS $registry
}

## docker cache load should probably be performed in the circle.yml build steps,
## but we noticed that the cache was not being loaded properly when done this way.
## As a workaround, the cache load/save is being performed from the script itself.
docker_load_cache() {
  if [[ $(vercmp 1.13 ${DOCKER_SERVER_VERSION%%-*}) -ge 0 ]] && [[ -f /cache/layers.tar ]]; then
    log "Loading docker image layer cache..."
    docker load -i /cache/layers.tar
  fi
}

docker_save_cache() {
  if [[ $(vercmp 1.13 ${DOCKER_SERVER_VERSION%%-*}) -ge 0 ]]; then
    log "Saving docker image layer cache..."
    mkdir -p /cache
    docker save -o /cache/layers.tar $1
  fi
}

docker_build() {
  local IMAGE_BUILD_TAG=${1}
  local IMAGE_BUILD_DIR=${2:-.}
  local IMAGE_BUILD_CACHE=${3:-$1}
  if [[ ! -f $IMAGE_BUILD_DIR/$DOCKERFILE ]]; then
    error "$IMAGE_BUILD_DIR/$DOCKERFILE does not exist, please inspect the release configuration in circle.yml"
    return 1
  fi
  if [[ $(vercmp 1.13 ${DOCKER_SERVER_VERSION%%-*}) -ge 0 ]]; then
    DOCKER_BUILD_CACHE_FROM_ARGS="--cache-from $IMAGE_BUILD_CACHE"
  fi
  info "Building '$IMAGE_BUILD_TAG' from '$IMAGE_BUILD_DIR/'..."
  docker build $DOCKER_BUILD_CACHE_FROM_ARGS --rm=false -f $IMAGE_BUILD_DIR/$DOCKERFILE -t $IMAGE_BUILD_TAG $IMAGE_BUILD_DIR/ || return 1

  # The `prod` containers are built (and published) at the same time that master builds run.
  # Let's skip variant builds for master builds, since `prod` containers use the image tag
  # being published in a parallel build so they will not succeed, ever.
  if [ -n "${CIRCLE_TAG}" ]; then
    for VARIANT in $SUPPORTED_VARIANTS
    do
      if [[ -f $IMAGE_BUILD_DIR/$VARIANT/Dockerfile ]]; then
        if [[ $(vercmp 1.13 ${DOCKER_SERVER_VERSION%%-*}) -ge 0 ]]; then
          DOCKER_BUILD_CACHE_FROM_ARGS="--cache-from $IMAGE_BUILD_CACHE-$VARIANT"
        fi
        info "Building '$IMAGE_BUILD_TAG-$VARIANT' from '$IMAGE_BUILD_DIR/$VARIANT/'..."
        if grep -q "^FROM " $IMAGE_BUILD_DIR/$VARIANT/Dockerfile; then
          docker build $DOCKER_BUILD_CACHE_FROM_ARGS --rm=false -t $IMAGE_BUILD_TAG-$VARIANT $IMAGE_BUILD_DIR/$VARIANT/ || return 1
        else
          echo -e "FROM $IMAGE_BUILD_TAG\n$(cat $IMAGE_BUILD_DIR/$VARIANT/Dockerfile)" | docker build $DOCKER_BUILD_CACHE_FROM_ARGS -t $IMAGE_BUILD_TAG-$VARIANT - || return 1
        fi
      fi
    done
  fi
}
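# Example usage (hypothetical values):
#   docker_build "bitnami/wordpress:4.9.8-r0" "." "bitnami/wordpress:latest"
# builds ./Dockerfile as bitnami/wordpress:4.9.8-r0 (using the latest tag as the
# --cache-from source) and, only on tag builds (CIRCLE_TAG set), also builds any
# <dir>/<variant>/Dockerfile as bitnami/wordpress:4.9.8-r0-<variant>.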
docker_pull() {
  local IMAGE_BUILD_TAG=${1}
  info "Pulling '$IMAGE_BUILD_TAG'..."
  docker pull $IMAGE_BUILD_TAG
  if [ -n "${CIRCLE_TAG}" ]; then
    for VARIANT in $SUPPORTED_VARIANTS
    do
      if [[ -f $RS/$VARIANT/Dockerfile ]]; then
        info "Pulling '$IMAGE_BUILD_TAG-$VARIANT'..."
        docker pull $IMAGE_BUILD_TAG-$VARIANT
      fi
    done
  fi
}

import_signing_key() {
  if [ -n "${DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE:-}" -a -n "${DOCKER_CONTENT_TRUST_REPOSITORY_KEY:-}" ]; then
    base64 -d <<<"${DOCKER_CONTENT_TRUST_REPOSITORY_KEY}" | docker trust key load /dev/stdin
    return 0
  fi
  return 1
}

docker_push() {
  local IMAGE_BUILD_TAG=${1}
  local IMAGE_BUILD_DIR=${2:-.}
  local SUPPORTS_CONTENT_TRUST=${3:-0}
  local ENABLE_DOCKER_CONTENT_TRUST=0
  if [[ "${SUPPORTS_CONTENT_TRUST}" == "1" ]] && import_signing_key; then
    ENABLE_DOCKER_CONTENT_TRUST=1
  fi
  info "Pushing '$IMAGE_BUILD_TAG'..."
  DOCKER_CONTENT_TRUST=${ENABLE_DOCKER_CONTENT_TRUST} docker push $IMAGE_BUILD_TAG
  if [ -n "${CIRCLE_TAG}" ]; then
    for VARIANT in $SUPPORTED_VARIANTS
    do
      if [[ -f $IMAGE_BUILD_DIR/$VARIANT/Dockerfile ]]; then
        info "Pushing '$IMAGE_BUILD_TAG-$VARIANT'..."
        DOCKER_CONTENT_TRUST=${ENABLE_DOCKER_CONTENT_TRUST} docker push $IMAGE_BUILD_TAG-$VARIANT
      fi
    done
  fi
}
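# Content trust is only turned on when import_signing_key finds both variables below;
# the key is expected to be base64-encoded. A minimal sketch (hypothetical file name):
#   export DOCKER_CONTENT_TRUST_REPOSITORY_PASSPHRASE="..."
#   export DOCKER_CONTENT_TRUST_REPOSITORY_KEY="$(base64 < repository.key)"
#   docker_push "bitnami/wordpress:4.9.8-r0" "." 1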
docker_build_and_push() {
  if ! docker_build ${1} ${2} ${3}; then
    return 1
  fi
  docker_push ${1} ${2} ${4}
}

gcloud_docker_push() {
  local IMAGE_BUILD_TAG=${1}
  local IMAGE_BUILD_DIR=${2:-.}
  info "Pushing '$IMAGE_BUILD_TAG'..."
  gcloud docker -- push $IMAGE_BUILD_TAG
  if [ -n "${CIRCLE_TAG}" ]; then
    for VARIANT in $SUPPORTED_VARIANTS
    do
      if [[ -f $IMAGE_BUILD_DIR/$VARIANT/Dockerfile ]]; then
        info "Pushing '$IMAGE_BUILD_TAG-$VARIANT'..."
        gcloud docker -- push $IMAGE_BUILD_TAG-$VARIANT
      fi
    done
  fi
}

gcloud_login() {
  install_google_cloud_sdk || return 1
  info "Authenticating with Google Cloud..."
  echo $GCLOUD_SERVICE_KEY | base64 -d > ${HOME}/gcloud-service-key.json
  gcloud auth activate-service-account --key-file ${HOME}/gcloud-service-key.json
}

az_login() {
  install_azure_cli || return 1
  info "Authenticating with Azure Portal..."
  az login -u "${AZURE_PORTAL_USER}" -p "${AZURE_PORTAL_PASS}" >/dev/null
}

docker_build_and_gcloud_push() {
  if ! docker_build ${1} ${2} ${3}; then
    return 1
  fi
  gcloud_docker_push ${1} ${2}
}

ibm_login() {
  install_ibm_cloud_cli || return 1
  info "Install the Container Registry plug-in...."
  bx plugin install container-registry -r Bluemix
  info "Authenticating with IBM Cloud..."
  bx login -a https://api.ng.bluemix.net --apikey $IBM_API_KEY
  info "Logging Docker daemon into the IBM Cloud Container Registry."
  bx cr login
}

git_configure() {
  git config --global user.name "$GIT_AUTHOR_NAME"
  git config --global user.email "$GIT_AUTHOR_EMAIL"
  if [[ -n $GITHUB_USER && -n $GITHUB_TOKEN ]]; then
    git config --global credential.helper store
    echo "https://$GITHUB_USER:$GITHUB_TOKEN@github.com" > ~/.git-credentials
  fi
}

git_create_branch() {
  git fetch development 2>/dev/null || { error "Could not fetch development remote"; return 1; }
  if ! git checkout $1 2>/dev/null; then
    info "Creating branch for new pull-request..."
    git checkout -b $1
  else
    info "Amending updates to existing branch..."
    BRANCH_AMEND_COMMITS=1
  fi
  return 0
}

# This function expects two arguments: the repository to update and the branch to merge
git_automerge_branch() {
  local REPO_TO_UPDATE="${1%.git}.git" BRANCH_NAME=$2
  info "Auto-merging $BRANCH_NAME in $REPO_TO_UPDATE..."
  git checkout master >/dev/null
  git merge --no-ff $BRANCH_NAME >/dev/null
  git remote add origin-automerge https://$GITHUB_USER@$(echo ${REPO_TO_UPDATE/https:\/\/}) >/dev/null
  git push origin-automerge master >/dev/null
  git remote remove origin-automerge >/dev/null
}

# This function expects three arguments: the repository to update, a branch pattern
# to search for and a pattern to exclude
git_cleanup_old_branches() {
  local REPO_TO_UPDATE="${1%.git}.git" PATTERN_TO_SEARCH=$2 PATTERN_TO_EXCLUDE=$3 TEMP_REMOTE="origin-cleanup"
  info "Cleaning up old branches in '$TEMP_REMOTE' remote that match '$PATTERN_TO_SEARCH' (excluding '$PATTERN_TO_EXCLUDE')"
  git remote add $TEMP_REMOTE https://$GITHUB_USER@$(echo ${REPO_TO_UPDATE/https:\/\/}) >/dev/null
  for branch in $(git branch --remote --list $TEMP_REMOTE/$PATTERN_TO_SEARCH | sed "s?.*${TEMP_REMOTE}/??" | grep -v "$PATTERN_TO_EXCLUDE")
  do
    log "Deleting $branch..."
    git push $TEMP_REMOTE :$branch >/dev/null
  done
  git remote remove $TEMP_REMOTE >/dev/null
}
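# Example (hypothetical values), mirroring how update_chart_in_repo calls it: delete
# every stale wordpress-* branch except the one for the release currently in flight:
#   git_cleanup_old_branches "https://github.com/bitnami/charts" "wordpress-*" "^wordpress-5.0.1$"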
dockerhub_update_description() {
  if [[ -f README.md ]]; then
    if ! curl -sSf "https://hub.docker.com/v2/users/login/" \
        -H "Content-Type: application/json" \
        --data '{"username": "'${DOCKER_USER}'", "password": "'${DOCKER_PASS}'"}' -o /tmp/token.json; then
      return 1
    fi
    DOCKER_TOKEN=$(grep token /tmp/token.json | cut -d':' -f2 | cut -d'"' -f2)
    info "Updating image description on Docker Hub..."
    echo "{\"full_description\": \"$(sed 's/\\/\\\\/g' README.md | sed 's/"/\\"/g' | sed -E ':a;N;$!ba;s/\r{0,1}\t/\\t/g' | sed -E ':a;N;$!ba;s/\r{0,1}\n/\\n/g')\"}" > /tmp/description.json
    if ! curl -sSf "https://hub.docker.com/v2/repositories/$DOCKER_PROJECT/$IMAGE_NAME/" -o /dev/null \
        -H "Content-Type: application/json" \
        -H "Authorization: JWT ${DOCKER_TOKEN}" \
        -X PATCH --data @/tmp/description.json; then
      return 1
    fi
  fi
}

add_repo_to_helm() {
  local REPO_NAME=${1}
  local REPO_URL=${2}
  helm repo add $REPO_NAME $REPO_URL
}

chart_update_image_tag() {
  local CHART_PATH=${1}
  local CHART_VALUES_YAMLS=$(find ${CHART_PATH} -name "values*.yaml")
  local CHART_NEW_IMAGE_TAG=${2}
  local CHART_IMAGE_REPOSITORY=${3}
  local CHART_CURRENT_IMAGE_TAG="null"

  # As the Tensorflow and Parse charts use two docker images, we need to update the proper image tag
  case ${CHART_IMAGE_REPOSITORY} in
    "bitnami/parse" | "bitnami/tensorflow-serving" )
      CHART_CURRENT_IMAGE_TAG=$(yq -r .server.image.tag ${CHART_PATH}/values.yaml)
      ;;
    "bitnami/parse-dashboard" )
      CHART_CURRENT_IMAGE_TAG=$(yq -r .dashboard.image.tag ${CHART_PATH}/values.yaml)
      ;;
    "bitnami/tensorflow-inception" )
      CHART_CURRENT_IMAGE_TAG=$(yq -r .client.image.tag ${CHART_PATH}/values.yaml)
      ;;
    * )
      CHART_CURRENT_IMAGE_TAG=$(yq -r .image.tag ${CHART_PATH}/values.yaml)
      ;;
  esac

  if [[ ${CHART_CURRENT_IMAGE_TAG} == "null" ]]; then
    error "Unable to determine current image tag"
    exit 1
  fi

  case $(vercmp $CHART_CURRENT_IMAGE_TAG $CHART_NEW_IMAGE_TAG) in
    "0" )
      warn "Chart image tag \`${CHART_NEW_IMAGE_TAG}\` has not been updated."
      return 1
      ;;
    "-1" )
      info "Current chart image tag \`${CHART_CURRENT_IMAGE_TAG}\` is newer than ${CHART_NEW_IMAGE_TAG}"
      return 1
      ;;
    "1" )
      info "Updating chart image tag to '${CHART_NEW_IMAGE_TAG}'..."
      sed -i 's|\(.*\)'"${CHART_CURRENT_IMAGE_TAG}"'|\1'"${CHART_NEW_IMAGE_TAG}"'|' ${CHART_VALUES_YAMLS}
      git add ${CHART_VALUES_YAMLS}
      git commit --signoff -m "$CHART_NAME: update to \`${CHART_NEW_IMAGE_TAG}\`" >/dev/null
      ;;
  esac
}

chart_update_appVersion() {
  if [[ $SKIP_CHART_APP_VERSION_UPDATE -eq 0 ]]; then
    local CHART_PATH=${1}
    local CHART_NEW_APP_VERSION=${2}
    local CHART_CURRENT_APP_VERSION=$(grep ^appVersion ${CHART_PATH}/Chart.yaml | awk '{print $2}')

    # adds appVersion field if it's not present
    if ! grep -q ^appVersion ${CHART_PATH}/Chart.yaml; then
      sed -i '/^version/a appVersion: ' ${CHART_PATH}/Chart.yaml
    fi

    if [[ $(vercmp $CHART_CURRENT_APP_VERSION $CHART_NEW_APP_VERSION) -ne 0 ]]; then
      info "Updating chart appVersion to '$CHART_NEW_APP_VERSION'..."
      sed -i 's|^appVersion:.*|appVersion: '"${CHART_NEW_APP_VERSION}"'|g' ${CHART_PATH}/Chart.yaml
      git add ${1}/Chart.yaml
      git commit --signoff -m "$CHART_NAME: bump chart appVersion to \`$CHART_NEW_APP_VERSION\`" >/dev/null
    fi
  fi
}

chart_update_requirements() {
  if [[ -f ${1}/requirements.lock ]]; then
    install_helm || exit 1
    rm -rf ${1}/requirements.lock
    helm dependency update ${1} >/dev/null
    if git diff | grep -q '^+[ ]*version:' ; then
      info "Updating chart requirements.lock..."
      git add ${1}/requirements.lock
      git commit --signoff -m "$CHART_NAME: updated chart requirements" >/dev/null
    else
      git checkout ${1}/requirements.lock
    fi
  fi
}

chart_update_version() {
  info "Updating chart version to '$2'..."
  sed -i 's|^version:.*|version: '"${2}"'|g' ${1}/Chart.yaml
  git add ${1}/Chart.yaml
  git commit --signoff -m "$CHART_NAME: bump chart version to \`$2\`" >/dev/null
}

chart_package() {
  local chart=${1}
  local name=${chart##*/}
  local src=${CIRCLE_WORKING_DIRECTORY}/${chart}
  local dest=${CHART_OUTPUT_DIR}/${CHART_BRANCH}
  local repo_index=${dest}/index.yaml
  mkdir -p $dest

  # download the repo index.yaml file if it does not exist
  MERGE_INDEX=
  if [[ -f $repo_index ]]; then
    MERGE_INDEX=1
  elif [[ -n $AWS_BUCKET && -n $AWS_ACCESS_KEY_ID && -n $AWS_SECRET_ACCESS_KEY ]]; then
    # install s3cmd
    install_s3cmd || exit 1
    # the download will fail for new repos
    if s3cmd get s3://$AWS_BUCKET/${CHART_BRANCH}/index.yaml $repo_index; then
      MERGE_INDEX=1
    fi
  fi

  if [[ -f $src/requirements.yaml ]]; then
    info "Building $name dependencies..."
    helm dependency build $src >/dev/null
  fi

  info "Packaging $src at $dest..."
  helm package --destination $dest $src >/dev/null || return 1
  helm repo index $dest ${CHART_URL:+--url $CHART_URL/${CHART_BRANCH}} ${MERGE_INDEX:+--merge $repo_index}
}
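# With the defaults above, packaging chart "wordpress" on the master branch would
# roughly produce (hypothetical version):
#   /charts/bitnami/wordpress-5.0.1.tgz
#   /charts/bitnami/index.yaml   # merged with the previous index when one exists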
update_chart_in_repo() {
  local REPO_TO_UPDATE="${1%.git}.git"
  info "Cloning '$REPO_TO_UPDATE' repo..."
  if ! git clone --quiet --single-branch $REPO_TO_UPDATE charts; then
    error "Could not clone $REPO_TO_UPDATE..."
    exit 1
  fi
  cd charts

  # add development remote
  git remote add development https://$GITHUB_USER@github.com/$GITHUB_USER/$(echo ${REPO_TO_UPDATE/https:\/\/github.com\/} | tr / -)

  # lookup chart in the chart repo
  CHART_PATH=
  for d in $(find * -type d -name $CHART_NAME )
  do
    if [ -f $d/Chart.yaml ]; then
      CHART_PATH=$d
      break
    fi
  done

  if [[ -z $CHART_PATH ]]; then
    error "Chart '$CHART_NAME' could not be found in '$REPO_TO_UPDATE' repo"
    exit 1
  fi

  if [[ -z $GITHUB_USER || -z $GITHUB_TOKEN ]]; then
    error "GitHub credentials not configured. Aborting..."
    exit 1
  fi

  git_configure

  # generate next chart version
  CHART_VERSION=$(grep '^version:' $CHART_PATH/Chart.yaml | awk '{print $2}')
  CHART_VERSION_FIRST_UPDATE="${CHART_VERSION%.*}.$((${CHART_VERSION##*.}+1))"

  # create a branch for the updates
  git_create_branch "${CHART_NAME}-${CHART_VERSION_FIRST_UPDATE}"

  # check if we should calculate next chart version from version in master or version in existing branch
  CHART_VERSION_IN_BRANCH=$(grep '^version:' $CHART_PATH/Chart.yaml | awk '{print $2}')
  case $(vercmp $CHART_VERSION_IN_BRANCH $CHART_VERSION) in
    "0" )
      warn "Chart version is the same in master and the branch"
      CHART_VERSION_TO_UPDATE="${CHART_VERSION%.*}.$((${CHART_VERSION##*.}+1))"
      ;;
    "1" )
      warn "Chart version in master (${CHART_VERSION}) is newer than chart version in branch (${CHART_VERSION_IN_BRANCH})"
      CHART_VERSION_TO_UPDATE="${CHART_VERSION%.*}.$((${CHART_VERSION##*.}+1))"
      ;;
    "-1" )
      info "Chart version in branch (${CHART_VERSION_IN_BRANCH}) is newer than chart version in master (${CHART_VERSION})"
      CHART_VERSION_TO_UPDATE="${CHART_VERSION_IN_BRANCH}"
      ;;
  esac

  info "Installing jq and yq..."
  install_yq || exit 1

  if chart_update_image_tag $CHART_PATH "$(get_chart_image_tag)" $CHART_IMAGE_REPOSITORY; then
    chart_update_requirements $CHART_PATH
    chart_update_appVersion $CHART_PATH "$(get_chart_app_version)"
    chart_update_version $CHART_PATH $CHART_VERSION_TO_UPDATE
    info "Publishing branch to remote repo..."
    git push development $CHART_NAME-$CHART_VERSION_FIRST_UPDATE >/dev/null
    if [[ $SKIP_CHART_PULL_REQUEST -eq 0 && $BRANCH_AMEND_COMMITS -eq 0 ]]; then
      install_hub || exit 1
      info "Creating pull request with '$REPO_TO_UPDATE' repo..."
      if ! hub pull-request -m "[$CHART_PATH] Release $CHART_VERSION_FIRST_UPDATE"; then
        error "Could not create pull request"
        exit 1
      fi
      # auto merge updates to https://github.com/bitnami/charts
      if [[ $REPO_TO_UPDATE == "https://github.com/bitnami/charts.git" ]]; then
        git_automerge_branch $REPO_TO_UPDATE "${CHART_NAME}-${CHART_VERSION_FIRST_UPDATE}"
      fi
    fi
    info "Cleaning up old branches..."
    git_cleanup_old_branches $REPO_TO_UPDATE "${CHART_NAME}-*" "^$CHART_NAME-$CHART_VERSION_FIRST_UPDATE$"
  else
    warn "Chart release/updates skipped!"
  fi

  info "Setting original value for BRANCH_AMEND_COMMITS variable..."
  BRANCH_AMEND_COMMITS=0

  info "Cleaning up repository"
  # Exiting from charts/ repository
  cd ..
  rm -rf charts
}
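# Example invocation (hypothetical repository, distribution and digest), as it might be
# run after a new minideb build is published:
#   update_minideb_derived "https://github.com/bitnami/minideb-extras" "stretch" \
#     "bitnami/minideb@sha256:<digest>"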
# Expects three parameters: the repository to update, the distribution and the new repository digest
update_minideb_derived() {
  if [ -z "$1" ] || [ -z "$2" ] || [ -z "$3" ]; then
    error "Wrong arguments passed to update_minideb_derived function"
    return 1
  fi

  # Append '.git' so TravisCI git version can understand the URL
  local REPO_TO_UPDATE="${1%.git}.git"
  local DIST=$2
  local DIST_REPO_DIGEST=$3
  local BASENAME=bitnami/minideb

  if [[ -z $GITHUB_USER || -z $GITHUB_TOKEN ]]; then
    error "GitHub credentials not configured. Aborting..."
    return 1
  fi

  git_configure

  info "Cloning '$REPO_TO_UPDATE' repo..."
  if ! git clone --quiet --single-branch "$REPO_TO_UPDATE" minideb-derived; then
    error "Could not clone $REPO_TO_UPDATE..."
    return 1
  fi
  cd minideb-derived

  # add development remote
  git remote add development "https://$GITHUB_USER@github.com/$GITHUB_USER/$(echo ${REPO_TO_UPDATE/https:\/\/github.com\/bitnami\/} | tr / -)"

  # lookup the distro inside the repo
  local DIST_PATH=
  for d in $(find * -type d -name $DIST )
  do
    if [ -f "$d/Dockerfile" ]; then
      DIST_PATH=$d
      break
    fi
  done

  if [[ -z $DIST_PATH ]]; then
    error "Distro '$DIST' could not be found in '$REPO_TO_UPDATE' repo"
    return 0
  else
    cd "$DIST_PATH"
  fi

  # Check if we need to update the Dockerfile
  if grep "${DIST_REPO_DIGEST}" Dockerfile &>/dev/null ; then
    info "Dockerfile for dist ${DIST} is already using the new build"
    return 0
  fi

  # Get version
  local IMAGE_VERSION=$(grep '^ENV BITNAMI_IMAGE_VERSION' ./Dockerfile | awk -F '=' '{print $2}')
  local NEW_IMAGE_VERSION=${IMAGE_VERSION%r*}r$((${IMAGE_VERSION##*r}+1))

  # Create branch
  local BRANCH_NAME_BASE="minideb_update"
  local BRANCH_NAME="${BRANCH_NAME_BASE}_${NEW_IMAGE_VERSION}"
  if ! git_create_branch $BRANCH_NAME ; then error "Could not create new branch..."; exit 1; fi

  # Substitute the base image
  sed -i "s|^FROM .*$|FROM ${DIST_REPO_DIGEST}|" ./Dockerfile
  # Bump image version
  sed -i "s|^ENV BITNAMI_IMAGE_VERSION.*$|ENV BITNAMI_IMAGE_VERSION=${NEW_IMAGE_VERSION}|" ./Dockerfile

  # Commit changes
  git add ./Dockerfile
  git commit --signoff -m "[$DIST] Update minideb base image" >/dev/null

  # Push branch with changes
  info "Publishing branch to remote repo..."
  git push development ${BRANCH_NAME} >/dev/null

  if [[ $BRANCH_AMEND_COMMITS -eq 0 ]]; then
    install_hub || return 1
    info "Creating pull request with '$REPO_TO_UPDATE' repo..."
    if ! hub pull-request -m "[$DIST] Update minideb base image"; then
      error "Could not create pull request"
      return 1
    fi
  fi

  # auto merge updates
  info "Auto-merging ${BRANCH_NAME} in ${REPO_TO_UPDATE}..."
  git_automerge_branch $REPO_TO_UPDATE $BRANCH_NAME

  # create a new release
  if ! hub release create -m "Update minideb base image" $NEW_IMAGE_VERSION ; then
    error "Could not create a release"
    return 1
  fi

  info "Cleaning up old branches..."
  git_cleanup_old_branches $REPO_TO_UPDATE "${BRANCH_NAME_BASE}_*" "^${BRANCH_NAME}$"

  info "Setting original value for BRANCH_AMEND_COMMITS variable..."
  BRANCH_AMEND_COMMITS=0

  info "Cleaning up repository"
  # Exiting from 'minideb-derived' repository
  cd ../../
  rm -rf minideb-derived
}