---
# This file is a template, and might need editing before it works on your project.
# Auto DevOps
# This CI/CD configuration provides a standard pipeline for
# * building a Docker image (using a buildpack if necessary),
# * storing the image in the container registry,
# * running tests from a buildpack,
# * running code quality analysis,
# * creating a review app for each topic branch,
# * and continuous deployment to production
#
# To deploy, you must have a working sloppy.io Account
# via a project integration, or via group/project variables.
# AUTO_DEVOPS_DOMAIN must also be set as a variable at the group or project
# level, or manually added below.
#
# If you want to deploy to staging first, or enable canary deploys,
# uncomment the relevant jobs in the pipeline below.
#
# If Auto DevOps fails to detect the proper buildpack, or if you want to
# specify a custom buildpack, set a project variable `BUILDPACK_URL` to the
# repository URL of the buildpack.
# e.g. BUILDPACK_URL=https://github.com/heroku/heroku-buildpack-ruby.git#v142
# If you need multiple buildpacks, add a file to your project called
# `.buildpacks` that contains the URLs, one on each line, in order.
# Note: Auto CI does not work with multiple buildpacks yet

image: alpine:3.8

variables:
  # AUTO_DEVOPS_DOMAIN is the application deployment domain and should be set
  # as a variable at the group or project level.
  # AUTO_DEVOPS_DOMAIN: domain.example.com

  POSTGRES_USER: user
  POSTGRES_PASSWORD: testing-password
  POSTGRES_ENABLED: "true"
  POSTGRES_DB: $CI_ENVIRONMENT_SLUG
  # Quoted so YAML always keeps the version a string (never a number).
  CODECLIMATE_VERSION: "0.78.1"

stages:
  - build
  - test
  - review
  - staging
  - canary
  - production
  - performance
  - cleanup

# Builds the application image (Dockerfile or herokuish buildpack — see the
# `build` shell function in `.auto_devops` below) and pushes it to the
# GitLab container registry.
build:
  stage: build
  image: docker:git
  services:
    - docker:dind
  variables:
    DOCKER_DRIVER: overlay2
  script:
    - setup_docker
    - build
  only:
    - branches

# Example for running tests with a postgres database. Write your own ones.
# Hidden job (leading dot) — example test stage; remove the dot to enable.
.test:
  services:
    - postgres:latest
  variables:
    POSTGRES_DB: test
  stage: test
  image: gliderlabs/herokuish:latest
  script:
    - setup_test_db
    - cp -R . /tmp/app
    - /bin/herokuish buildpack test
  only:
    - branches

# Performance tests using sitespeed.io
.performance:
  stage: performance
  image: docker:latest
  variables:
    DOCKER_DRIVER: overlay2
  allow_failure: true
  services:
    - docker:dind
  script:
    - setup_docker
    - performance
  artifacts:
    paths:
      - performance.json
  only:
    refs:
      - branches

# Code quality analysis via the Code Climate CLI (see the `codeclimate`
# shell function in `.auto_devops` below).
.codequality:
  image: docker:latest
  variables:
    DOCKER_DRIVER: overlay2
  allow_failure: true
  services:
    - docker:dind
  script:
    - setup_docker
    - codeclimate
  artifacts:
    paths: [codeclimate.json]

# Deploys a review app for every non-master branch.
review:
  stage: review
  script:
    - check_autodevops_domain
    - install_dependencies
    - ensure_namespace
    - deploy
    - persist_environment_url
  environment:
    # name: review/$CI_COMMIT_REF_NAME
    name: $CI_COMMIT_REF_NAME
    url: https://$SLOPPY_PROJECTNAME-$CI_COMMIT_REF_NAME.$AUTO_DEVOPS_DOMAIN
    on_stop: stop_review
  artifacts:
    paths: [environment_url.txt]
  only:
    refs:
      - branches
  except:
    - master

# Tears the review app down; triggered manually or via `on_stop` above.
stop_review:
  stage: cleanup
  variables:
    GIT_STRATEGY: none
  script:
    - install_dependencies
    - delete
  environment:
    # Fixed: `name:` appeared twice here (review/$CI_COMMIT_REF_NAME and
    # $CI_COMMIT_REF_NAME) — duplicate mapping keys are invalid YAML. Keep
    # the same form the `review` job uses so stop targets the right env.
    # name: review/$CI_COMMIT_REF_NAME
    name: $CI_COMMIT_REF_NAME
    action: stop
  when: manual
  allow_failure: true
  only:
    refs:
      - branches
  except:
    - master

# Keys that start with a dot (.) will not be processed by GitLab CI.
# Staging and canary jobs are disabled by default, to enable them
# remove the dot (.) before the job name.
# https://docs.gitlab.com/ee/ci/yaml/README.html#hidden-keys

# Staging deploys are disabled by default since
# continuous deployment to production is enabled by default
# If you prefer to automatically deploy to staging and
# only manually promote to production, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.
# Deploys `master` to a staging environment before manual production promote.
staging:
  stage: staging
  script:
    - check_autodevops_domain
    - install_dependencies
    - ensure_namespace
    - deploy
    - persist_environment_url
  environment:
    name: staging
    url: https://$SLOPPY_PROJECTNAME-staging.$AUTO_DEVOPS_DOMAIN
  artifacts:
    paths: [environment_url.txt]
  only:
    refs:
      - master

# Canaries are disabled by default, but if you want them,
# and know what the downsides are, enable this job by removing the dot (.),
# and uncomment the `when: manual` line in the `production` job.
#
# Fixed: this job previously called Helm/Kubernetes helpers (download_chart,
# install_tiller, create_secret) that are not defined in this file's
# `.auto_devops` script, used $CI_PROJECT_PATH_SLUG instead of
# $SLOPPY_PROJECTNAME in its URL, and was gated on `kubernetes: active`,
# which never holds for a sloppy.io deployment — enabling it would have
# failed immediately. It now mirrors the other sloppy.io deploy jobs.
.canary:
  stage: canary
  script:
    - check_autodevops_domain
    - install_dependencies
    - ensure_namespace
    - deploy canary
  environment:
    name: production
    url: https://$SLOPPY_PROJECTNAME-production.$AUTO_DEVOPS_DOMAIN
  when: manual
  only:
    refs:
      - master

# This job continuously deploys to production on every push to `master`.
# To make this a manual process, either because you're enabling `staging`
# or `canary` deploys, or you simply want more control over when you deploy
# to production, uncomment the `when: manual` line in the `production` job.
# Deploys `master` to production (manual trigger — see the header comment
# above about continuous vs. manual deployment).
production:
  stage: production
  script:
    - check_autodevops_domain
    - install_dependencies
    - ensure_namespace
    - deploy
    - persist_environment_url
    # - delete canary
  environment:
    name: production
    url: https://$SLOPPY_PROJECTNAME-production.$AUTO_DEVOPS_DOMAIN
  artifacts:
    paths: [environment_url.txt]
  when: manual
  only:
    refs:
      - master

# ---------------------------------------------------------------------------

.auto_devops: &auto_devops |
  # Auto DevOps variables and functions
  [[ "$TRACE" ]] && set -x
  auto_database_url=postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${CI_ENVIRONMENT_SLUG}-postgres:5432/${POSTGRES_DB}
  export DATABASE_URL=${DATABASE_URL-$auto_database_url}
  export CI_APPLICATION_REPOSITORY=$CI_REGISTRY_IMAGE/$CI_COMMIT_REF_SLUG
  export CI_APPLICATION_TAG=$CI_COMMIT_SHA
  export CI_CONTAINER_NAME=ci_job_build_${CI_JOB_ID}
  # NOTE(review): Helm/Tiller leftover from the upstream template; harmless
  # for sloppy.io deploys but presumably unused here — confirm and drop.
  export TILLER_NAMESPACE=$KUBE_NAMESPACE

  # Normalize $CI_ENVIRONMENT_NAME into a valid, short sloppy.io project
  # name: replace "/" with "-" (slashes are invalid in project names),
  # shorten "review" to "r", and truncate to 23 chars so the resulting
  # FQDN does not get too long. Used by ensure_namespace, deploy and delete
  # so all three address the SAME project.
  function cleaned_environment_name() {
    echo ${CI_ENVIRONMENT_NAME//\//-} | sed s/review/r/g | cut -c 1-23
  }

  # Run Code Climate analysis against the checked-out code; writes the JSON
  # report to codeclimate.json for the .codequality job's artifacts.
  function codeclimate() {
    cc_opts="--env CODECLIMATE_CODE="$PWD" \
             --volume "$PWD":/code \
             --volume /var/run/docker.sock:/var/run/docker.sock \
             --volume /tmp/cc:/tmp/cc"

    docker run ${cc_opts} "codeclimate/codeclimate:${CODECLIMATE_VERSION}" init
    docker run ${cc_opts} "codeclimate/codeclimate:${CODECLIMATE_VERSION}" analyze -f json > codeclimate.json
  }

  # Deploy the freshly built image to sloppy.io. Optional $1 is the track
  # ("stable" by default, or "canary").
  function deploy() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi

    replicas="1"
    service_enabled="false"
    postgres_enabled="$POSTGRES_ENABLED"
    # canary uses stable db
    [[ "$track" == "canary" ]] && postgres_enabled="false"

    # NOTE(review): kept `tr -s` from the upstream template; -s also squeezes
    # repeated characters in the output — confirm slugs never rely on them.
    env_track=$( echo $track | tr -s '[:lower:]' '[:upper:]' )
    env_slug=$( echo ${CI_ENVIRONMENT_SLUG//-/_} | tr -s '[:lower:]' '[:upper:]' )

    if [[ "$track" == "stable" ]]; then
      # for stable track get number of replicas from `PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_slug}_REPLICAS
      service_enabled="true"
    else
      # for all tracks get number of replicas from `CANARY_PRODUCTION_REPLICAS`
      eval new_replicas=\$${env_track}_${env_slug}_REPLICAS
    fi
    if [[ -n "$new_replicas" ]]; then
      replicas="$new_replicas"
    fi

    # To-Do: Deploy new Project for a new Branch
    # Fixed: use the same cleaned name that ensure_namespace created the
    # project with; the raw $CI_ENVIRONMENT_NAME may contain "/" (e.g. a
    # feature/x branch) and would then target a non-existent project.
    CLEANED_CI_ENVIRONMENT_NAME=$(cleaned_environment_name)
    echo "Doing: sloppy change -img $CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG $SLOPPY_PROJECTNAME-$CLEANED_CI_ENVIRONMENT_NAME$SLOPPY_APP_PATH"
    sloppy change -img $CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG $SLOPPY_PROJECTNAME-$CLEANED_CI_ENVIRONMENT_NAME$SLOPPY_APP_PATH
  }

  # Install the tools the deploy jobs need on the alpine base image:
  # glibc (required by the sloppy binary) and the sloppy CLI itself.
  function install_dependencies() {
    apk add -U openssl curl tar gzip bash ca-certificates git
    wget -q -O /etc/apk/keys/sgerrand.rsa.pub https://alpine-pkgs.sgerrand.com/sgerrand.rsa.pub
    wget https://github.com/sgerrand/alpine-pkg-glibc/releases/download/2.23-r3/glibc-2.23-r3.apk
    apk add glibc-2.23-r3.apk
    rm glibc-2.23-r3.apk

    # Fixed: the original combined `-o /usr/local/bin/sloppy` with a stray
    # `> /tmp/sloppy` redirect, which only produced an empty junk file.
    curl -L -o /usr/local/bin/sloppy https://files.sloppy.io/sloppy-`uname -s`-`uname -m`
    chmod +x /usr/local/bin/sloppy
    sloppy version
  }

  # Point the docker CLI at the dind service when no daemon is reachable.
  function setup_docker() {
    if ! docker info &>/dev/null; then
      if [ -z "$DOCKER_HOST" -a "$KUBERNETES_PORT" ]; then
        export DOCKER_HOST='tcp://localhost:2375'
      fi
    fi
  }

  # Export DATABASE_URL for the test stage, picking the postgres host that
  # matches the executor (service alias vs. localhost).
  function setup_test_db() {
    if [ -z ${KUBERNETES_PORT+x} ]; then
      DB_HOST=postgres
    else
      DB_HOST=localhost
    fi
    export DATABASE_URL="postgres://${POSTGRES_USER}:${POSTGRES_PASSWORD}@${DB_HOST}:5432/${POSTGRES_DB}"
  }

  # Make sure a sloppy.io project exists for this environment, creating it
  # from sloppy.yml on first deploy.
  function ensure_namespace() {
    CLEANED_CI_ENVIRONMENT_NAME=$(cleaned_environment_name)
    echo "CLEANED_CI_ENVIRONMENT_NAME=$CLEANED_CI_ENVIRONMENT_NAME"
    echo "Doing: sloppy show $SLOPPY_PROJECTNAME-$CLEANED_CI_ENVIRONMENT_NAME || sloppy start --var=domain:$SLOPPY_PROJECTNAME-$CLEANED_CI_ENVIRONMENT_NAME.$AUTO_DEVOPS_DOMAIN --var=project:$SLOPPY_PROJECTNAME-$CLEANED_CI_ENVIRONMENT_NAME --var=image:$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG sloppy.yml"
    sloppy show $SLOPPY_PROJECTNAME-$CLEANED_CI_ENVIRONMENT_NAME || sloppy start --var=domain:$SLOPPY_PROJECTNAME-$CLEANED_CI_ENVIRONMENT_NAME.$AUTO_DEVOPS_DOMAIN --var=project:$SLOPPY_PROJECTNAME-$CLEANED_CI_ENVIRONMENT_NAME --var=image:$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG sloppy.yml
  }

  # Fail fast (with a helpful message) when AUTO_DEVOPS_DOMAIN is unset.
  function check_autodevops_domain() {
    if [ -z ${AUTO_DEVOPS_DOMAIN+x} ]; then
      # Fixed typo in the filename: .gitlab-cy.yml -> .gitlab-ci.yml
      echo "In order to deploy, AUTO_DEVOPS_DOMAIN must be set as a variable at the group or project level, or manually added in .gitlab-ci.yml"
      false
    else
      true
    fi
  }

  # Build the application image: directly from a Dockerfile when present,
  # otherwise via the gliderlabs/herokuish buildpack image; then push it
  # to the GitLab container registry.
  function build() {
    if [[ -f Dockerfile ]]; then
      echo "Building Dockerfile-based application..."
      docker build -t "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG" .
    else
      echo "Building Heroku-based application using gliderlabs/herokuish docker image..."
      docker run -i --name="$CI_CONTAINER_NAME" -v "$(pwd):/tmp/app:ro" gliderlabs/herokuish /bin/herokuish buildpack build
      docker commit "$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
      docker rm "$CI_CONTAINER_NAME" >/dev/null
      echo ""

      echo "Configuring $CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG docker image..."
      docker create --expose 5000 --env PORT=5000 --name="$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG" /bin/herokuish procfile start web
      docker commit "$CI_CONTAINER_NAME" "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
      docker rm "$CI_CONTAINER_NAME" >/dev/null
      echo ""
    fi

    if [[ -n "$CI_REGISTRY_USER" ]]; then
      echo "Logging to GitLab Container Registry with CI credentials..."
      docker login -u "$CI_REGISTRY_USER" -p "$CI_REGISTRY_PASSWORD" "$CI_REGISTRY"
      echo ""
    fi

    echo "Pushing to GitLab Container Registry..."
    docker push "$CI_APPLICATION_REPOSITORY:$CI_APPLICATION_TAG"
    echo ""
  }

  # Delete the sloppy.io project for this environment (used by stop_review).
  function delete() {
    track="${1-stable}"
    name="$CI_ENVIRONMENT_SLUG"

    if [[ "$track" != "stable" ]]; then
      name="$name-$track"
    fi

    # helm delete "$name" || true
    # Fixed: delete the cleaned project name that ensure_namespace created,
    # not the raw (possibly slash-containing) environment name.
    sloppy delete $SLOPPY_PROJECTNAME-$(cleaned_environment_name)
  }

  # Run sitespeed.io against the deployed environment URL and convert the
  # results into performance.json for the .performance job's artifacts.
  function performance() {
    export CI_ENVIRONMENT_URL=$(cat environment_url.txt)
    echo "CI_ENVIRONMENT_URL is $CI_ENVIRONMENT_URL"

    mkdir gitlab-exporter
    wget -O gitlab-exporter/index.js https://gitlab.com/gitlab-org/gl-performance/raw/10-3/index.js

    mkdir sitespeed-results

    if [ -f .gitlab-urls.txt ]
    then
      sed -i -e 's@^@'"$CI_ENVIRONMENT_URL"'@' .gitlab-urls.txt
      docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.0.3 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results .gitlab-urls.txt
    else
      docker run --shm-size=1g --rm -v "$(pwd)":/sitespeed.io sitespeedio/sitespeed.io:6.0.3 --plugins.add ./gitlab-exporter --outputFolder sitespeed-results "$CI_ENVIRONMENT_URL"
    fi

    mv sitespeed-results/data/performance.json performance.json
  }

  # Save the environment URL as an artifact so later stages (performance)
  # can read it back.
  function persist_environment_url() {
    echo "CI_ENVIRONMENT_URL is $CI_ENVIRONMENT_URL"
    echo $CI_ENVIRONMENT_URL > environment_url.txt
  }

before_script:
  - *auto_devops