# Template docker-push
#
# Builds the linode-autoscaler docker image, pushes it to Google Container
# Registry (GCR), and deploys it to Kubernetes after code is merged to master.
# Feature branches (and master) run the image build and a Dockerfile lint.
#
# Prerequisites (set as repository/deployment variables):
#   $GCLOUD_API_KEYFILE - base64-encoded GCP service-account key JSON
#   $GCLOUD_PROJECT     - GCP project id hosting the GCR registry
#   $KUBE_CONFIG        - kubeconfig for the target Kubernetes cluster

image: atlassian/default-image:2

definitions:
  services:
    docker:
      # Docker builds need extra memory beyond the 1 GB default.
      memory: 3072

pipelines:
  default:
    - parallel:
        - step:
            name: Build and Test
            script:
              - IMAGE_NAME=$BITBUCKET_REPO_SLUG
              - docker build . --file k8s/linode-autoscaler/Dockerfile --tag ${IMAGE_NAME}
            services:
              - docker
            caches:
              - docker
        - step:
            name: Lint the Dockerfile
            image: hadolint/hadolint:latest-debian
            script:
              # Lint the same Dockerfile the build steps use — it lives under
              # k8s/linode-autoscaler/, not at the repository root.
              - hadolint k8s/linode-autoscaler/Dockerfile

  branches:
    master:
      - step:
          name: Build and Test
          script:
            - IMAGE_NAME=linode-autoscaler
            - docker build . --file k8s/linode-autoscaler/Dockerfile --tag ${IMAGE_NAME}
            # Save the built image so the push step (a different container)
            # can load it from the artifact.
            - docker save ${IMAGE_NAME} --output "${IMAGE_NAME}.tar"
          services:
            - docker
          caches:
            - docker
          artifacts:
            - "*.tar"
      - step:
          name: Push docker image to GCR
          image: google/cloud-sdk:alpine
          script:
            # Authenticate gcloud with the base64-encoded service-account key.
            - echo $GCLOUD_API_KEYFILE | base64 -d > ./gcloud-api-key.json
            - gcloud auth activate-service-account --key-file gcloud-api-key.json
            - gcloud config set project $GCLOUD_PROJECT
            - IMAGE_NAME="linode-autoscaler"
            # Restore the image saved by the build step.
            - docker load --input "${IMAGE_NAME}.tar"
            - VERSION="prod-0.1.${BITBUCKET_BUILD_NUMBER}"
            - docker tag "${IMAGE_NAME}" "gcr.io/${GCLOUD_PROJECT}/${IMAGE_NAME}:${VERSION}"
            # Log docker in to GCR using the same service-account key.
            - cat ./gcloud-api-key.json | docker login -u _json_key --password-stdin https://gcr.io
            - docker push "gcr.io/${GCLOUD_PROJECT}/${IMAGE_NAME}:${VERSION}"
          services:
            - docker
      - step:
          name: Deploy to K8s
          deployment: production
          script:
            # Must reference the exact image pushed above: the push step
            # hard-codes "linode-autoscaler", so the deploy step must too
            # (using $BITBUCKET_REPO_SLUG here would break if the repo slug
            # ever differs from the image name).
            - IMAGE="gcr.io/$GCLOUD_PROJECT/linode-autoscaler:prod-0.1.$BITBUCKET_BUILD_NUMBER"
            # Substitute the image placeholder in the manifest before apply.
            - sed -i "s|{{linode_autoscaler_image}}|$IMAGE|g" k8s/linode-autoscaler/webrtc-be-autoscaler.yaml
            - pipe: atlassian/kubectl-run:1.1.2
              variables:
                KUBE_CONFIG: $KUBE_CONFIG
                KUBECTL_COMMAND: 'apply'
                RESOURCE_PATH: 'k8s/'