Enter Buildkite. I recently started at an early-stage startup building Kubernetes-native tooling, and when I joined the team I found that it had recently migrated from CircleCI to Buildkite. After years of working with the same tooling, the transition from CircleCI to Buildkite was an interesting new adventure. I'm going to share some of the reasons behind the migration, and the benefits we've seen from using Buildkite for our CI/CD operations.
version: 2.1

executors:
  # Shared executor for every job: a prebuilt tooling image pulled from a
  # private ECR registry, authenticated via context-provided AWS credentials.
  common:
    working_directory: "~/base/mono"
    docker:
      - image: 634375685434.dkr.ecr.us-east-1.amazonaws.com/build-tools:latest
        aws_auth:
          aws_access_key_id: $AWS_ACCESS_KEY_ID
          aws_secret_access_key: $AWS_SECRET_ACCESS_KEY
commands:
  # Reusable command: build and push the Docker image for one service.
  build-service:
    parameters:
      service:
        type: string
    steps:
      - setup_remote_docker:
          docker_layer_caching: true
      # Restores the repo checkout saved by init-workspace for this exact
      # commit (keyed by CIRCLE_SHA1), avoiding a fresh clone per job.
      - restore_cache:
          key: v1-repo-{{ .Environment.CIRCLE_SHA1 }}
      - run: "komo ci build-and-push << parameters.service >>"
jobs:
  # Checks out the repo once and caches it keyed by commit SHA so downstream
  # jobs can restore it instead of re-cloning.
  init-workspace:
    executor: common
    steps:
      - checkout
      - save_cache:
          key: v1-repo-{{ .Environment.CIRCLE_SHA1 }}
          paths:
            - "."
  # Base image every other service build depends on.
  shared:
    executor: common
    steps:
      - build-service:
          service: "shared"
  test-authz:
    executor: common
    steps:
      - checkout
      - setup_remote_docker:
          docker_layer_caching: true
      - run: "yarn workspace authz-tests run start:ci"
  web:
    executor: common
    steps:
      - build-service:
          service: "web"
  admin:
    executor: common
    steps:
      - build-service:
          service: "admin"
  brain:
    executor: common
    steps:
      - build-service:
          service: "brain"
  cloudtrail-collector:
    executor: common
    steps:
      - build-service:
          service: "cloudtrail-collector"
  es-proxy:
    executor: common
    steps:
      - build-service:
          service: "es-proxy"
  github-collector:
    executor: common
    steps:
      - build-service:
          service: "github-collector"
  github-installer:
    executor: common
    steps:
      - build-service:
          service: "github-installer"
  komodor-service-api:
    executor: common
    steps:
      - build-service:
          service: "komodor-service-api"
  pager-duty-collector:
    executor: common
    steps:
      - build-service:
          service: "pager-duty-collector"
  k8s-events-collector:
    executor: common
    steps:
      - build-service:
          service: "k8s-events-collector"
  sentry-collector:
    executor: common
    steps:
      - build-service:
          service: "sentry-collector"
  hasura-actions:
    executor: common
    steps:
      - build-service:
          service: "hasura-actions"
  slack-collector:
    executor: common
    steps:
      - build-service:
          service: "slack-collector"
  opsgenie-collector:
    executor: common
    steps:
      - build-service:
          service: "opsgenie-collector"
  slack-installer:
    executor: common
    steps:
      - build-service:
          service: "slack-installer"
  setup:
    executor: common
    steps:
      - build-service:
          service: "setup"
  # Deploys every service image built above in a single `komo ci deploy` call.
  deploy:
    executor: common
    steps:
      - add_ssh_keys:
          fingerprints:
            - "3b:25:2e:68:8c:e9:81:29:47:7f:79:5f:d4:43:bd:a3"
      - checkout
      - run: >
          komo ci deploy
          shared
          brain
          web
          admin
          cloudtrail-collector
          es-proxy
          github-collector
          github-installer
          komodor-service-api
          pager-duty-collector
          k8s-events-collector
          sentry-collector
          opsgenie-collector
          slack-collector
          slack-installer
          hasura-actions
          setup
workflows:
  version: 2
  build-and-test:
    jobs:
      - init-workspace:
          context: aws
      - shared:
          context: aws
          requires:
            - init-workspace
      - brain:
          context: aws
          requires:
            - init-workspace
      - web:
          context: aws
          requires:
            - shared
      - admin:
          context: aws
          requires:
            - shared
      - cloudtrail-collector:
          context: aws
          requires:
            - shared
      - es-proxy:
          context: aws
          requires:
            - shared
      - github-collector:
          context: aws
          requires:
            - shared
      - github-installer:
          context: aws
          requires:
            - shared
      - komodor-service-api:
          context: aws
          requires:
            - shared
      - pager-duty-collector:
          context: aws
          requires:
            - shared
      - opsgenie-collector:
          context: aws
          requires:
            - shared
      - k8s-events-collector:
          context: aws
          requires:
            - shared
      - sentry-collector:
          context: aws
          requires:
            - shared
      - hasura-actions:
          context: aws
          requires:
            - shared
      - slack-collector:
          context: aws
          requires:
            - shared
      - slack-installer:
          context: aws
          requires:
            - shared
      - setup:
          context: aws
          requires:
            - shared
      - test-authz:
          context: aws
          requires:
            - shared
      # Runs only on master, after all service builds succeed.
      # NOTE(review): test-authz does not gate deploy — confirm intentional.
      - deploy:
          context: aws
          requires:
            - brain
            - web
            - admin
            - k8s-events-collector
            - cloudtrail-collector
            # es-proxy and opsgenie-collector are deployed by this job but
            # were previously missing from requires, so deploy could run
            # before (or despite failure of) their image builds.
            - es-proxy
            - opsgenie-collector
            - github-collector
            - github-installer
            - komodor-service-api
            - pager-duty-collector
            - slack-collector
            - slack-installer
            - sentry-collector
            - hasura-actions
            - setup
          filters:
            branches:
              only: master
#!/usr/bin/env python3
from os import environ
from typing import Optional, List
import yaml
# Services the generated Buildkite pipeline builds and deploys. Each one must
# exist as a chart dependency in umbrella/Chart.yaml (validated below).
services = [
    "slack-installer",
    "opsgenie-collector",
    "hasura-actions",
    "web",
    "admin",
    "brain",
    "github-collector",
    "github-installer",
    "pager-duty-collector",
    "k8s-events-collector",
    "sentry-collector",
    "slack-collector",
    "slack-message-sender",
    "rest-api",
    "datadog-app",
    "webhook-listener",
]

# Fail fast — before emitting any pipeline YAML — if a service is missing its
# Helm chart dependency in the umbrella chart.
with open("umbrella/Chart.yaml") as f:
    # safe_load: yaml.load without an explicit Loader is deprecated and can
    # execute arbitrary code via YAML tags on untrusted input.
    dep = yaml.safe_load(f)["dependencies"]
dep_names = [d["name"] for d in dep]
for s in services:
    assert s in dep_names, f"Missing {s} chart dependency in umbrella/Chart.yaml"
def do_command(
    label: str,
    command: str,
    key: Optional[str] = None,
    concurrency_group: Optional[str] = None,
    concurrency: int = 1,
    soft_fail: bool = False,
    depends_on: Optional[List[str]] = None,
) -> None:
    """Print one Buildkite command step as a YAML list item on stdout.

    Args:
        label: Human-readable step label (may contain emoji shortcodes).
        command: Shell command the step runs.
        key: Optional step key so other steps can depend on it.
        concurrency_group: Optional named group; when set, ``concurrency``
            limits how many steps in the group run at once.
        concurrency: Max parallel steps in the concurrency group (default 1).
        soft_fail: When True, exit statuses 1 and 2 do not fail the build.
        depends_on: Optional list of step keys this step waits for.
    """
    # Two-space indent places the item under the top-level "steps:" key;
    # nested keys of the item sit at four spaces.
    print(f"  - command: {command}")
    print(f"    label: '{label}'")
    if key:
        print(f"    key: '{key}'")
    if concurrency_group:
        print(f"    concurrency_group: {concurrency_group}")
        print(f"    concurrency: {concurrency}")
    if soft_fail:
        print("    soft_fail:")
        print("      - exit_status: 1")
        print("      - exit_status: 2")
    if depends_on:
        print("    depends_on:")
        for dependency in depends_on:
            print(f'      - "{dependency}"')
def wait() -> None:
    """Print a Buildkite wait step, gating later steps on earlier ones."""
    print("  - wait")
def require_approval(reason: str) -> None:
    """Print a Buildkite block step that pauses the master branch for manual
    approval, labelled with ``reason``."""
    print(f"  - block: '{reason}'")
    print("    branches: master")
def pipeline() -> None:
    """Print the complete Buildkite pipeline YAML to stdout.

    Always emits build steps for the shared base image, every service, and
    the e2e image. On the master branch it additionally emits image upload,
    staging deploy, staging E2E, and production deploy steps.
    """
    # begin the pipeline.yml file
    print("steps:")
    # The shared base image must be pushed before any service image builds.
    do_command(
        ":docker: build and push shared :docker:", "komo ci build-and-push shared"
    )
    wait()
    for svc in services:
        do_command(f":docker: {svc}", f"komo ci build-and-push {svc}")
    do_command(
        ":docker: e2e (testcafe)",
        "komo ci build-and-push e2e --dockerfile tests/e2e/Dockerfile",
    )
    # Deploy steps only apply to master builds.
    if environ.get("BUILDKITE_BRANCH") == "master":
        wait()
        services_and_shared = services[:] + ["shared"]
        services_and_shared_as_string = " ".join(services_and_shared)
        production_arguments = '"-f values-production.yaml --timeout 20m"'
        staging_arguments = '"-f values-staging.yaml"'
        do_command(
            label=":docker: upload images :ecr:",
            command=f"komo ci upload-images {services_and_shared_as_string}",
            concurrency_group="ecr/tagging",
        )
        wait()
        do_command(
            label=":kubernetes: deploy staging :rocket:",
            command=f"komo ci deploy --cluster-name komodor-staging-eks --extra-arg {staging_arguments}",
            concurrency_group="staging/deploy",
            key="staging-deploy",
        )
        do_command(
            label=":rooster: E2E Test - Staging",
            command="komo test trigger testcafe --env staging",
            concurrency_group="staging/deploy",
            depends_on=["staging-deploy"],
        )
        # require_approval("Approve deploy to production manually")
        wait()
        do_command(
            label=":kubernetes: deploy production :rocket:",
            command=f"komo ci deploy --extra-arg {production_arguments}",
            concurrency_group="production/deploy",
            key="production-deploy",
        )
        # do_command(
        #     label=":rooster: E2E Test - Production",
        #     command="komo test trigger testcafe",
        #     concurrency_group="production/deploy",
        #     depends_on=["production-deploy"],
        # )
if __name__ == "__main__":
    # Emit the generated pipeline when run directly
    # (e.g. piped into `buildkite-agent pipeline upload`).
    pipeline()
While both CircleCI and Buildkite offer Docker Layer Caching functionality, Buildkite's implementation performed better for us with our own Docker images (which brings me to the next point). As a small startup that needs to move very rapidly, we found that even an incremental improvement in build time would ultimately translate into big long-term gains in productivity.