Skip to content

Deploy (add-helm-charts -> demo) by @aprilrieger #20

Deploy (add-helm-charts -> demo) by @aprilrieger

Deploy (add-helm-charts -> demo) by @aprilrieger #20

Workflow file for this run

---
# In-repo deploy: checkout → kubeconfig → envsubst (secrets only) → bin/helm_deploy.
# Container image comes from chart defaults + ops/<environment>-deploy.yaml (not ghcr.io/<this repo>).
name: Deploy
run-name: Deploy (${{ github.ref_name }} -> ${{ inputs.environment }}) by @${{ github.actor }}

on:
  workflow_dispatch:
    inputs:
      environment:
        description: Deploy target (must match ops/<name>-deploy.tmpl.yaml and a GitHub Environment)
        required: true
        type: choice
        default: demo
        options:
          - demo
      debug_enabled:
        description: Open an interactive tmate session on the runner before deploy
        required: false
        type: boolean
        default: false
      k8s_release_name:
        # NOTE(review): description says "<repo>-<environment>" but the computed
        # default below is format(environment, repo) => "<environment>-<repo>" — confirm which is intended.
        description: Helm release name (leave blank for <repo>-<environment>)
        required: false
        type: string
      k8s_namespace:
        description: Kubernetes namespace (leave blank for <repo>-<environment>)
        required: false
        type: string
      run_bootstrap_job:
        description: Re-run the bootstrap Job (helmHook=false, compose). Requires Secret dataverse-admin-api-token (key token) in the namespace when the instance is already bootstrapped — see ops/*-deploy.tmpl.yaml bootstrapJob.compose.existingAdminApiTokenSecret. Deletes the prior Job by name first.
        required: false
        type: boolean
        default: false

# Least-privilege token: the job only reads repo contents (checkout).
permissions:
  contents: read

jobs:
  deploy:
    runs-on: ubuntu-latest
    # helm + kubectl preinstalled; all steps run inside this container.
    container: dtzar/helm-kubectl:3.9.4
    # Binds the run to the GitHub Environment so environment-scoped secrets/vars resolve.
    environment: ${{ inputs.environment }}
    env:
      DB_PASSWORD: ${{ secrets.DB_PASSWORD }}
      # Optional mail — secrets + Environment variables (see ops/demo-deploy.tmpl.yaml header).
      SYSTEM_EMAIL: ${{ secrets.SYSTEM_EMAIL }}
      NO_REPLY_EMAIL: ${{ secrets.NO_REPLY_EMAIL }}
      SMTP_PASSWORD: ${{ secrets.SMTP_PASSWORD }}
      MAIL_SMTP_PASSWORD: ${{ secrets.MAIL_SMTP_PASSWORD }}
      SMTP_ADDRESS: ${{ vars.SMTP_ADDRESS }}
      SMTP_USER_NAME: ${{ vars.SMTP_USER_NAME }}
      SMTP_PORT: ${{ vars.SMTP_PORT }}
      SOCKET_PORT: ${{ vars.SOCKET_PORT }}
      SMTP_AUTH: ${{ vars.SMTP_AUTH }}
      SMTP_STARTTLS: ${{ vars.SMTP_STARTTLS }}
      SMTP_TYPE: ${{ vars.SMTP_TYPE }}
      SMTP_ENABLED: ${{ vars.SMTP_ENABLED }}
      SMTP_DOMAIN: ${{ vars.SMTP_DOMAIN }}
      DEPLOY_ENVIRONMENT: ${{ inputs.environment }}
      HELM_RELEASE_NAME: ${{ inputs.k8s_release_name || format('{0}-{1}', inputs.environment, github.event.repository.name) }}
      HELM_NAMESPACE: ${{ inputs.k8s_namespace || format('{0}-{1}', inputs.environment, github.event.repository.name) }}
      HELM_APP_NAME: ${{ vars.HELM_APP_NAME || github.event.repository.name }}
      HELM_CHART_PATH: ${{ vars.HELM_CHART_PATH || './charts/dataverseup' }}
      DEPLOY_ROLLOUT_TIMEOUT: ${{ vars.DEPLOY_ROLLOUT_TIMEOUT || '10m' }}
      DEPLOY_BOOTSTRAP_JOB_TIMEOUT: ${{ vars.DEPLOY_BOOTSTRAP_JOB_TIMEOUT || '25m' }}
      # Bumped every workflow run so the Deployment pod template changes and Kubernetes rolls pods even when
      # image.tag and the rest of values are identical (otherwise `helm upgrade` can "succeed" with no rollout).
      GITHUB_RUN_ID: ${{ github.run_id }}
      # Quoted so the env value is the string "1" regardless of YAML implicit typing.
      HELM_EXPERIMENTAL_OCI: "1"
      HELM_EXTRA_ARGS: --values ops/${{ inputs.environment }}-deploy.yaml
      KUBECONFIG: ./kubeconfig.yml
      KUBECONFIG_FILE: ${{ secrets.KUBECONFIG_FILE }}
    steps:
      # Local actions under ./.github/actions/* are not on disk until checkout runs — checkout must be first.
      - name: Checkout
        uses: actions/checkout@v4
        with:
          fetch-depth: 0
          submodules: recursive
          token: ${{ secrets.GITHUB_TOKEN }}

      # Fail fast before touching the cluster if the environment has no template.
      - name: Validate deploy template exists
        run: test -f "ops/${DEPLOY_ENVIRONMENT}-deploy.tmpl.yaml"

      - name: Setup tmate session
        uses: mxschmitt/action-tmate@v3
        if: github.event_name == 'workflow_dispatch' && inputs.debug_enabled
        with:
          limit-access-to-actor: true

      - name: Prepare kubeconfig and render deploy values
        run: |
          set -e
          TMPL="ops/${DEPLOY_ENVIRONMENT}-deploy.tmpl.yaml"
          OUT="ops/${DEPLOY_ENVIRONMENT}-deploy.yaml"
          echo "$KUBECONFIG_FILE" | base64 -d >"$KUBECONFIG"
          export SMTP_PORT="${SMTP_PORT:-25}"
          export SOCKET_PORT="${SOCKET_PORT:-${SMTP_PORT}}"
          export SMTP_PASSWORD="${SMTP_PASSWORD:-${MAIL_SMTP_PASSWORD:-}}"
          if [ -z "${NO_REPLY_EMAIL:-}" ] && [ -n "${SMTP_DOMAIN:-}" ]; then
            export NO_REPLY_EMAIL="noreply@${SMTP_DOMAIN}"
          fi
          # Only secrets + rollout id — hosts, Solr DNS, ingress, and bucket are literals in the *.tmpl.yaml file.
          ENVSUBST_VARS='$GITHUB_RUN_ID $DB_PASSWORD $SYSTEM_EMAIL $SMTP_PASSWORD $SMTP_AUTH'
          envsubst "$ENVSUBST_VARS" <"$TMPL" >"$OUT"

      - name: Solr conf ConfigMap (pre-Helm)
        env:
          DV_REF: ${{ vars.DV_REF || 'v6.10.1' }}
          SOLR_DIST_VERSION: ${{ vars.SOLR_DIST_VERSION || '9.10.1' }}
        run: |
          set -e
          chmod +x scripts/solr-init-k8s.sh scripts/k8s/ensure-solr-conf-configmap.sh
          SOLR_RESTART_DEPLOYMENTS=false ./scripts/solr-init-k8s.sh "$HELM_NAMESPACE" "$HELM_RELEASE_NAME"

      - name: Deploy with Helm
        run: |
          set -e
          chmod +x bin/helm_deploy
          ./bin/helm_deploy "$HELM_RELEASE_NAME" "$HELM_NAMESPACE"
          echo "=== helm status ==="
          helm status "$HELM_RELEASE_NAME" -n "$HELM_NAMESPACE"
          echo "=== rollout (Dataverse deployment) ==="
          kubectl -n "$HELM_NAMESPACE" rollout status deployment \
            -l "app.kubernetes.io/instance=${HELM_RELEASE_NAME},app.kubernetes.io/name=${HELM_APP_NAME}" \
            --timeout="${DEPLOY_ROLLOUT_TIMEOUT}"

      - name: Solr workloads pick up ConfigMap updates
        if: ${{ vars.SOLR_POST_HELM_ROLLOUT == 'true' }}
        env:
          DV_REF: ${{ vars.DV_REF || 'v6.10.1' }}
          SOLR_DIST_VERSION: ${{ vars.SOLR_DIST_VERSION || '9.10.1' }}
        run: |
          set -e
          chmod +x scripts/solr-init-k8s.sh scripts/k8s/ensure-solr-conf-configmap.sh
          SOLR_APPLY_CM=false ./scripts/solr-init-k8s.sh "$HELM_NAMESPACE" "$HELM_RELEASE_NAME"

      # Renders the bootstrap Job outside the Helm release (helmHook=false), deletes the previous
      # Job of the same name, applies the fresh one, and waits for completion before dumping logs.
      - name: One-shot bootstrap Job (non-hook)
        if: github.event_name == 'workflow_dispatch' && inputs.run_bootstrap_job
        run: |
          set -e
          OUT="$(mktemp)"
          trap 'rm -f "$OUT"' EXIT
          helm template "$HELM_RELEASE_NAME" "$HELM_CHART_PATH" \
            --namespace "$HELM_NAMESPACE" \
            $HELM_EXTRA_ARGS \
            --show-only templates/bootstrap-chain-configmap.yaml \
            --show-only templates/bootstrap-job.yaml \
            --set bootstrapJob.enabled=true \
            --set bootstrapJob.helmHook=false \
            --set bootstrapJob.mode=compose \
            >"$OUT"
          JOB="$(awk '/^kind: Job$/{j=1} j && /^  name: /{print $2; exit}' "$OUT")"
          if [ -z "$JOB" ]; then
            echo "Could not parse Job metadata.name from helm template ($HELM_CHART_PATH templates/bootstrap-job.yaml)." >&2
            exit 1
          fi
          kubectl -n "$HELM_NAMESPACE" delete job "$JOB" --ignore-not-found=true
          kubectl apply -f "$OUT"
          kubectl -n "$HELM_NAMESPACE" wait --for=condition=complete "job/$JOB" --timeout="${DEPLOY_BOOTSTRAP_JOB_TIMEOUT}"
          kubectl -n "$HELM_NAMESPACE" logs "job/$JOB"