pr-env (#1673)
* actions: build options
* building backend all/optional
* pr-env: don't pull image if it already exists
* pr-env: exit in case of error
* build all images
* refactor(pr-env): build script
* pr-env: building all images
* chore(actions): logging with aws cred
* actions: enable failure job
* actions: get lb dns address
* actions: fix if condition
* actions: change ingress name
* resources: fix redis persistence
* resources: change resource type to preferred (it'll help to schedule pods)
* actions: build all images
* fix(actions): variable override and folder consistency

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Parent: abfa4c6b47
Commit: 5f855a8df0
3 changed files with 168 additions and 82 deletions
.github/workflows/pr-env.yaml (122 lines changed, vendored)
@@ -4,7 +4,9 @@ on:
   workflow_dispatch:
     inputs:
       build_service:
-        description: 'Name of a single service to build(in small letters), eg: chalice or frontend etc. backend:sevice-name to build service'
+        description: |
+          Name of a single service to build(in small letters), eg: api or frontend etc. backend:sevice-name to build service.
+          Options: all/service-name/backend:{app1/app1,app2,app3/all}
         required: true
         default: 'frontend'
       env_flavour:
@@ -21,6 +23,12 @@ jobs:
     steps:
       - name: Checkout Code
         uses: actions/checkout@v2
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.OR_PR_AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.OR_PR_AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.OR_PR_AWS_DEFAULT_REGION}}
       - name: Setting up env variables
         run: |
           # Fetching details open/draft PR for current branch
@@ -61,8 +69,8 @@ jobs:
         id: vcluster_exists
         continue-on-error: true
         run: |
-          if [ ! $(vcluster list | grep $PR_NUMBER) ]; then
-            echo "failed state"
+          if ! $(vcluster list | grep $PR_NUMBER &> /dev/null); then
+            echo "no cluster found for $PR_NUMBER"
             echo "::set-output name=failed::true"
             exit 100
           fi
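Note on the new guard: it still wraps the pipeline in a command substitution, which only works because the substituted output is empty. A minimal sketch of the more conventional form, assuming `vcluster list` prints one cluster per line:

    # Sketch: grep -q exits 0 on a match and prints nothing,
    # so no command substitution is needed; quoting $PR_NUMBER
    # avoids word splitting if the variable is ever empty.
    if ! vcluster list | grep -q "$PR_NUMBER"; then
      echo "no cluster found for $PR_NUMBER"
      echo "::set-output name=failed::true"
      exit 100
    fi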
@@ -74,7 +82,8 @@ jobs:
         if: steps.vcluster_exists.outputs.failed == 'true'
         id: lb-ip
         run: |
-          LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+          # LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+          LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
           echo "::set-output name=ip::$LB_IP"

       - name: Create vCluster
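Switching the jsonpath from .ip to .hostname matches AWS behaviour: ELB/NLB services publish a DNS name, not a static address. A hedged sketch that tolerates both shapes (same service name as the workflow; the fallback branch is an assumption, not part of this change):

    # Sketch: prefer the hostname (AWS load balancers), fall back
    # to the raw IP (bare-metal style ingress).
    SVC=ingress-ingress-nginx-controller
    LB_IP=$(kubectl get svc "$SVC" -n default -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
    [ -z "$LB_IP" ] && LB_IP=$(kubectl get svc "$SVC" -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
    echo "::set-output name=ip::$LB_IP"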
@@ -104,7 +113,7 @@ jobs:
               "Action": "CREATE",
               "ResourceRecordSet": {
                 "Name": "$DOMAIN_NAME_1",
-                "Type": "A",
+                "Type": "CNAME",
                 "TTL": 300,
                 "ResourceRecords": [{ "Value": "${{ steps.lb-ip.outputs.ip }}" }]
               }

@@ -113,7 +122,7 @@ jobs:
               "Action": "CREATE",
               "ResourceRecordSet": {
                 "Name": "$DOMAIN_NAME_2",
-                "Type": "A",
+                "Type": "CNAME",
                 "TTL": 300,
                 "ResourceRecords": [{ "Value": "${{ steps.lb-ip.outputs.ip }}" }]
               }
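The A-to-CNAME switch follows from the step above: the records now point at a load-balancer hostname rather than an IP, and only a CNAME (or alias) record may hold a hostname. For reference, a minimal sketch of how such a change batch is applied; HOSTED_ZONE_ID, the file name, and the record values are illustrative, and the workflow's own invocation is outside this hunk:

    # Sketch: apply one CNAME change batch with the AWS CLI.
    cat > change-batch.json <<'EOF'
    {
      "Changes": [{
        "Action": "CREATE",
        "ResourceRecordSet": {
          "Name": "pr-123-vcluster.example.com",
          "Type": "CNAME",
          "TTL": 300,
          "ResourceRecords": [{ "Value": "lb-hostname.elb.amazonaws.com" }]
        }
      }]
    }
    EOF
    aws route53 change-resource-record-sets \
      --hosted-zone-id "$HOSTED_ZONE_ID" \
      --change-batch file://change-batch.json

Using UPSERT instead of CREATE would make reruns idempotent, at the cost of silently overwriting an existing record.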
@@ -138,14 +147,20 @@ jobs:
         env:
           EXPECTED_IP: ${{ steps.lb-ip.outputs.ip }}
         run: |
-          DOMAIN_NAME_1="$PR_NUMBER-vcluster.${{ secrets.OR_PR_DOMAIN_NAME }}"
+          DOMAIN_NAME="$PR_NUMBER-vcluster.${{ secrets.OR_PR_DOMAIN_NAME }}"
           MAX_ATTEMPTS=30
           attempt=1
           until [[ $attempt -gt $MAX_ATTEMPTS ]]
           do
-            DNS_IP=$(dig +short $DOMAIN_NAME_1 @8.8.8.8)
-            if [[ "$DNS_IP" == "$EXPECTED_IP" ]]; then
-              echo "DNS has propagated for $DOMAIN_NAME_1"
+            # Use dig to query DNS records
+            DNS_RESULT=$(dig +short $DOMAIN_NAME @1.1.1.1)
+
+            # Check if DNS result is empty
+            if [ -z "$DNS_RESULT" ]; then
+              echo "No IP or CNAME records found for $DOMAIN_NAME."
+            else
+              echo "DNS records found for $DOMAIN_NAME:"
+              echo "$DNS_RESULT"
               break
             fi
             echo "Waiting for DNS propagation... Attempt $attempt of $MAX_ATTEMPTS"

@@ -154,7 +169,7 @@ jobs:
           done

           if [[ $attempt -gt $MAX_ATTEMPTS ]]; then
-            echo "DNS propagation check failed for $DOMAIN_NAME_1 after $MAX_ATTEMPTS attempts."
+            echo "DNS propagation check failed for $DOMAIN_NAME after $MAX_ATTEMPTS attempts."
             exit 1
           fi
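The loop no longer compares against EXPECTED_IP: a CNAME pointing at the load-balancer hostname resolves to addresses the workflow cannot predict, so any non-empty answer now counts as propagated. Run standalone, the new logic looks roughly like this (the domain value and the 10-second sleep are illustrative; the actual delay between attempts sits outside these hunks):

    # Sketch: poll until the resolver returns any record.
    DOMAIN_NAME="pr-123-vcluster.example.com"
    MAX_ATTEMPTS=30
    attempt=1
    until [[ $attempt -gt $MAX_ATTEMPTS ]]; do
      DNS_RESULT=$(dig +short "$DOMAIN_NAME" @1.1.1.1)
      if [ -n "$DNS_RESULT" ]; then
        echo "DNS records found for $DOMAIN_NAME:"
        echo "$DNS_RESULT"
        break
      fi
      echo "Waiting for DNS propagation... Attempt $attempt of $MAX_ATTEMPTS"
      attempt=$((attempt + 1))
      sleep 10
    done
    [[ $attempt -gt $MAX_ATTEMPTS ]] && { echo "DNS propagation check failed for $DOMAIN_NAME"; exit 1; }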
@@ -191,27 +206,82 @@ jobs:
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           env: ${{ github.event.inputs.env_flavour }}
         run: |
           set -x

           app_name=${{github.event.inputs.build_service}}
           echo "building and deploying $app_name"
           docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
           export KUBECONFIG=/tmp/kubeconfig.yaml

-          case $app_name in
+          function build_and_deploy {
+            apps_to_build=$1
+            case $apps_to_build in
             backend*)
-              echo "In backend build"
-              cd backend
-              component=`echo $app_name | cut -d ':' -f 2`
+              echo "Building backend build"
+              cd $GITHUB_WORKSPACE/backend
+              components=()
+              if [ $apps_to_build == "backend:all" ]; then
+                # Append all folder names from 'cmd/' directory to the array
+                for folder in cmd/*/; do
+                  # Use basename to extract the folder name without path
+                  folder_name=$(basename "$folder")
+                  components+=("$folder_name")
+                done
+              else
+                # "${apps_to_build#*:}" :: Strip backend: and output app1,app2,app3 to read -ra
+                IFS=',' read -ra components <<< "${apps_to_build#*:}"
+              fi
+              echo "Building components: " ${components[@]}
+              for component in "${components[@]}"; do
+                if [ $(docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} > /dev/null) ]; then
+                  echo Image present upstream. Skipping build: $component
+                else
+                  echo "Building backend:$component"
+                  PUSH_IMAGE=1 bash -x ./build.sh $env $component
+                fi
+                kubectl set image -n app deployment/$component-openreplay $component=${DOCKER_REPO}/$component:${IMAGE_TAG}
+              done
               ;;
+            chalice)
+              echo "Chalice build"
+              component=$apps_to_build
+              cd $GITHUB_WORKSPACE/api || (Nothing to build: $apps_to_build; exit 100)
+              if [ $(docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} > /dev/null) ]; then
+                echo Image present upstream. Skipping build: $component
+              else
+                echo "Building backend:$component"
+                PUSH_IMAGE=1 bash -x ./build.sh $env $component
+              fi
+              kubectl set image -n app deployment/$apps_to_build-openreplay $apps_to_build=${DOCKER_REPO}/$apps_to_build:${IMAGE_TAG}
+              ;;
             *)
-              set -x
-              cd $app_name || (Nothing to build: $app_name; exit 100)
-              PUSH_IMAGE=1 bash -x ./build.sh $env
-              kubectl set image -n app deployment/$app_name-openreplay $app_name=${DOCKER_REPO}/$app_name:${IMAGE_TAG}
+              echo "$apps_to_build build"
+              cd $GITHUB_WORKSPACE/$apps_to_build || (Nothing to build: $apps_to_build; exit 100)
+              component=$apps_to_build
+              if [ $(docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} > /dev/null) ]; then
+                echo Image present upstream. Skipping build: $component
+              else
+                echo "Building backend:$component"
+                PUSH_IMAGE=1 bash -x ./build.sh $env $component
+              fi
+              kubectl set image -n app deployment/$apps_to_build-openreplay $apps_to_build=${DOCKER_REPO}/$apps_to_build:${IMAGE_TAG}
               ;;
             esac
+          }
+
+          case $app_name in
+            all)
+              build_and_deploy "backend:all"
+              build_and_deploy "frontend"
+              build_and_deploy "chalice"
+              build_and_deploy "sourcemapreader"
+              build_and_deploy "assist-stats"
+              ;;
+            *)
+              build_and_deploy $app_name
+              ;;
+          esac

       - name: Sent results to slack
         if: steps.vcluster_exists.outputs.failed == 'true'
         env:
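Two details of the new build logic are worth flagging. The skip-if-present test `if [ $(docker manifest inspect ... > /dev/null) ]` redirects stdout away, so the `[ ]` is always empty and the build always runs; and in `|| (Nothing to build: ...; exit 100)` the echo is missing and the exit fires inside a subshell, so it does not stop the step. A corrected sketch of the per-component body, under the same names as the hunk:

    # Sketch: test the exit status of `docker manifest inspect`
    # directly instead of substituting its (redirected) output,
    # and fail the step properly when the directory is missing.
    cd "$GITHUB_WORKSPACE/backend" || { echo "Nothing to build: $component"; exit 100; }
    if docker manifest inspect "${DOCKER_REPO}/${component}:${IMAGE_TAG}" > /dev/null 2>&1; then
      echo "Image present upstream. Skipping build: $component"
    else
      echo "Building $component"
      PUSH_IMAGE=1 bash -x ./build.sh "$env" "$component"
    fi
    kubectl set image -n app "deployment/${component}-openreplay" \
      "${component}=${DOCKER_REPO}/${component}:${IMAGE_TAG}"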
@@ -256,10 +326,10 @@ jobs:
       # run: |
       #   # Add any cleanup commands if necessary

-      # - name: Debug Job
-      #   if: failure()
-      #   uses: mxschmitt/action-tmate@v3
-      #   env:
-      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}
-      #     ENVIRONMENT: staging
+      - name: Debug Job
+        if: failure()
+        uses: mxschmitt/action-tmate@v3
+        env:
+          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+          IMAGE_TAG: ${{ github.sha }}
+          ENVIRONMENT: staging

(second changed file: shell script; filename not captured)
@@ -1,4 +1,7 @@
 #!/bin/bash
+
+set -e
+
 if [ $# -lt 1 ]; then
   echo "bash $0 pr-number.openreplay.tools"
   echo "eg: bash $0 pr-111.openreplay.tools"
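`set -e` is the "exit in case of error" item from the commit message: the script now aborts on the first failing command instead of carrying on with stale state. A two-line sketch of the difference:

    set -e
    false                  # script exits 1 here
    echo "never printed"   # unreachable once set -e is active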
(third changed file: Helm values; filename not captured)

@@ -6,6 +6,7 @@ minio:
 postgresql:
   persistence: *persistence
 redis:
+  master:
   persistence: *persistence
 kafka:
   persistence: *persistence
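Nesting persistence under master matches Bitnami-style Redis chart values, where per-node storage settings live under master (and replica) rather than at the chart's top level; a top-level redis.persistence key has no effect, which appears to be the "fix redis persistence" item in the commit message. A hedged way to confirm the fix landed (label and scope are assumptions about the deployed chart):

    # Sketch: after deploy, a PVC should now exist for the redis master.
    kubectl get pvc --all-namespaces -l app.kubernetes.io/name=redis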
@@ -39,8 +40,10 @@ chalice:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
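requiredDuringSchedulingIgnoredDuringExecution makes the affinity a hard constraint: if no node already runs a matching pod, the pod stays Pending. The preferred form with weight: 1 keeps the co-location hint but lets the scheduler place the pod anyway, which is the "change resource type to preferred" item; the same edit repeats for imagestorage, sink, storage, and videostorage below. If pods were stuck under the old rule, the symptom shows up as scheduling events:

    # Sketch: list pods blocked by unsatisfiable scheduling constraints.
    kubectl get events -n app --field-selector reason=FailedScheduling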
@@ -77,8 +80,10 @@ imagestorage:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
@@ -89,6 +94,7 @@ imagestorage:
             - imagestorage
             - videostorage
         topologyKey: kubernetes.io/hostname
+
 ingress-nginx:
   resources:
     requests:
@@ -111,8 +117,10 @@ sink:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
@@ -133,8 +141,10 @@ storage:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
@@ -155,8 +165,10 @@ videostorage:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
@@ -167,3 +179,4 @@ videostorage:
             - imagestorage
             - videostorage
         topologyKey: kubernetes.io/hostname
+