pr-env (#1673)
* actions: build options
* building backend all/optional
* pr-env: don't pull image if it already exists
* pr-env: exit in case of error
* build all images
* refactor(pr-env): build script
* pr-env: building all images
* chore(actions): logging with aws cred
* actions: enable failure job
* actions: get lb dns address
* actions: fix if condition
* actions: change ingress name
* resources: fix redis persistence
* resources: change resource type to preferred. It'll help to schedule pods
* actions: Build all images
* fix(actions): variable override and folder consistency

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
parent abfa4c6b47
commit 5f855a8df0
3 changed files with 168 additions and 82 deletions
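
The headline change is the richer build_service input: all, a single service name, or a backend:... selector. A minimal standalone sketch of how the new script splits the backend form, using the same parameter expansion as the workflow below; the service names http,sink and the cmd/ layout are illustrative:

    #!/bin/bash
    # Sketch of the build_service parsing introduced in this commit.
    # "backend:http,sink" is an example input, not a fixed value.
    apps_to_build="backend:http,sink"
    components=()
    if [ "$apps_to_build" == "backend:all" ]; then
      # one component per folder under cmd/
      for folder in cmd/*/; do
        components+=("$(basename "$folder")")
      done
    else
      # strip the "backend:" prefix, then split on commas
      IFS=',' read -ra components <<< "${apps_to_build#*:}"
    fi
    echo "Building components:" "${components[@]}"   # -> http sink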
.github/workflows/pr-env.yaml (vendored, 122 changes)
@@ -4,7 +4,9 @@ on:
   workflow_dispatch:
     inputs:
       build_service:
-        description: 'Name of a single service to build(in small letters), eg: chalice or frontend etc. backend:sevice-name to build service'
+        description: |
+          Name of a single service to build (in small letters), eg: api or frontend. Use backend:service-name to build a backend service.
+          Options: all/service-name/backend:{app1/app1,app2,app3/all}
         required: true
         default: 'frontend'
       env_flavour:
@@ -21,6 +23,12 @@ jobs:
     steps:
       - name: Checkout Code
         uses: actions/checkout@v2
+      - name: Configure AWS Credentials
+        uses: aws-actions/configure-aws-credentials@v4
+        with:
+          aws-access-key-id: ${{ secrets.OR_PR_AWS_ACCESS_KEY_ID }}
+          aws-secret-access-key: ${{ secrets.OR_PR_AWS_SECRET_ACCESS_KEY }}
+          aws-region: ${{ secrets.OR_PR_AWS_DEFAULT_REGION }}
       - name: Setting up env variables
         run: |
           # Fetching details open/draft PR for current branch
@@ -61,8 +69,8 @@ jobs:
         id: vcluster_exists
         continue-on-error: true
         run: |
-          if [ ! $(vcluster list | grep $PR_NUMBER) ]; then
-            echo "failed state"
+          if ! vcluster list | grep $PR_NUMBER &> /dev/null; then
+            echo "no cluster found for $PR_NUMBER"
             echo "::set-output name=failed::true"
             exit 100
           fi
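
Note: newer GitHub runners have deprecated the ::set-output workflow command. A minimal equivalent of this step written against $GITHUB_OUTPUT, assuming the same step id and PR_NUMBER variable; offered as a sketch, not part of this commit:

    if ! vcluster list | grep -q "$PR_NUMBER"; then
      echo "no cluster found for $PR_NUMBER"
      echo "failed=true" >> "$GITHUB_OUTPUT"   # replaces ::set-output name=failed::true
      exit 100
    fi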
@@ -74,7 +82,8 @@ jobs:
         if: steps.vcluster_exists.outputs.failed == 'true'
         id: lb-ip
         run: |
-          LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+          # LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].ip}')
+          LB_IP=$(kubectl get svc ingress-ingress-nginx-controller -n default -o jsonpath='{.status.loadBalancer.ingress[0].hostname}')
           echo "::set-output name=ip::$LB_IP"

       - name: Create vCluster
@@ -104,7 +113,7 @@ jobs:
               "Action": "CREATE",
               "ResourceRecordSet": {
                 "Name": "$DOMAIN_NAME_1",
-                "Type": "A",
+                "Type": "CNAME",
                 "TTL": 300,
                 "ResourceRecords": [{ "Value": "${{ steps.lb-ip.outputs.ip }}" }]
               }
@@ -113,7 +122,7 @@ jobs:
               "Action": "CREATE",
               "ResourceRecordSet": {
                 "Name": "$DOMAIN_NAME_2",
-                "Type": "A",
+                "Type": "CNAME",
                 "TTL": 300,
                 "ResourceRecords": [{ "Value": "${{ steps.lb-ip.outputs.ip }}" }]
               }
@@ -138,14 +147,20 @@ jobs:
         env:
           EXPECTED_IP: ${{ steps.lb-ip.outputs.ip }}
         run: |
-          DOMAIN_NAME_1="$PR_NUMBER-vcluster.${{ secrets.OR_PR_DOMAIN_NAME }}"
+          DOMAIN_NAME="$PR_NUMBER-vcluster.${{ secrets.OR_PR_DOMAIN_NAME }}"
           MAX_ATTEMPTS=30
           attempt=1
           until [[ $attempt -gt $MAX_ATTEMPTS ]]
           do
-            DNS_IP=$(dig +short $DOMAIN_NAME_1 @8.8.8.8)
-            if [[ "$DNS_IP" == "$EXPECTED_IP" ]]; then
-              echo "DNS has propagated for $DOMAIN_NAME_1"
+            # Use dig to query DNS records
+            DNS_RESULT=$(dig +short $DOMAIN_NAME @1.1.1.1)
+
+            # Check if DNS result is empty
+            if [ -z "$DNS_RESULT" ]; then
+              echo "No IP or CNAME records found for $DOMAIN_NAME."
+            else
+              echo "DNS records found for $DOMAIN_NAME:"
+              echo "$DNS_RESULT"
               break
             fi
             echo "Waiting for DNS propagation... Attempt $attempt of $MAX_ATTEMPTS"
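
For context on the switch from comparing a single A record to just checking for any answer: once the records above are CNAMEs to the load balancer hostname, dig +short typically prints the CNAME target followed by the addresses it resolves to, so an exact-IP comparison would no longer match. A hypothetical query (domain and addresses made up):

    dig +short pr-1673-vcluster.example.com @1.1.1.1
    # abc123.elb.us-east-1.amazonaws.com.
    # 52.0.10.1
    # 52.0.10.2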
@@ -154,7 +169,7 @@ jobs:
           done

           if [[ $attempt -gt $MAX_ATTEMPTS ]]; then
-            echo "DNS propagation check failed for $DOMAIN_NAME_1 after $MAX_ATTEMPTS attempts."
+            echo "DNS propagation check failed for $DOMAIN_NAME after $MAX_ATTEMPTS attempts."
             exit 1
           fi

@@ -191,27 +206,82 @@ jobs:
           IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
           env: ${{ github.event.inputs.env_flavour }}
         run: |
+          set -x
+
           app_name=${{github.event.inputs.build_service}}
           echo "building and deploying $app_name"
           docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
           export KUBECONFIG=/tmp/kubeconfig.yaml

-          case $app_name in
+          function build_and_deploy {
+            apps_to_build=$1
+            case $apps_to_build in
             backend*)
-              echo "In backend build"
-              cd backend
-              component=`echo $app_name | cut -d ':' -f 2`
+              echo "Building backend"
+              cd $GITHUB_WORKSPACE/backend
+              components=()
+              if [ $apps_to_build == "backend:all" ]; then
+                # Append all folder names from 'cmd/' directory to the array
+                for folder in cmd/*/; do
+                  # Use basename to extract the folder name without the path
+                  folder_name=$(basename "$folder")
+                  components+=("$folder_name")
+                done
+              else
+                # "${apps_to_build#*:}" strips "backend:" so read -ra receives app1,app2,app3
+                IFS=',' read -ra components <<< "${apps_to_build#*:}"
+              fi
+              echo "Building components:" "${components[@]}"
+              for component in "${components[@]}"; do
+                if docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} &> /dev/null; then
+                  echo "Image present upstream. Skipping build: $component"
+                else
+                  echo "Building backend:$component"
                   PUSH_IMAGE=1 bash -x ./build.sh $env $component
+                fi
                 kubectl set image -n app deployment/$component-openreplay $component=${DOCKER_REPO}/$component:${IMAGE_TAG}
+              done
+              ;;
+            chalice)
+              echo "Chalice build"
+              component=$apps_to_build
+              cd $GITHUB_WORKSPACE/api || (echo "Nothing to build: $apps_to_build"; exit 100)
+              if docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} &> /dev/null; then
+                echo "Image present upstream. Skipping build: $component"
+              else
+                echo "Building $component"
+                PUSH_IMAGE=1 bash -x ./build.sh $env $component
+              fi
+              kubectl set image -n app deployment/$apps_to_build-openreplay $apps_to_build=${DOCKER_REPO}/$apps_to_build:${IMAGE_TAG}
               ;;
             *)
-              set -x
-              cd $app_name || (Nothing to build: $app_name; exit 100)
-              PUSH_IMAGE=1 bash -x ./build.sh $env
-              kubectl set image -n app deployment/$app_name-openreplay $app_name=${DOCKER_REPO}/$app_name:${IMAGE_TAG}
+              echo "$apps_to_build build"
+              cd $GITHUB_WORKSPACE/$apps_to_build || (echo "Nothing to build: $apps_to_build"; exit 100)
+              component=$apps_to_build
+              if docker manifest inspect ${DOCKER_REPO}/$component:${IMAGE_TAG} &> /dev/null; then
+                echo "Image present upstream. Skipping build: $component"
+              else
+                echo "Building $component"
+                PUSH_IMAGE=1 bash -x ./build.sh $env $component
+              fi
+              kubectl set image -n app deployment/$apps_to_build-openreplay $apps_to_build=${DOCKER_REPO}/$apps_to_build:${IMAGE_TAG}
               ;;
             esac
+          }
+
+          case $app_name in
+            all)
+              build_and_deploy "backend:all"
+              build_and_deploy "frontend"
+              build_and_deploy "chalice"
+              build_and_deploy "sourcemapreader"
+              build_and_deploy "assist-stats"
+              ;;
+            *)
+              build_and_deploy $app_name
+              ;;
+          esac

       - name: Sent results to slack
         if: steps.vcluster_exists.outputs.failed == 'true'
         env:
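
With the new grammar, the workflow can be dispatched for a single service, a comma-separated backend subset, or everything. Hypothetical invocations via the GitHub CLI (the env_flavour value is illustrative):

    gh workflow run pr-env.yaml -f build_service=frontend -f env_flavour=foss
    gh workflow run pr-env.yaml -f build_service=backend:http,sink -f env_flavour=foss
    gh workflow run pr-env.yaml -f build_service=all -f env_flavour=foss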
@@ -256,10 +326,10 @@ jobs:
       #   run: |
       #     # Add any cleanup commands if necessary

-      # - name: Debug Job
-      #   if: failure()
-      #   uses: mxschmitt/action-tmate@v3
-      #   env:
-      #     DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-      #     IMAGE_TAG: ${{ github.sha }}
-      #     ENVIRONMENT: staging
+      - name: Debug Job
+        if: failure()
+        uses: mxschmitt/action-tmate@v3
+        env:
+          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+          IMAGE_TAG: ${{ github.sha }}
+          ENVIRONMENT: staging
@@ -1,4 +1,7 @@
 #!/bin/bash
+
+set -e
+
 if [ $# -lt 1 ]; then
   echo "bash $0 pr-number.openreplay.tools"
   echo "eg: bash $0 pr-111.openreplay.tools"
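
The added set -e makes the helper script abort on the first failing command instead of continuing with partial state. A minimal illustration of the behavior change:

    set -e
    false             # script exits here with status 1...
    echo "unreached"  # ...so this line never runs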
@@ -6,6 +6,7 @@ minio:
 postgresql:
   persistence: *persistence
 redis:
+  master:
     persistence: *persistence
 kafka:
   persistence: *persistence
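
The redis fix moves persistence under master:, which is where charts following the Bitnami Redis layout read it; at the top level the setting would be ignored. A quick way to confirm the expected key path, assuming the dependency is the Bitnami chart:

    helm show values bitnami/redis | grep -n -A3 '^master:'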
@@ -39,8 +40,10 @@ chalice:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
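
Switching from required to preferred pod affinity (here and in the matching blocks below) turns co-location into a soft constraint: the scheduler tries to place these pods together but still schedules them when no node satisfies the rule, which matches the commit note about helping pods schedule. If pods were stuck Pending under the required form, these checks would show it clearing up (sketch, assuming namespace app):

    kubectl get pods -n app --field-selector status.phase=Pending
    kubectl get events -n app --field-selector reason=FailedScheduling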
@@ -77,8 +80,10 @@ imagestorage:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
@@ -89,6 +94,7 @@ imagestorage:
                 - imagestorage
                 - videostorage
             topologyKey: kubernetes.io/hostname
+
 ingress-nginx:
   resources:
     requests:
@@ -111,8 +117,10 @@ sink:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
@@ -133,8 +141,10 @@ storage:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
@@ -155,8 +165,10 @@ videostorage:
       cpu: 50m
   affinity:
     podAffinity:
-      requiredDuringSchedulingIgnoredDuringExecution:
-        - labelSelector:
+      preferredDuringSchedulingIgnoredDuringExecution:
+        - weight: 1
+          podAffinityTerm:
+            labelSelector:
             matchExpressions:
               - key: app.kubernetes.io/name
                 operator: In
@@ -167,3 +179,4 @@ videostorage:
                 - imagestorage
                 - videostorage
             topologyKey: kubernetes.io/hostname
+