docker-compose upgrade (#1769)

* chore(docker-compose): keep the original directory

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(docker-compose): keep a backup of the user-edited common.env

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(docker-compose): add missing uxtesting feature

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* refactor(docker-compose): adding new services

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* refactor(docker-compose): don't reset the file while running

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(docker-compose): keep db versions unless explicitly overridden

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* docker-compose: proper container names

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* docker-compose: fix profiles

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* docker-compose: upgrade script

* docker-compose: script for migration

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* refactor(docker-compose): fix code spacing

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(docker-compose): replace values

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* refactor(docker-compose): adding db versions section

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(docker-compose): pg migration

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

---------

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
Rajesh Rajendran 2023-12-11 16:23:37 +01:00 committed by GitHub
parent d6450d9356
commit 5b9e1236d1
9 changed files with 202 additions and 85 deletions

View file

@@ -5,3 +5,9 @@ COMMON_S3_KEY="change_me_s3_key"
COMMON_S3_SECRET="change_me_s3_secret"
COMMON_PG_PASSWORD="change_me_pg_password"
COMMON_VERSION="v1.16.0"
## DB versions
######################################
POSTGRES_VERSION="14.5.0"
REDIS_VERSION="6.0.12-debian-10-r33"
MINIO_VERSION="2023.2.10-debian-11-r1"
######################################
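Because the merge step in the new migration script skips POSTGRES_VERSION, REDIS_VERSION and MINIO_VERSION, an operator can pin a different database image simply by editing these values in common.env before bringing the stack up. A hypothetical override (the tag below is illustrative only, not a tested recommendation):

    ## common.env — keep the shipped Postgres/MinIO defaults, pin a different Redis image
    REDIS_VERSION="6.2.14-debian-11-r0"   # example tag; check the bitnami/redis registry for valid tags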

View file

@@ -2,7 +2,7 @@ version: '3'
services:
postgresql:
image: bitnami/postgresql:14.5.0
image: bitnami/postgresql:${POSTGRES_VERSION}
container_name: postgres
volumes:
- pgdata:/var/lib/postgresql/data
@@ -12,7 +12,7 @@ services:
POSTGRESQL_PASSWORD: ${COMMON_PG_PASSWORD}
redis:
image: bitnami/redis:6.0.12-debian-10-r33
image: bitnami/redis:${REDIS_VERSION}
container_name: redis
volumes:
- redisdata:/var/lib/postgresql/data
@@ -22,7 +22,7 @@ services:
ALLOW_EMPTY_PASSWORD: "yes"
minio:
image: bitnami/minio:2023.2.10-debian-11-r1
image: bitnami/minio:${MINIO_VERSION}
container_name: minio
volumes:
- miniodata:/bitnami/minio/data
@@ -37,6 +37,8 @@ services:
fs-permission:
image: debian:stable-slim
container_name: fs-permission
profiles:
- "migration"
volumes:
- shared-volume:/mnt/efs
- miniodata:/mnt/minio
@@ -50,6 +52,8 @@ services:
minio-migration:
image: bitnami/minio:2020.10.9-debian-10-r6
container_name: minio-migration
profiles:
- "migration"
depends_on:
- minio
- fs-permission
@@ -77,6 +81,8 @@ services:
db-migration:
image: bitnami/postgresql:14.5.0
container_name: db-migration
profiles:
- "migration"
depends_on:
- postgresql
- minio-migration
@@ -102,183 +108,155 @@ services:
psql -v ON_ERROR_STOP=1 -f /tmp/init_schema.sql
frontend-openreplay:
image: public.ecr.aws/p1t3u8a3/frontend:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/frontend:v1.16.0
container_name: frontend
depends_on:
- db-migration
networks:
- opereplay-net
restart: on-failure
restart: unless-stopped
alerts-openreplay:
image: public.ecr.aws/p1t3u8a3/alerts:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/alerts:v1.16.0
container_name: alerts
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- alerts.env
restart: on-failure
restart: unless-stopped
assets-openreplay:
image: public.ecr.aws/p1t3u8a3/assets:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/assets:v1.16.0
container_name: assets
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- assets.env
restart: on-failure
restart: unless-stopped
assist-openreplay:
image: public.ecr.aws/p1t3u8a3/assist:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/assist:v1.16.0
container_name: assist
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- assist.env
restart: on-failure
restart: unless-stopped
db-openreplay:
image: public.ecr.aws/p1t3u8a3/db:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/db:v1.16.0
container_name: db
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- db.env
restart: on-failure
restart: unless-stopped
ender-openreplay:
image: public.ecr.aws/p1t3u8a3/ender:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/ender:v1.16.0
container_name: ender
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- ender.env
restart: on-failure
restart: unless-stopped
heuristics-openreplay:
image: public.ecr.aws/p1t3u8a3/heuristics:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/heuristics:v1.16.0
domainname: app.svc.cluster.local
container_name: heuristics
depends_on:
- db-migration
networks:
opereplay-net:
aliases:
- heuristics-openreplay.app.svc.cluster.local
env_file:
- heuristics.env
restart: on-failure
restart: unless-stopped
# imagestorage-openreplay:
# image: public.ecr.aws/p1t3u8a3/imagestorage:${COMMON_VERSION}
# depends_on:
# - db-migration
# networks:
# - opereplay-net
# restart: on-failure
imagestorage-openreplay:
image: public.ecr.aws/p1t3u8a3/imagestorage:v1.16.0
container_name: imagestorage
env_file:
- imagestorage.env
networks:
- opereplay-net
restart: unless-stopped
integrations-openreplay:
image: public.ecr.aws/p1t3u8a3/integrations:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/integrations:v1.16.0
container_name: integrations
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- integrations.env
restart: on-failure
restart: unless-stopped
peers-openreplay:
image: public.ecr.aws/p1t3u8a3/peers:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/peers:v1.16.0
container_name: peers
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- peers.env
restart: on-failure
restart: unless-stopped
sourcemapreader-openreplay:
image: public.ecr.aws/p1t3u8a3/sourcemapreader:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/sourcemapreader:v1.16.0
container_name: sourcemapreader
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- sourcemapreader.env
restart: on-failure
restart: unless-stopped
# videostorage-openreplay:
# image: public.ecr.aws/p1t3u8a3/videostorage:${COMMON_VERSION}
# depends_on:
# - db-migration
# networks:
# - opereplay-net
# env_file:
# - common.env
# restart: on-failure
videostorage-openreplay:
image: public.ecr.aws/p1t3u8a3/videostorage:v1.16.0
container_name: videostorage
networks:
- opereplay-net
env_file:
- videostorage.env
restart: unless-stopped
http-openreplay:
image: public.ecr.aws/p1t3u8a3/http:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/http:v1.16.0
container_name: http
depends_on:
- db-migration
networks:
- opereplay-net
env_file:
- http.env
restart: on-failure
restart: unless-stopped
chalice-openreplay:
image: public.ecr.aws/p1t3u8a3/chalice:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/chalice:v1.16.0
container_name: chalice
depends_on:
- db-migration
volumes:
- shared-volume:/mnt/efs
networks:
- opereplay-net
env_file:
- chalice.env
restart: on-failure
restart: unless-stopped
sink-openreplay:
image: public.ecr.aws/p1t3u8a3/sink:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/sink:v1.16.0
container_name: sink
depends_on:
- db-migration
volumes:
- shared-volume:/mnt/efs
networks:
- opereplay-net
env_file:
- sink.env
restart: on-failure
restart: unless-stopped
storage-openreplay:
image: public.ecr.aws/p1t3u8a3/storage:${COMMON_VERSION}
image: public.ecr.aws/p1t3u8a3/storage:v1.16.0
container_name: storage
depends_on:
- db-migration
volumes:
- shared-volume:/mnt/efs
networks:
- opereplay-net
env_file:
- storage.env
restart: on-failure
restart: unless-stopped
nginx-openreplay:
image: nginx:latest
@@ -287,7 +265,7 @@ services:
- opereplay-net
volumes:
- ./nginx.conf:/etc/nginx/conf.d/default.conf
restart: on-failure
restart: unless-stopped
caddy:
@@ -304,8 +282,8 @@ services:
- opereplay-net
environment:
- ACME_AGREE=true # Agree to Let's Encrypt Subscriber Agreement
- CADDY_DOMAIN=${COMMON_DOMAIN_NAME}
restart: on-failure
- CADDY_DOMAIN=or-foss.rjsh.me
restart: unless-stopped
volumes:
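With the migration profile attached to fs-permission, minio-migration and db-migration, a plain docker compose up no longer creates those one-off containers; they only start when the profile is requested explicitly, which is what the updated install script does. A minimal sketch of the two invocations (assuming the env files have already been rendered):

    # Day-to-day start/restart: migration containers are skipped
    sudo -E docker compose up -d

    # Install/upgrade: also run fs-permission, minio-migration and db-migration
    sudo -E docker compose --profile migration up -d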

View file

@@ -9,3 +9,4 @@ KAFKA_USE_SSL='false'
pg_password="${COMMON_PG_PASSWORD}"
POSTGRES_STRING="postgres://postgres:${COMMON_PG_PASSWORD}@postgresql:5432/postgres"
REDIS_STRING='redis://redis:6379'
BUCKET_NAME='uxtesting-records'

View file

@@ -0,0 +1,10 @@
AWS_ACCESS_KEY_ID=${COMMON_S3_KEY}
AWS_SECRET_ACCESS_KEY=${COMMON_S3_SECRET}
AWS_ENDPOINT='http://minio:9000'
AWS_REGION='us-east-1'
BUCKET_NAME=mobs
LICENSE_KEY=''
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
REDIS_STRING='redis://redis:6379'
FS_CLEAN_HRS='24'

View file

@@ -88,10 +88,9 @@ set +a
# Use the `envsubst` command to substitute the shell environment variables into reference_var.env and output to a combined .env
find ./ -type f \( -iname "*.env" -o -iname "docker-compose.yaml" \) ! -name "common.env" -exec /bin/bash -c 'file="{}"; git checkout -- "$file"; cp "$file" "$file.bak"; envsubst < "$file.bak" > "$file"; rm "$file.bak"' \;
sudo -E docker-compose pull --no-parallel
sudo -E docker-compose up -d
sudo -E docker compose --profile migration up -d
cp common.env common.env.bak
echo "🎉🎉🎉 Done! 🎉🎉🎉"
cp -rf ../docker-compose ~/docker-compose
rm -rf "$(git rev-parse --show-toplevel)"
info "Installation complete!! open https://${DOMAIN_NAME} 🐳"
info "${HOME}/docker-compose have the docker-compose file. you can use docker-compose stop/start"
info "${PWD} have the docker-compose file. you can use docker-compose stop/start"

View file

@@ -13,7 +13,7 @@ map $http_x_forwarded_proto $origin_proto {
server {
listen 80;
location ~ ^/(mobs|sessions-assets|frontend|static|sourcemaps|ios-images)/ {
location ~ ^/(mobs|sessions-assets|frontend|static|sourcemaps|ios-images|uxtesting-records)/ {
proxy_set_header X-Real-IP $remote_addr;
proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
proxy_set_header X-Forwarded-Proto $scheme;
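A quick way to confirm the widened location block now matches the new prefix, assuming the stack is reachable on localhost (the object key below is hypothetical):

    curl -I http://localhost/uxtesting-records/some-session-id/record.webm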

View file

@@ -1,9 +1,9 @@
# Load variables from common.env into the current shell's environment
set -a # automatically export all variables
set -a # automatically export all variables
source common.env
set +a
# Use the `envsubst` command to substitute the shell environment variables into reference_var.env and output to a combined .env
find ./ -type f \( -iname "*.env" -o -iname "docker-compose.yaml" \) ! -name "common.env" -exec /bin/bash -c 'file="{}"; git checkout -- "$file"; cp "$file" "$file.bak"; envsubst < "$file.bak" > "$file"; rm "$file.bak"' \;
find ./ -type f \( -iname "*.env" -o -iname "docker-compose.yaml" \) ! -name "common.env" -exec /bin/bash -c 'file="{}";cp "$file" "$file.bak"; envsubst < "$file.bak" > "$file"; rm "$file.bak"' \;
sudo -E docker-compose up -d

View file

@@ -0,0 +1,113 @@
#!/usr/bin/env bash
# Path to the original and new env files
original_env_file="$1"
# Check if the original env file exists and is not empty
if [ ! -s "$original_env_file" ]; then
echo "Error: The original env file is empty or does not exist."
echo "Usage: $0 /path/to/original.env"
exit 1
fi
new_env_file="./common.env"
temp_env_file=$(mktemp)
# Function to merge environment variables from original to new env file
function merge_envs() {
while IFS='=' read -r key value; do
# Skip the line if the key is COMMON_VERSION
case "$key" in
COMMON_VERSION)
original_version=$(echo "$value" | xargs)
continue
;;
COMMON_PG_PASSWORD)
pgpassword=$value
;;
POSTGRES_VERSION | REDIS_VERSION | MINIO_VERSION)
# Don't update db versions automatically.
continue
;;
esac
# Remove any existing entry from the new env file and add the new value
grep -v "^$key=" "$new_env_file" >"$temp_env_file"
mv "$temp_env_file" "$new_env_file"
echo "$key=$value" >>"$new_env_file"
done <"$original_env_file"
}
# Function to normalize version numbers for comparison
function normalise_version {
echo "$1" | awk -F. '{ printf("%03d%03d%03d\n", $1, $2, $3); }'
}
# Function to log messages
function log_message() {
echo "$@" >&2
}
# Function to create migration versions based on the current and previous application versions
function create_migration_versions() {
cd "${SCHEMA_DIR:-/opt/openreplay/openreplay/scripts/schema}" || {
log_message "not able to cd $SCHEMA_DIR"
exit 100
}
db=postgresql
# List all version directories excluding 'create' directory
all_versions=($(find db/init_dbs/$db -maxdepth 1 -type d -exec basename {} \; | grep -v create))
# Normalize the previous application version for comparison
PREVIOUS_APP_VERSION_NORMALIZED=$(normalise_version "${PREVIOUS_APP_VERSION}")
migration_versions=()
for ver in "${all_versions[@]}"; do
if [[ $(normalise_version "$ver") > "$PREVIOUS_APP_VERSION_NORMALIZED" ]]; then
migration_versions+=("$ver")
fi
done
# Join migration versions into a single string separated by commas
joined_migration_versions=$(
IFS=,
echo "${migration_versions[*]}"
)
# Return to the previous directory
cd - >/dev/null || {
log_message "not able to cd back"
exit 100
}
log_message "output: $joined_migration_versions"
echo "$joined_migration_versions"
}
# Function to perform migration
function migrate() {
# Set schema directory and previous application version
export SCHEMA_DIR="../schema/"
export PREVIOUS_APP_VERSION=${original_version#v}
# Create migration versions array
IFS=',' read -ra joined_migration_versions <<<"$(create_migration_versions)"
# Check if there are versions to migrate
[[ ${#joined_migration_versions[@]} -eq 0 ]] && {
echo "Nothing to migrate"
return
}
# Loop through versions and prepare Docker run commands
for ver in "${joined_migration_versions[@]}"; do
echo "$ver"
"docker run --rm --network openreplay-net \
--name pgmigrate -e 'PGHOST=postgres' -e 'PGPORT=5432' \
-e 'PGDATABASE=postgres' -e 'PGUSER=postgres' -e 'PGPASSWORD=$pgpassword' \
-v /opt/data/:$SCHEMA_DIR postgres psql -f /opt/data/schema/db/init_dbs/postgresql/$ver/$ver.sql"
done
}
# Merge environment variables and perform migration
merge_envs
migrate
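The script expects the path to the pre-upgrade common.env as its only argument, and resolves ./common.env and the ../schema/ directory relative to the directory it is run from. A hypothetical invocation (the script file name and backup path are assumptions, not part of this commit):

    cd ~/docker-compose
    bash migrate.sh ./common.env.bak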

View file

@@ -0,0 +1,10 @@
AWS_ACCESS_KEY_ID=${COMMON_S3_KEY}
AWS_SECRET_ACCESS_KEY=${COMMON_S3_SECRET}
AWS_ENDPOINT='http://minio:9000'
AWS_REGION='us-east-1'
BUCKET_NAME=mobs
LICENSE_KEY=''
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
REDIS_STRING='redis://redis:6379'
FS_CLEAN_HRS='24'