Docker (#1668)
* docker-compose: Adding docker-compose installation * docker-compose: init scripts * fix: shell expansion Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com> --------- Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>
This commit is contained in:
parent
778de0c39e
commit
e0186a73c6
21 changed files with 725 additions and 0 deletions
3
scripts/docker-compose/Caddyfile
Normal file
3
scripts/docker-compose/Caddyfile
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
# Caddy site block: terminate TLS for the configured domain (CADDY_DOMAIN
# is set from COMMON_DOMAIN_NAME in docker-compose.yaml) and forward all
# traffic to the internal nginx front door on the compose network.
{$CADDY_DOMAIN} {
    reverse_proxy nginx-openreplay:80
}
|
||||
27
scripts/docker-compose/alerts.env
Normal file
27
scripts/docker-compose/alerts.env
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
# Environment for the "alerts" service.
# The ${COMMON_*} placeholders are replaced from common.env by envsubst
# (see install.sh / run.sh) before docker-compose loads this file.

# Assist integration (shares the common JWT secret)
ASSIST_JWT_SECRET=${COMMON_JWT_SECRET}
ASSIST_KEY=${COMMON_JWT_SECRET}
ASSIST_RECORDS_BUCKET=records
ASSIST_URL="http://assist-openreplay:9001/assist/%s"

AWS_DEFAULT_REGION="us-east-1"
CH_COMPRESSION="false"
PYTHONUNBUFFERED="0"
REDIS_STRING="redis://redis:6379"

# S3-compatible storage is reached through the public domain (Caddy/nginx
# proxy the bucket paths to MinIO).
S3_HOST="${COMMON_PROTOCOL}://${COMMON_DOMAIN_NAME}"
S3_KEY="${COMMON_S3_KEY}"
S3_SECRET="${COMMON_S3_SECRET}"
SITE_URL="${COMMON_PROTOCOL}://${COMMON_DOMAIN_NAME}"

# ClickHouse connection.
# NOTE(review): no "clickhouse" service is defined in docker-compose.yaml —
# confirm whether ClickHouse-backed features are expected to work here.
ch_host="clickhouse"
ch_port="9000"
ch_username="default"

js_cache_bucket=sessions-assets
jwt_secret="${COMMON_JWT_SECRET}"

# PostgreSQL (service name "postgresql" in docker-compose.yaml)
pg_dbname="postgres"
pg_host="postgresql"
pg_password="${COMMON_PG_PASSWORD}"

# Bucket names (created by minio-migration)
sessions_bucket=mobs
sessions_region="us-east-1"
sourcemaps_bucket=sourcemaps
sourcemaps_reader="http://sourcemapreader-openreplay:9000/sourcemaps/%s/sourcemaps"
version_number="${COMMON_VERSION}"

# Kubernetes-only settings, intentionally blank under docker-compose.
CLUSTER_URL=""
POD_NAMESPACE=""
|
||||
10
scripts/docker-compose/assets.env
Normal file
10
scripts/docker-compose/assets.env
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
# Environment for the "assets" service (caches static session assets).
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

AWS_ACCESS_KEY_ID=${COMMON_S3_KEY}
AWS_SECRET_ACCESS_KEY=${COMMON_S3_SECRET}
BUCKET_NAME=sessions-assets
LICENSE_KEY=''
AWS_ENDPOINT='http://minio:9000'
AWS_REGION='us-east-1'
# NOTE(review): Kubernetes service DNS leftover; no kafka service exists in
# this compose stack — presumably unused when Kafka is disabled. Confirm.
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
ASSETS_ORIGIN='https://${COMMON_DOMAIN_NAME}:443/sessions-assets'
REDIS_STRING='redis://redis:6379'
|
||||
11
scripts/docker-compose/assist.env
Normal file
11
scripts/docker-compose/assist.env
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
# Environment for the "assist" (live session) service.
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

ASSIST_JWT_SECRET=${COMMON_JWT_SECRET}
ASSIST_KEY=${COMMON_JWT_SECRET}
AWS_DEFAULT_REGION="us-east-1"
S3_HOST="https://${COMMON_DOMAIN_NAME}:443"
# FIX: were the hard-coded placeholders changeMeMinioAccessKey /
# changeMeMinioPassword, which never matched the MinIO root credentials
# created from common.env (MINIO_ROOT_USER / MINIO_ROOT_PASSWORD in
# docker-compose.yaml). Use the common values like every other service.
S3_KEY=${COMMON_S3_KEY}
S3_SECRET=${COMMON_S3_SECRET}
REDIS_URL=redis
CLEAR_SOCKET_TIME='720'
debug='0'
redis='false'
uws='false'
|
||||
27
scripts/docker-compose/chalice.env
Normal file
27
scripts/docker-compose/chalice.env
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
# Environment for the "chalice" (API) service.
# The ${COMMON_*} placeholders are replaced from common.env by envsubst
# (see install.sh / run.sh) before docker-compose loads this file.

# Assist integration (shares the common JWT secret)
ASSIST_JWT_SECRET=${COMMON_JWT_SECRET}
ASSIST_KEY=${COMMON_JWT_SECRET}
ASSIST_RECORDS_BUCKET=records
ASSIST_URL="http://assist-openreplay:9001/assist/%s"

AWS_DEFAULT_REGION="us-east-1"
CH_COMPRESSION="false"
PYTHONUNBUFFERED="0"
REDIS_STRING="redis://redis:6379"

# S3-compatible storage is reached through the public domain (Caddy/nginx
# proxy the bucket paths to MinIO).
S3_HOST="${COMMON_PROTOCOL}://${COMMON_DOMAIN_NAME}"
S3_KEY="${COMMON_S3_KEY}"
S3_SECRET="${COMMON_S3_SECRET}"
SITE_URL="${COMMON_PROTOCOL}://${COMMON_DOMAIN_NAME}"

# ClickHouse connection.
# NOTE(review): no "clickhouse" service is defined in docker-compose.yaml —
# confirm whether ClickHouse-backed features are expected to work here.
ch_host="clickhouse"
ch_port="9000"
ch_username="default"

js_cache_bucket=sessions-assets
jwt_secret="${COMMON_JWT_SECRET}"

# PostgreSQL (service name "postgresql" in docker-compose.yaml)
pg_dbname="postgres"
pg_host="postgresql"
pg_password="${COMMON_PG_PASSWORD}"

# Bucket names (created by minio-migration)
sessions_bucket=mobs
sessions_region="us-east-1"
sourcemaps_bucket=sourcemaps
sourcemaps_reader="http://sourcemapreader-openreplay:9000/sourcemaps/%s/sourcemaps"
version_number="${COMMON_VERSION}"

# Kubernetes-only settings, intentionally blank under docker-compose.
CLUSTER_URL=""
POD_NAMESPACE=""
|
||||
7
scripts/docker-compose/common.env
Executable file
7
scripts/docker-compose/common.env
Executable file
|
|
@ -0,0 +1,7 @@
|
|||
# Shared settings consumed by every *.env file and docker-compose.yaml.
# The change_me_* placeholders are replaced with the user's domain and
# freshly generated secrets by create_passwords() in install.sh; the
# other files are then rendered from these values via envsubst.

COMMON_PROTOCOL="https"
COMMON_DOMAIN_NAME="change_me_domain"
COMMON_JWT_SECRET="change_me_jwt"
COMMON_S3_KEY="change_me_s3_key"
COMMON_S3_SECRET="change_me_s3_secret"
COMMON_PG_PASSWORD="change_me_pg_password"
COMMON_VERSION="v1.15.0"
|
||||
11
scripts/docker-compose/db.env
Normal file
11
scripts/docker-compose/db.env
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
# Environment for the "db" service (persists session events).
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

CH_USERNAME='default'
CH_PASSWORD=''
# NOTE(review): the clickhouse/kafka addresses below are Kubernetes
# service DNS names that do not resolve on this compose network, and no
# such services are defined in docker-compose.yaml — presumably these
# backends are disabled in the compose deployment; confirm.
CLICKHOUSE_STRING='clickhouse-openreplay-clickhouse.db.svc.cluster.local:9000/default'
LICENSE_KEY=''
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
pg_password="${COMMON_PG_PASSWORD}"
QUICKWIT_ENABLED='false'
POSTGRES_STRING="postgres://postgres:${COMMON_PG_PASSWORD}@postgresql:5432/postgres"
REDIS_STRING='redis://redis:6379'
ch_db='default'
|
||||
320
scripts/docker-compose/docker-compose.yaml
Normal file
320
scripts/docker-compose/docker-compose.yaml
Normal file
|
|
@ -0,0 +1,320 @@
|
|||
# OpenReplay single-host docker-compose deployment.
# The COMMON_* placeholders are substituted from common.env by envsubst
# (see install.sh / run.sh) before this file reaches docker-compose.
version: '3'

services:
  # ---------- backing stores ----------
  postgresql:
    image: bitnami/postgresql:14.5.0
    container_name: postgres
    volumes:
      - pgdata:/var/lib/postgresql/data
    networks:
      - openreplay-net
    environment:
      POSTGRESQL_PASSWORD: ${COMMON_PG_PASSWORD}

  redis:
    image: bitnami/redis:6.0.12-debian-10-r33
    container_name: redis
    volumes:
      # FIX: previously mounted redisdata at /var/lib/postgresql/data
      # (copy/paste of the postgres mount), so redis state was never
      # persisted. bitnami/redis keeps its data under /bitnami/redis/data.
      - redisdata:/bitnami/redis/data
    networks:
      - openreplay-net
    environment:
      ALLOW_EMPTY_PASSWORD: "yes"

  minio:
    image: bitnami/minio:2023.2.10-debian-11-r1
    container_name: minio
    volumes:
      - miniodata:/bitnami/minio/data
    networks:
      - openreplay-net
    ports:
      # Quoted so YAML cannot misread the port pair as a number.
      - "9001:9001"
    environment:
      MINIO_ROOT_USER: ${COMMON_S3_KEY}
      MINIO_ROOT_PASSWORD: ${COMMON_S3_SECRET}

  # One-shot job: give the non-root (uid 1001) bitnami images ownership
  # of their volumes before the real services start.
  fs-permission:
    image: debian:stable-slim
    container_name: fs-permission
    volumes:
      - shared-volume:/mnt/efs
      - miniodata:/mnt/minio
      - pgdata:/mnt/postgres
    entrypoint:
      - /bin/bash
      - -c
      - |
        chown -R 1001:1001 /mnt/{efs,minio,postgres}

  # One-shot job: create the MinIO buckets once the server is reachable.
  minio-migration:
    image: bitnami/minio:2020.10.9-debian-10-r6
    container_name: minio-migration
    depends_on:
      - minio
      - fs-permission
    networks:
      - openreplay-net
    volumes:
      - ../helmcharts/openreplay/files/minio.sh:/tmp/minio.sh
    environment:
      MINIO_HOST: http://minio:9000
      MINIO_ACCESS_KEY: ${COMMON_S3_KEY}
      MINIO_SECRET_KEY: ${COMMON_S3_SECRET}
    user: root
    entrypoint:
      - /bin/bash
      - -c
      - |
        apt update && apt install netcat -y
        # Wait for Minio to be ready
        until nc -z -v -w30 minio 9000; do
          echo "Waiting for Minio server to be ready..."
          sleep 1
        done
        bash /tmp/minio.sh init || exit 100

  # One-shot job: apply the initial PostgreSQL schema.
  db-migration:
    image: bitnami/postgresql:14.5.0
    container_name: db-migration
    depends_on:
      - postgresql
      - minio-migration
    networks:
      - openreplay-net
    volumes:
      - ../schema/db/init_dbs/postgresql/init_schema.sql:/tmp/init_schema.sql
    environment:
      PGHOST: postgresql
      PGPORT: 5432
      PGDATABASE: postgres
      PGUSER: postgres
      PGPASSWORD: ${COMMON_PG_PASSWORD}
    entrypoint:
      - /bin/bash
      - -c
      - |
        until PGPASSWORD=${COMMON_PG_PASSWORD} psql -h postgresql -U postgres -d postgres -c '\q'; do
          echo "PostgreSQL is unavailable - sleeping"
          sleep 1
        done
        echo "PostgreSQL is up - executing command"
        psql -v ON_ERROR_STOP=1 -f /tmp/init_schema.sql

  # ---------- OpenReplay application services ----------
  frontend-openreplay:
    image: public.ecr.aws/p1t3u8a3/frontend:${COMMON_VERSION}
    container_name: frontend
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    restart: on-failure

  alerts-openreplay:
    image: public.ecr.aws/p1t3u8a3/alerts:${COMMON_VERSION}
    container_name: alerts
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - alerts.env
    restart: on-failure

  assets-openreplay:
    image: public.ecr.aws/p1t3u8a3/assets:${COMMON_VERSION}
    container_name: assets
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - assets.env
    restart: on-failure

  assist-openreplay:
    image: public.ecr.aws/p1t3u8a3/assist:${COMMON_VERSION}
    container_name: assist
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - assist.env
    restart: on-failure

  db-openreplay:
    image: public.ecr.aws/p1t3u8a3/db:${COMMON_VERSION}
    container_name: db
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - db.env
    restart: on-failure

  ender-openreplay:
    image: public.ecr.aws/p1t3u8a3/ender:${COMMON_VERSION}
    container_name: ender
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - ender.env
    restart: on-failure

  heuristics-openreplay:
    image: public.ecr.aws/p1t3u8a3/heuristics:${COMMON_VERSION}
    # The alias below mimics the Kubernetes service DNS name that other
    # services expect from the helm deployment.
    domainname: app.svc.cluster.local
    container_name: heuristics
    depends_on:
      - db-migration
    networks:
      openreplay-net:
        aliases:
          - heuristics-openreplay.app.svc.cluster.local
    env_file:
      - heuristics.env
    restart: on-failure

  # imagestorage-openreplay:
  #   image: public.ecr.aws/p1t3u8a3/imagestorage:${COMMON_VERSION}
  #   depends_on:
  #     - db-migration
  #   networks:
  #     - openreplay-net
  #   restart: on-failure

  integrations-openreplay:
    image: public.ecr.aws/p1t3u8a3/integrations:${COMMON_VERSION}
    container_name: integrations
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - integrations.env
    restart: on-failure

  peers-openreplay:
    image: public.ecr.aws/p1t3u8a3/peers:${COMMON_VERSION}
    container_name: peers
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - peers.env
    restart: on-failure

  sourcemapreader-openreplay:
    image: public.ecr.aws/p1t3u8a3/sourcemaps-reader:${COMMON_VERSION}
    container_name: sourcemaps-reader
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - sourcemaps-reader.env
    restart: on-failure

  # videostorage-openreplay:
  #   image: public.ecr.aws/p1t3u8a3/videostorage:${COMMON_VERSION}
  #   depends_on:
  #     - db-migration
  #   networks:
  #     - openreplay-net
  #   env_file:
  #     - common.env
  #   restart: on-failure

  http-openreplay:
    image: public.ecr.aws/p1t3u8a3/http:${COMMON_VERSION}
    container_name: http
    depends_on:
      - db-migration
    networks:
      - openreplay-net
    env_file:
      - http.env
    restart: on-failure

  chalice-openreplay:
    image: public.ecr.aws/p1t3u8a3/chalice:${COMMON_VERSION}
    container_name: chalice
    depends_on:
      - db-migration
    volumes:
      - shared-volume:/mnt/efs
    networks:
      - openreplay-net
    env_file:
      - chalice.env
    restart: on-failure

  sink-openreplay:
    image: public.ecr.aws/p1t3u8a3/sink:${COMMON_VERSION}
    container_name: sink
    depends_on:
      - db-migration
    volumes:
      - shared-volume:/mnt/efs
    networks:
      - openreplay-net
    env_file:
      - sink.env
    restart: on-failure

  storage-openreplay:
    image: public.ecr.aws/p1t3u8a3/storage:${COMMON_VERSION}
    container_name: storage
    depends_on:
      - db-migration
    volumes:
      - shared-volume:/mnt/efs
    networks:
      - openreplay-net
    env_file:
      - storage.env
    restart: on-failure

  # ---------- edge / routing ----------
  nginx-openreplay:
    image: nginx:latest
    container_name: nginx
    networks:
      - openreplay-net
    volumes:
      - ./nginx.conf:/etc/nginx/conf.d/default.conf
    restart: on-failure

  # Caddy terminates TLS for the public domain and forwards to nginx.
  caddy:
    image: caddy:latest
    container_name: caddy
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config
    networks:
      - openreplay-net
    environment:
      - ACME_AGREE=true # Agree to Let's Encrypt Subscriber Agreement
      - CADDY_DOMAIN=${COMMON_DOMAIN_NAME}
    restart: on-failure

volumes:
  pgdata:
  redisdata:
  miniodata:
  shared-volume:
  caddy_data:
  caddy_config:

networks:
  # FIX: renamed from the misspelled "opereplay-net". The name is only
  # referenced inside this file, so the rename is self-contained.
  openreplay-net:
|
||||
47
scripts/docker-compose/docker-install.sh
Normal file
47
scripts/docker-compose/docker-install.sh
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
#!/bin/bash
# Wrapper installer: clones the OpenReplay repository and hands off to
# scripts/docker-compose/install.sh inside the clone.

REPO_URL="https://github.com/openreplay/openreplay"

# Ask for the branch/tag to clone.
# FIX: the old prompt claimed the default was 'latest' while the code
# actually fell back to 'main'; the prompt now matches the behavior.
read -rp "Enter the version to clone (default is 'main'): " REPO_BRANCH
REPO_BRANCH=${REPO_BRANCH:-main}

# Directory in which to clone the repository
CLONE_DIR="openreplay"

# --- logging helpers ---
info() {
    echo -e "\033[0;32m[INFO] $1 \033[0m"
}

error() {
    echo -e "\033[0;31m[ERROR] $1 \033[0m"
    exit 1
}

# Check if git is installed
if ! command -v git &> /dev/null; then
    error "Git is not installed. Please install Git and run this script again."
fi

# Clone the repository (shallow clone of the requested branch/tag only)
if git clone --depth 1 --branch "$REPO_BRANCH" "$REPO_URL" "$CLONE_DIR"; then
    info "Repository cloned successfully."
else
    error "Failed to clone the repository."
fi

# Navigate into the compose directory inside the clone.
# FIX: the failure message now names the directory actually being entered
# (previously it reported only $CLONE_DIR).
cd "$CLONE_DIR/scripts/docker-compose" || error "The directory $CLONE_DIR/scripts/docker-compose does not exist."

# Path to the script to run
SCRIPT_PATH="./install.sh"

# Check if the script exists, then run it
if [[ -f "$SCRIPT_PATH" ]]; then
    bash "$SCRIPT_PATH"
else
    error "The script $SCRIPT_PATH does not exist or is not executable."
fi

# End of wrapper script
|
||||
|
||||
6
scripts/docker-compose/ender.env
Normal file
6
scripts/docker-compose/ender.env
Normal file
|
|
@ -0,0 +1,6 @@
|
|||
# Environment for the "ender" service (closes finished sessions).
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

LICENSE_KEY=''
# NOTE(review): Kubernetes DNS leftover; no kafka service in this stack.
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
pg_password="${COMMON_PG_PASSWORD}"
POSTGRES_STRING="postgres://postgres:${COMMON_PG_PASSWORD}@postgresql:5432/postgres"
REDIS_STRING='redis://redis:6379'
|
||||
4
scripts/docker-compose/heuristics.env
Normal file
4
scripts/docker-compose/heuristics.env
Normal file
|
|
@ -0,0 +1,4 @@
|
|||
# Environment for the "heuristics" service.

LICENSE_KEY=''
# NOTE(review): Kubernetes DNS leftover; no kafka service in this stack.
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
REDIS_STRING='redis://redis:6379'
|
||||
11
scripts/docker-compose/http.env
Normal file
11
scripts/docker-compose/http.env
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
# Environment for the "http" ingest service (receives tracker beacons
# proxied via nginx /ingest/).
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

CACHE_ASSETS='true'
# NOTE(review): static placeholder secret checked into the repo; consider
# generating it in install.sh like the common.env passwords.
TOKEN_SECRET='secret_token_string'
AWS_ACCESS_KEY_ID=${COMMON_S3_KEY}
AWS_SECRET_ACCESS_KEY=${COMMON_S3_SECRET}
AWS_REGION='us-east-1'
LICENSE_KEY=''
# NOTE(review): Kubernetes DNS leftover; no kafka service in this stack.
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
pg_password="${COMMON_PG_PASSWORD}"
POSTGRES_STRING="postgres://postgres:${COMMON_PG_PASSWORD}@postgresql:5432/postgres"
REDIS_STRING='redis://redis:6379'
|
||||
97
scripts/docker-compose/install.sh
Normal file
97
scripts/docker-compose/install.sh
Normal file
|
|
@ -0,0 +1,97 @@
|
|||
#!/bin/bash

# Interactive installer for the OpenReplay docker-compose stack:
# installs Docker, generates secrets into common.env, renders the
# per-service env files with envsubst and brings the stack up.

set -e

# Color codes for pretty printing
RED='\033[0;31m'
GREEN='\033[0;32m'
YELLOW='\033[0;33m'
NC='\033[0m' # No Color

# --- Helper functions for logs ---
info() {
    echo -e "${GREEN}[INFO] $1 ${NC} 👍"
}

warn() {
    echo -e "${YELLOW}[WARN] $1 ${NC} ⚠️"
}

fatal() {
    echo -e "${RED}[FATAL] $1 ${NC} 🔥"
    exit 1
}

# Function to check if a command exists
function exists() {
    type "$1" &> /dev/null
}

# Generate a random password using openssl, installing it on demand
randomPass() {
    exists openssl || {
        info "Installing openssl... 🔐"
        sudo apt update &> /dev/null
        sudo apt install openssl -y &> /dev/null
    }
    openssl rand -hex 10
}

# Replace the change_me_* placeholders in common.env with the chosen
# domain and freshly generated secrets.
# FIX: use '|' as the sed delimiter for the domain substitution so a
# stray '/' in user input cannot corrupt the sed expression (the
# generated hex passwords can never contain '/', so '/' stays safe there).
function create_passwords() {
    info "Creating dynamic passwords..."
    sed -i "s|change_me_domain|${DOMAIN_NAME}|g" common.env
    sed -i "s/change_me_jwt/$(randomPass)/g" common.env
    sed -i "s/change_me_s3_key/$(randomPass)/g" common.env
    sed -i "s/change_me_s3_secret/$(randomPass)/g" common.env
    sed -i "s/change_me_pg_password/$(randomPass)/g" common.env
    info "Passwords created and updated in common.env file."
}

# update apt cache
info "Grabbing latest apt caches"
sudo apt update

# setup docker
info "Setting up Docker"
sudo apt install docker.io docker-compose -y

# enable docker without sudo (takes effect on the next login)
sudo usermod -aG docker "${USER}" || true

# Prompt for DOMAIN_NAME input
echo -e "${GREEN}Please provide your domain name.${NC}"
echo "Let's get the exact domain OpenReplay will be installed on"
echo "Make sure that you have a Host A DNS record pointing to this instance!"
echo "This will be used for TLS 🔐"
echo -e "ie: my-openreplay.company.com (NOT an IP address)\n"

echo -e "${GREEN}"
read -rp "Enter DOMAIN_NAME: " DOMAIN_NAME
echo -e "${NC}"
if [[ -z $DOMAIN_NAME ]]; then
    fatal "DOMAIN_NAME variable is empty. Please provide a valid domain name to proceed."
fi
info "Using domain name: $DOMAIN_NAME 🌐"

# Create passwords if they don't exist
create_passwords

info "Starting the application with Docker... 🐳"
# Load variables from common.env into the current shell's environment
set -a # automatically export all variables
source common.env
set +a

# Render every *.env file and docker-compose.yaml through envsubst
# (common.env itself is left untouched); each file is restored from git
# first so the substitution always starts from the pristine template.
# FIX: pass the filename as a positional argument instead of splicing
# {} into the bash -c string (shellcheck SC2156) — the old form broke on
# unusual filenames and was a command-injection hazard.
find ./ -type f \( -iname "*.env" -o -iname "docker-compose.yaml" \) ! -name "common.env" -exec /bin/bash -c 'file="$1"; git checkout -- "$file"; cp "$file" "$file.bak"; envsubst < "$file.bak" > "$file"; rm "$file.bak"' _ {} \;
sudo -E docker-compose pull --no-parallel
sudo -E docker-compose up -d
echo "🎉🎉🎉 Done! 🎉🎉🎉"

# Keep a copy of the rendered compose directory, then remove the clone.
cp -rf ../docker-compose ~/openreplay-docker-compose
rm -rf "$(git rev-parse --show-toplevel)"
info "Installation complete!! open https://${DOMAIN_NAME} 🐳"
info "${HOME}/openreplay-docker-compose have the docker-compose file. you can use docker-compose stop/start"
|
||||
7
scripts/docker-compose/integrations.env
Normal file
7
scripts/docker-compose/integrations.env
Normal file
|
|
@ -0,0 +1,7 @@
|
|||
# Environment for the "integrations" service.
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

LICENSE_KEY=''
# NOTE(review): Kubernetes DNS leftover; no kafka service in this stack.
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
pg_password=${COMMON_PG_PASSWORD}
POSTGRES_STRING="postgres://postgres:${COMMON_PG_PASSWORD}@postgresql:5432/postgres"
REDIS_STRING='redis://redis:6379'
# NOTE(review): static placeholder secret; should match http.env's value
# if both sides validate tokens — confirm, and consider generating it.
TOKEN_SECRET='secret_token_string'
|
||||
92
scripts/docker-compose/nginx.conf
Normal file
92
scripts/docker-compose/nginx.conf
Normal file
|
|
@ -0,0 +1,92 @@
|
|||
# Internal nginx front door for OpenReplay. Caddy terminates TLS and
# proxies here; this config routes storage, ingest, API, assist and SPA
# traffic to the corresponding compose services.

# Extract the first client IP from X-Forwarded-For, falling back to the
# direct peer address.
map $http_x_forwarded_for $real_ip {
    ~^(\d+\.\d+\.\d+\.\d+) $1;
    default $remote_addr;
}
# Standard WebSocket upgrade helper.
# NOTE(review): $connection_upgrade is never referenced below — the
# upgrade locations hard-code Connection "Upgrade"; confirm if unused.
map $http_upgrade $connection_upgrade {
    default upgrade;
    '' close;
}
# Preserve the original scheme when sitting behind another proxy.
map $http_x_forwarded_proto $origin_proto {
    default $http_x_forwarded_proto;
    '' $scheme;
}

server {
    listen 80;

    # Session artifacts and static assets stored in MinIO buckets.
    location ~ ^/(mobs|sessions-assets|frontend|static|sourcemaps|ios-images)/ {
        proxy_set_header X-Real-IP $remote_addr;
        proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for;
        proxy_set_header X-Forwarded-Proto $scheme;
        proxy_set_header Host $http_host;

        proxy_connect_timeout 300;
        # Default is HTTP/1, keepalive is only enabled in HTTP/1.1
        proxy_http_version 1.1;
        proxy_set_header Connection "";
        chunked_transfer_encoding off;

        proxy_pass http://minio:9000;
    }

    # MinIO API/console passthrough.
    location /minio/ {
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
        proxy_pass http://minio:9000;
    }

    # Tracker ingest (recording beacons) -> "http" service.
    location /ingest/ {
        rewrite ^/ingest/(.*) /$1 break;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header X-Forwarded-For $real_ip;
        proxy_set_header X-Forwarded-Host $real_ip;
        proxy_set_header X-Real-IP $real_ip;
        proxy_set_header Host $host;
        proxy_pass http://http-openreplay:8080;
        proxy_read_timeout 300;
        proxy_connect_timeout 120;
        proxy_send_timeout 300;
        # CORS Headers
        add_header 'Access-Control-Allow-Origin' '*';
        add_header 'Access-Control-Allow-Methods' 'POST';
        add_header 'Access-Control-Allow-Headers' 'Content-Type,Authorization,Content-Encoding';
        add_header 'Access-Control-Expose-Headers' 'Content-Length';
    }

    # Dashboard/API backend (chalice).
    location /api/ {
        rewrite ^/api/(.*) /$1 break;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
        proxy_set_header X-Forwarded-Proto $origin_proto;
        proxy_pass http://chalice-openreplay:8000;
    }

    # Assist signaling (peers service).
    location /assist/ {
        rewrite ^/assist/(.*) /$1 break;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
        proxy_pass http://peers-openreplay:9000;
    }

    # Assist websockets.
    location /ws-assist/ {
        rewrite ^/ws-assist/(.*) /$1 break;
        proxy_http_version 1.1;
        proxy_set_header Upgrade $http_upgrade;
        proxy_set_header Connection "Upgrade";
        proxy_set_header Host $host;
        proxy_pass http://assist-openreplay:9001;
    }

    # SPA frontend: any path that is not a static asset is rewritten to
    # /index.html so client-side routing works on hard reloads.
    location / {
        index /index.html;
        rewrite ^((?!.(js|css|png|svg|jpg|woff|woff2)).)*$ /index.html break;
        proxy_set_header Host $http_host;
        proxy_pass http://frontend-openreplay:8080;
        proxy_intercept_errors on; # see http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors
        error_page 404 =200 /index.html;
    }
}
|
||||
3
scripts/docker-compose/peers.env
Normal file
3
scripts/docker-compose/peers.env
Normal file
|
|
@ -0,0 +1,3 @@
|
|||
# Environment for the "peers" (assist signaling) service.
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

# FIX: was the literal placeholder "SetARandomStringHere". Every other
# service (assist.env, alerts.env, chalice.env) derives ASSIST_KEY from
# COMMON_JWT_SECRET; the key must match across services for assist
# calls to authenticate — TODO confirm against the peers service code.
ASSIST_KEY=${COMMON_JWT_SECRET}
S3_KEY=${COMMON_S3_KEY}
debug='0'
|
||||
8
scripts/docker-compose/readme.md
Normal file
8
scripts/docker-compose/readme.md
Normal file
|
|
@ -0,0 +1,8 @@
|
|||
|
||||
Source the shared variable file into the current shell, auto-exporting
every variable, before running any docker-compose commands:

```bash
set -a
source common.env
set +a
```
|
||||
9
scripts/docker-compose/run.sh
Normal file
9
scripts/docker-compose/run.sh
Normal file
|
|
@ -0,0 +1,9 @@
|
|||
#!/bin/bash
# Re-render the per-service env files from common.env and (re)start the
# stack. Mirrors the tail end of install.sh for subsequent runs.
# FIX: added the missing shebang (the file previously began with a
# comment, so the interpreter was left to the caller's shell).

# Load variables from common.env into the current shell's environment
set -a # automatically export all variables
source common.env
set +a

# Render every *.env file and docker-compose.yaml through envsubst
# (common.env itself is left untouched); each file is restored from git
# first so the substitution always starts from the pristine template.
# FIX: pass the filename as a positional argument instead of splicing
# {} into the bash -c string (shellcheck SC2156) — the old form broke on
# unusual filenames and was a command-injection hazard.
find ./ -type f \( -iname "*.env" -o -iname "docker-compose.yaml" \) ! -name "common.env" -exec /bin/bash -c 'file="$1"; git checkout -- "$file"; cp "$file" "$file.bak"; envsubst < "$file.bak" > "$file"; rm "$file.bak"' _ {} \;

sudo -E docker-compose up -d
|
||||
5
scripts/docker-compose/sink.env
Normal file
5
scripts/docker-compose/sink.env
Normal file
|
|
@ -0,0 +1,5 @@
|
|||
# Environment for the "sink" service.
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

LICENSE_KEY=''
# NOTE(review): Kubernetes DNS leftover; no kafka service in this stack.
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
ASSETS_ORIGIN="https://${COMMON_DOMAIN_NAME}:443/sessions-assets"
REDIS_STRING='redis://redis:6379'
|
||||
10
scripts/docker-compose/sourcemaps-reader.env
Normal file
10
scripts/docker-compose/sourcemaps-reader.env
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
# Environment for the "sourcemaps-reader" service.
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

SMR_HOST='0.0.0.0'
AWS_ACCESS_KEY_ID=${COMMON_S3_KEY}
AWS_SECRET_ACCESS_KEY=${COMMON_S3_SECRET}
AWS_REGION='us-east-1'
LICENSE_KEY=''
REDIS_STRING='redis://redis:6379'
# NOTE(review): Kubernetes DNS leftover; no kafka service in this stack.
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
# FIX: the host was "postgresql.db.svc.cluster.local" (Kubernetes
# service DNS), which does not resolve on the compose network; the
# container is reachable simply as "postgresql", as in every other
# *.env file here.
POSTGRES_STRING="postgres://postgres:${COMMON_PG_PASSWORD}@postgresql:5432/postgres"
# FIX: the scheme was the invalid "sourcemapreaders://"; assets are
# served over https exactly as in sink.env and assets.env.
ASSETS_ORIGIN="https://${COMMON_DOMAIN_NAME}:443/sessions-assets"
|
||||
10
scripts/docker-compose/storage.env
Normal file
10
scripts/docker-compose/storage.env
Normal file
|
|
@ -0,0 +1,10 @@
|
|||
# Environment for the "storage" service (uploads session mobs to MinIO).
# The ${COMMON_*} placeholders are replaced from common.env by envsubst.

AWS_ACCESS_KEY_ID=${COMMON_S3_KEY}
AWS_SECRET_ACCESS_KEY=${COMMON_S3_SECRET}
AWS_ENDPOINT='http://minio:9000'
AWS_REGION='us-east-1'
BUCKET_NAME=mobs
LICENSE_KEY=''
# NOTE(review): Kubernetes DNS leftover; no kafka service in this stack.
KAFKA_SERVERS='kafka.db.svc.cluster.local:9092'
KAFKA_USE_SSL='false'
REDIS_STRING='redis://redis:6379'
FS_CLEAN_HRS='24'
|
||||
Loading…
Add table
Reference in a new issue