version: '3'
services:
  postgresql:
    image: bitnami/postgresql:14.5.0
    container_name: postgres
    volumes:
      - pgdata:/var/lib/postgresql/data
    networks:
      - opereplay-net
    environment:
      POSTGRESQL_PASSWORD: ${COMMON_PG_PASSWORD}

  redis:
    image: bitnami/redis:6.0.12-debian-10-r33
    container_name: redis
    volumes:
      - redisdata:/bitnami/redis/data
    networks:
      - opereplay-net
    environment:
      ALLOW_EMPTY_PASSWORD: "yes"

  minio:
    image: bitnami/minio:2023.2.10-debian-11-r1
    container_name: minio
    volumes:
      - miniodata:/bitnami/minio/data
    networks:
      - opereplay-net
    ports:
      - 9001:9001
    environment:
      MINIO_ROOT_USER: ${COMMON_S3_KEY}
      MINIO_ROOT_PASSWORD: ${COMMON_S3_SECRET}

  fs-permission:
    image: debian:stable-slim
    container_name: fs-permission
    volumes:
      - shared-volume:/mnt/efs
      - miniodata:/mnt/minio
      - pgdata:/mnt/postgres
    entrypoint:
      - /bin/bash
      - -c
      - |
        chown -R 1001:1001 /mnt/{efs,minio,postgres}

  minio-migration:
    image: bitnami/minio:2020.10.9-debian-10-r6
    container_name: minio-migration
    depends_on:
      - minio
      - fs-permission
    networks:
      - opereplay-net
    volumes:
      - ../helmcharts/openreplay/files/minio.sh:/tmp/minio.sh
    environment:
      MINIO_HOST: http://minio:9000
      MINIO_ACCESS_KEY: ${COMMON_S3_KEY}
      MINIO_SECRET_KEY: ${COMMON_S3_SECRET}
    user: root
    entrypoint:
      - /bin/bash
      - -c
      - |
        apt update && apt install netcat -y
        # Wait for Minio to be ready
        until nc -z -v -w30 minio 9000; do
          echo "Waiting for Minio server to be ready..."
          sleep 1
        done
        bash /tmp/minio.sh init || exit 100

  db-migration:
    image: bitnami/postgresql:14.5.0
    container_name: db-migration
    depends_on:
      - postgresql
      - minio-migration
    networks:
      - opereplay-net
    volumes:
      - ../schema/db/init_dbs/postgresql/init_schema.sql:/tmp/init_schema.sql
    environment:
      PGHOST: postgresql
      PGPORT: 5432
      PGDATABASE: postgres
      PGUSER: postgres
      PGPASSWORD: ${COMMON_PG_PASSWORD}
    entrypoint:
      - /bin/bash
      - -c
      - |
        until PGPASSWORD=${COMMON_PG_PASSWORD} psql -h postgresql -U postgres -d postgres -c '\q'; do
          echo "PostgreSQL is unavailable - sleeping"
          sleep 1
        done
        echo "PostgreSQL is up - executing command"
        psql -v ON_ERROR_STOP=1 -f /tmp/init_schema.sql

  frontend-openreplay:
    image: public.ecr.aws/p1t3u8a3/frontend:${COMMON_VERSION}
    container_name: frontend
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    restart: on-failure

  alerts-openreplay:
    image: public.ecr.aws/p1t3u8a3/alerts:${COMMON_VERSION}
    container_name: alerts
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - alerts.env
    restart: on-failure

  assets-openreplay:
    image: public.ecr.aws/p1t3u8a3/assets:${COMMON_VERSION}
    container_name: assets
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - assets.env
    restart: on-failure

  assist-openreplay:
    image: public.ecr.aws/p1t3u8a3/assist:${COMMON_VERSION}
    container_name: assist
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - assist.env
    restart: on-failure

  db-openreplay:
    image: public.ecr.aws/p1t3u8a3/db:${COMMON_VERSION}
    container_name: db
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - db.env
    restart: on-failure

  ender-openreplay:
    image: public.ecr.aws/p1t3u8a3/ender:${COMMON_VERSION}
    container_name: ender
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - ender.env
    restart: on-failure

  heuristics-openreplay:
    image: public.ecr.aws/p1t3u8a3/heuristics:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: heuristics
    depends_on:
      - db-migration
    networks:
      opereplay-net:
        aliases:
          - heuristics-openreplay.app.svc.cluster.local
    env_file:
      - heuristics.env
    restart: on-failure

  # imagestorage-openreplay:
  #   image: public.ecr.aws/p1t3u8a3/imagestorage:${COMMON_VERSION}
  #   depends_on:
  #     - db-migration
  #   networks:
  #     - opereplay-net
  #   restart: on-failure

  integrations-openreplay:
    image: public.ecr.aws/p1t3u8a3/integrations:${COMMON_VERSION}
    container_name: integrations
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - integrations.env
    restart: on-failure

  peers-openreplay:
    image: public.ecr.aws/p1t3u8a3/peers:${COMMON_VERSION}
    container_name: peers
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - peers.env
    restart: on-failure

  sourcemapreader-openreplay:
    image: public.ecr.aws/p1t3u8a3/sourcemapreader:${COMMON_VERSION}
    container_name: sourcemapreader
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - sourcemapreader.env
    restart: on-failure

  # videostorage-openreplay:
  #   image: public.ecr.aws/p1t3u8a3/videostorage:${COMMON_VERSION}
  #   depends_on:
  #     - db-migration
  #   networks:
  #     - opereplay-net
  #   env_file:
  #     - common.env
  #   restart: on-failure

  http-openreplay:
    image: public.ecr.aws/p1t3u8a3/http:${COMMON_VERSION}
    container_name: http
    depends_on:
      - db-migration
    networks:
      - opereplay-net
    env_file:
      - http.env
    restart: on-failure

  chalice-openreplay:
    image: public.ecr.aws/p1t3u8a3/chalice:${COMMON_VERSION}
    container_name: chalice
    depends_on:
      - db-migration
    volumes:
      - shared-volume:/mnt/efs
    networks:
      - opereplay-net
    env_file:
      - chalice.env
    restart: on-failure

  sink-openreplay:
    image: public.ecr.aws/p1t3u8a3/sink:${COMMON_VERSION}
    container_name: sink
    depends_on:
      - db-migration
    volumes:
      - shared-volume:/mnt/efs
    networks:
      - opereplay-net
    env_file:
      - sink.env
    restart: on-failure

  storage-openreplay:
    image: public.ecr.aws/p1t3u8a3/storage:${COMMON_VERSION}
    container_name: storage
    depends_on:
      - db-migration
    volumes:
      - shared-volume:/mnt/efs
    networks:
      - opereplay-net
    env_file:
      - storage.env
    restart: on-failure

  nginx-openreplay:
    image: nginx:latest
    container_name: nginx
    networks:
      - opereplay-net
    volumes:
      - ./nginx.conf:/etc/nginx/conf.d/default.conf
    restart: on-failure

  caddy:
    image: caddy:latest
    container_name: caddy
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config
    networks:
      - opereplay-net
    environment:
      - ACME_AGREE=true # Agree to Let's Encrypt Subscriber Agreement
      - CADDY_DOMAIN=${COMMON_DOMAIN_NAME}
    restart: on-failure

volumes:
  pgdata:
  redisdata:
  miniodata:
  shared-volume:
  caddy_data:
  caddy_config:

networks:
  opereplay-net:
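
# The ${COMMON_*} references above are resolved by Docker Compose from the shell
# environment or from an .env file placed next to this compose file. A minimal
# sketch of such an .env file follows; the variable names come from this file,
# while the values are illustrative placeholders (assumptions), not shipped
# defaults:
#
#   COMMON_VERSION=v1.12.0
#   COMMON_PG_PASSWORD=changeMePlease
#   COMMON_S3_KEY=minio-access-key
#   COMMON_S3_SECRET=minio-secret-key
#   COMMON_DOMAIN_NAME=openreplay.example.com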