# vim: ft=yaml
version: '3'

services:
  postgresql:
    image: bitnami/postgresql:${POSTGRES_VERSION}
    container_name: postgres
    volumes:
      - pgdata:/bitnami/postgresql
    networks:
      openreplay-net:
        aliases:
          - postgresql.db.svc.cluster.local
    environment:
      POSTGRESQL_PASSWORD: "${COMMON_PG_PASSWORD}"

  clickhouse:
    image: clickhouse/clickhouse-server:${CLICKHOUSE_VERSION}
    container_name: clickhouse
    volumes:
      - clickhouse:/var/lib/clickhouse
    networks:
      openreplay-net:
        aliases:
          - clickhouse-openreplay-clickhouse.db.svc.cluster.local
    environment:
      CLICKHOUSE_USER: "default"
      CLICKHOUSE_PASSWORD: ""
      CLICKHOUSE_DEFAULT_ACCESS_MANAGEMENT: "1"

  redis:
    image: bitnami/redis:${REDIS_VERSION}
    container_name: redis
    volumes:
      - redisdata:/bitnami/redis/data
    networks:
      openreplay-net:
        aliases:
          - redis-master.db.svc.cluster.local
    environment:
      ALLOW_EMPTY_PASSWORD: "yes"

  minio:
    image: bitnami/minio:${MINIO_VERSION}
    container_name: minio
    volumes:
      - miniodata:/bitnami/minio/data
    networks:
      openreplay-net:
        aliases:
          - minio.db.svc.cluster.local
    ports:
      - 9001:9001
    environment:
      MINIO_ROOT_USER: ${COMMON_S3_KEY}
      MINIO_ROOT_PASSWORD: ${COMMON_S3_SECRET}

  fs-permission:
    image: debian:stable-slim
    container_name: fs-permission
    profiles:
      - "migration"
    volumes:
      - shared-volume:/mnt/efs
      - miniodata:/mnt/minio
      - pgdata:/mnt/postgres
    entrypoint:
      - /bin/bash
      - -c
      - |
        # Give the non-root service user (UID 1001, as used by the Bitnami
        # images) ownership of the shared volumes.
        chown -R 1001:1001 /mnt/{efs,minio,postgres}
    restart: on-failure

  minio-migration:
    image: bitnami/minio:2020.10.9-debian-10-r6
    container_name: minio-migration
    profiles:
      - "migration"
    depends_on:
      - minio
      - fs-permission
    networks:
      - openreplay-net
    volumes:
      - ../helmcharts/openreplay/files/minio.sh:/tmp/minio.sh
    environment:
      MINIO_HOST: http://minio.db.svc.cluster.local:9000
      MINIO_ACCESS_KEY: ${COMMON_S3_KEY}
      MINIO_SECRET_KEY: ${COMMON_S3_SECRET}
    user: root
    entrypoint:
      - /bin/bash
      - -c
      - |
        apt update && apt install netcat -y
        # Wait for MinIO to be ready
        until nc -z -v -w30 minio 9000; do
          echo "Waiting for MinIO server to be ready..."
          sleep 1
        done
        bash /tmp/minio.sh init || exit 100

  db-migration:
    image: bitnami/postgresql:14.5.0
    container_name: db-migration
    profiles:
      - "migration"
    depends_on:
      - postgresql
      - minio-migration
    networks:
      - openreplay-net
    volumes:
      - ../schema/db/init_dbs/postgresql/init_schema.sql:/tmp/init_schema.sql
    environment:
      PGHOST: postgresql
      PGPORT: 5432
      PGDATABASE: postgres
      PGUSER: postgres
      PGPASSWORD: ${COMMON_PG_PASSWORD}
    entrypoint:
      - /bin/bash
      - -c
      - |
        until psql -c '\q'; do
          echo "PostgreSQL is unavailable - sleeping"
          sleep 1
        done
        echo "PostgreSQL is up - executing command"
        psql -v ON_ERROR_STOP=1 -f /tmp/init_schema.sql

  clickhouse-migration:
    image: clickhouse/clickhouse-server:${CLICKHOUSE_VERSION}
    container_name: clickhouse-migration
    profiles:
      - "migration"
    depends_on:
      - clickhouse
      - minio-migration
    networks:
      - openreplay-net
    volumes:
      - ../schema/db/init_dbs/clickhouse/create/init_schema.sql:/tmp/init_schema.sql
    environment:
      CH_HOST: "clickhouse-openreplay-clickhouse.db.svc.cluster.local"
      CH_PORT: "9000"
      CH_PORT_HTTP: "8123"
      CH_USERNAME: "default"
      CH_PASSWORD: ""
    entrypoint:
      - /bin/bash
      - -c
      - |
        # Wait for ClickHouse to be ready
        until nc -z -v -w30 clickhouse-openreplay-clickhouse.db.svc.cluster.local 9000; do
          echo "Waiting for ClickHouse server to be ready..."
          sleep 1
        done
        echo "ClickHouse is up - executing command"
        clickhouse-client -h clickhouse-openreplay-clickhouse.db.svc.cluster.local --user default --port 9000 --multiquery < /tmp/init_schema.sql || true
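  # The one-shot jobs above (fs-permission, minio-migration, db-migration,
  # clickhouse-migration) are gated behind the "migration" profile, so a
  # plain `docker compose up` skips them. A sketch of a typical first run,
  # assuming a .env file next to this compose file defines the *_VERSION
  # tags, COMMON_PG_PASSWORD, COMMON_S3_KEY and COMMON_S3_SECRET:
  #
  #   docker compose --profile migration up -d
  #
  # `--profile migration` enables the migration jobs alongside the regular
  # services; once the schemas are initialized, later runs can drop the flag.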
  alerts-openreplay:
    image: public.ecr.aws/p1t3u8a3/alerts:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: alerts
    networks:
      openreplay-net:
        aliases:
          - alerts-openreplay
          - alerts-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/alerts.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  analytics-openreplay:
    image: public.ecr.aws/p1t3u8a3/analytics:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: analytics
    networks:
      openreplay-net:
        aliases:
          - analytics-openreplay
          - analytics-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/analytics.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  http-openreplay:
    image: public.ecr.aws/p1t3u8a3/http:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: http
    networks:
      openreplay-net:
        aliases:
          - http-openreplay
          - http-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/http.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  images-openreplay:
    image: public.ecr.aws/p1t3u8a3/images:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: images
    networks:
      openreplay-net:
        aliases:
          - images-openreplay
          - images-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/images.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  integrations-openreplay:
    image: public.ecr.aws/p1t3u8a3/integrations:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: integrations
    networks:
      openreplay-net:
        aliases:
          - integrations-openreplay
          - integrations-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/integrations.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  sink-openreplay:
    image: public.ecr.aws/p1t3u8a3/sink:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: sink
    networks:
      openreplay-net:
        aliases:
          - sink-openreplay
          - sink-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/sink.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  sourcemapreader-openreplay:
    image: public.ecr.aws/p1t3u8a3/sourcemapreader:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: sourcemapreader
    networks:
      openreplay-net:
        aliases:
          - sourcemapreader-openreplay
          - sourcemapreader-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/sourcemapreader.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  spot-openreplay:
    image: public.ecr.aws/p1t3u8a3/spot:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: spot
    networks:
      openreplay-net:
        aliases:
          - spot-openreplay
          - spot-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/spot.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  storage-openreplay:
    image: public.ecr.aws/p1t3u8a3/storage:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: storage
    networks:
      openreplay-net:
        aliases:
          - storage-openreplay
          - storage-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/storage.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped
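  # Note: every application service is aliased under *.app.svc.cluster.local
  # (and the datastores under *.db.svc.cluster.local), mirroring Kubernetes
  # DNS names. This is presumably so that hostnames written for the Helm
  # charts, e.g. in the docker-envs/*.env files, resolve unchanged on the
  # openreplay-net bridge network.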
  assets-openreplay:
    image: public.ecr.aws/p1t3u8a3/assets:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: assets
    networks:
      openreplay-net:
        aliases:
          - assets-openreplay
          - assets-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/assets.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  assist-openreplay:
    image: public.ecr.aws/p1t3u8a3/assist:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: assist
    networks:
      openreplay-net:
        aliases:
          - assist-openreplay
          - assist-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/assist.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  canvases-openreplay:
    image: public.ecr.aws/p1t3u8a3/canvases:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: canvases
    networks:
      openreplay-net:
        aliases:
          - canvases-openreplay
          - canvases-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/canvases.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  chalice-openreplay:
    image: public.ecr.aws/p1t3u8a3/chalice:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: chalice
    networks:
      openreplay-net:
        aliases:
          - chalice-openreplay
          - chalice-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/chalice.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  db-openreplay:
    image: public.ecr.aws/p1t3u8a3/db:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: db
    networks:
      openreplay-net:
        aliases:
          - db-openreplay
          - db-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/db.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  ender-openreplay:
    image: public.ecr.aws/p1t3u8a3/ender:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: ender
    networks:
      openreplay-net:
        aliases:
          - ender-openreplay
          - ender-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/ender.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  frontend-openreplay:
    image: public.ecr.aws/p1t3u8a3/frontend:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: frontend
    networks:
      openreplay-net:
        aliases:
          - frontend-openreplay
          - frontend-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/frontend.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  heuristics-openreplay:
    image: public.ecr.aws/p1t3u8a3/heuristics:${COMMON_VERSION}
    domainname: app.svc.cluster.local
    container_name: heuristics
    networks:
      openreplay-net:
        aliases:
          - heuristics-openreplay
          - heuristics-openreplay.app.svc.cluster.local
    volumes:
      - shared-volume:/mnt/efs
    env_file:
      - docker-envs/heuristics.env
    environment: {} # Placeholder for per-service environment overrides
    restart: unless-stopped

  nginx-openreplay:
    image: nginx:latest
    container_name: nginx
    networks:
      - openreplay-net
    volumes:
      - ./nginx.conf:/etc/nginx/conf.d/default.conf
    restart: unless-stopped
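  # nginx expects ./nginx.conf next to this file; it is mounted as the
  # container's default server config. No host ports are published on nginx,
  # so external traffic presumably reaches it via Caddy (below) over
  # openreplay-net.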
  caddy:
    image: caddy:latest
    container_name: caddy
    ports:
      - "80:80"
      - "443:443"
    volumes:
      - ./Caddyfile:/etc/caddy/Caddyfile
      - caddy_data:/data
      - caddy_config:/config
    networks:
      - openreplay-net
    environment:
      - ACME_AGREE=true # Agree to Let's Encrypt Subscriber Agreement
      - CADDY_DOMAIN=${CADDY_DOMAIN}
    restart: unless-stopped

volumes:
  pgdata:
  clickhouse:
  redisdata:
  miniodata:
  shared-volume:
  caddy_data:
  caddy_config:

networks:
  openreplay-net:
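# A minimal Caddyfile sketch matching the mounts above (hypothetical; the
# real file ships separately). {$CADDY_DOMAIN} is Caddy's environment
# placeholder syntax, filled from the CADDY_DOMAIN variable set on the
# caddy service:
#
#   {$CADDY_DOMAIN} {
#     reverse_proxy nginx:80
#   }
#
# With a public DNS record pointing at this host, Caddy obtains a Let's
# Encrypt certificate automatically and persists it in the caddy_data volume.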