diff --git a/.github/workflows/frontend-ee.yaml b/.github/workflows/frontend-ee.yaml deleted file mode 100644 index f4de13db1..000000000 --- a/.github/workflows/frontend-ee.yaml +++ /dev/null @@ -1,51 +0,0 @@ -name: S3 Deploy EE -on: - push: - branches: - - dev - paths: - - ee/frontend/** - -jobs: - build: - runs-on: ubuntu-latest - steps: - - name: Checkout - uses: actions/checkout@v2 - - - name: Cache node modules - uses: actions/cache@v1 - with: - path: node_modules - key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }} - restore-keys: | - ${{ runner.OS }}-build- - ${{ runner.OS }}- - - - uses: azure/k8s-set-context@v1 - with: - method: kubeconfig - kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret. - id: setcontext - - name: Install - run: npm install - - - name: Build and deploy - run: | - cd frontend - bash build.sh - cp -arl public frontend - minio_pod=$(kubectl get po -n db -l app.kubernetes.io/name=minio -n db --output custom-columns=name:.metadata.name | tail -n+2) - echo $minio_pod - echo copying frontend to container. - kubectl -n db cp frontend $minio_pod:/data/ - rm -rf frontend - - # - name: Debug Job - # if: ${{ failure() }} - # uses: mxschmitt/action-tmate@v3 - # env: - # AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }} - # AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }} - # AWS_REGION: eu-central-1 - # AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }} diff --git a/.github/workflows/frontend.yaml b/.github/workflows/frontend.yaml index 84af48e6a..8c5038f5f 100644 --- a/.github/workflows/frontend.yaml +++ b/.github/workflows/frontend.yaml @@ -1,4 +1,4 @@ -name: S3 Deploy +name: Frontend FOSS Deployment on: push: branches: @@ -27,8 +27,8 @@ jobs: method: kubeconfig kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret. 
id: setcontext - - name: Install - run: npm install +# - name: Install +# run: npm install - name: Build and deploy run: | diff --git a/LICENSE b/LICENSE index 940c97860..406fe9608 100644 --- a/LICENSE +++ b/LICENSE @@ -1,6 +1,6 @@ MIT License -Copyright (c) 2021 Asayer SAS. +Copyright (c) 2022 Asayer SAS. Portions of this software are licensed as follows: diff --git a/api/.chalice/config.bundle.json b/api/.chalice/config.bundle.json deleted file mode 100644 index 95b29ab50..000000000 --- a/api/.chalice/config.bundle.json +++ /dev/null @@ -1,67 +0,0 @@ -{ - "version": "2.0", - "app_name": "parrot", - "environment_variables": { - }, - "stages": { - "default-foss": { - "api_gateway_stage": "default-fos", - "manage_iam_role": false, - "iam_role_arn": "", - "autogen_policy": true, - "environment_variables": { - "isFOS": "true", - "isEE": "false", - "stage": "default-foss", - "jwt_issuer": "openreplay-default-foss", - "sentryURL": "", - "pg_host": "postgresql.db.svc.cluster.local", - "pg_port": "5432", - "pg_dbname": "postgres", - "pg_user": "postgres", - "pg_password": "asayerPostgres", - "alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s", - "email_signup": "http://127.0.0.1:8000/async/email_signup/%s", - "email_funnel": "http://127.0.0.1:8000/async/funnel/%s", - "email_basic": "http://127.0.0.1:8000/async/basic/%s", - "assign_link": "http://127.0.0.1:8000/async/email_assignment", - "captcha_server": "", - "captcha_key": "", - "sessions_bucket": "mobs", - "sessions_region": "us-east-1", - "put_S3_TTL": "20", - "sourcemaps_reader": "http://0.0.0.0:9000/sourcemaps", - "sourcemaps_bucket": "sourcemaps", - "js_cache_bucket": "sessions-assets", - "peers": "http://0.0.0.0:9000/assist/peers", - "async_Token": "", - "EMAIL_HOST": "", - "EMAIL_PORT": "587", - "EMAIL_USER": "", - "EMAIL_PASSWORD": "", - "EMAIL_USE_TLS": "true", - "EMAIL_USE_SSL": "false", - "EMAIL_SSL_KEY": "", - "EMAIL_SSL_CERT": "", - "EMAIL_FROM": "OpenReplay", - "SITE_URL": "", - 
"announcement_url": "", - "jwt_secret": "", - "jwt_algorithm": "HS512", - "jwt_exp_delta_seconds": "2592000", - "S3_HOST": "", - "S3_KEY": "", - "S3_SECRET": "", - "invitation_link": "/api/users/invitation?token=%s", - "change_password_link": "/reset-password?invitation=%s&&pass=%s", - "version_number": "1.3.5" - }, - "lambda_timeout": 150, - "lambda_memory_size": 400, - "subnet_ids": [ - ], - "security_group_ids": [ - ] - } - } -} diff --git a/api/.chalice/config.json b/api/.chalice/config.json deleted file mode 100644 index d1fe6c36c..000000000 --- a/api/.chalice/config.json +++ /dev/null @@ -1,68 +0,0 @@ -{ - "version": "2.0", - "app_name": "parrot", - "environment_variables": { - }, - "stages": { - "default-foss": { - "api_gateway_stage": "default-fos", - "manage_iam_role": false, - "iam_role_arn": "", - "autogen_policy": true, - "environment_variables": { - "isFOS": "true", - "isEE": "false", - "stage": "default-foss", - "jwt_issuer": "openreplay-default-foss", - "sentryURL": "", - "pg_host": "postgresql.db.svc.cluster.local", - "pg_port": "5432", - "pg_dbname": "postgres", - "pg_user": "postgres", - "pg_password": "asayerPostgres", - "alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s", - "email_signup": "http://127.0.0.1:8000/async/email_signup/%s", - "email_funnel": "http://127.0.0.1:8000/async/funnel/%s", - "email_basic": "http://127.0.0.1:8000/async/basic/%s", - "assign_link": "http://127.0.0.1:8000/async/email_assignment", - "captcha_server": "", - "captcha_key": "", - "sessions_bucket": "mobs", - "sessions_region": "us-east-1", - "put_S3_TTL": "20", - "sourcemaps_reader": "http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps", - "sourcemaps_bucket": "sourcemaps", - "js_cache_bucket": "sessions-assets", - "peers": "http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers", - "async_Token": "", - "EMAIL_HOST": "", - "EMAIL_PORT": "587", - "EMAIL_USER": "", - "EMAIL_PASSWORD": "", - "EMAIL_USE_TLS": "true", - 
"EMAIL_USE_SSL": "false", - "EMAIL_SSL_KEY": "", - "EMAIL_SSL_CERT": "", - "EMAIL_FROM": "OpenReplay", - "SITE_URL": "", - "announcement_url": "", - "jwt_secret": "", - "jwt_algorithm": "HS512", - "jwt_exp_delta_seconds": "2592000", - "S3_HOST": "", - "S3_KEY": "", - "S3_SECRET": "", - "invitation_link": "/api/users/invitation?token=%s", - "change_password_link": "/reset-password?invitation=%s&&pass=%s", - "iosBucket": "openreplay-ios-images", - "version_number": "1.3.6" - }, - "lambda_timeout": 150, - "lambda_memory_size": 400, - "subnet_ids": [ - ], - "security_group_ids": [ - ] - } - } -} \ No newline at end of file diff --git a/api/.env.default b/api/.env.default index f92709a28..8f4ddc01c 100644 --- a/api/.env.default +++ b/api/.env.default @@ -34,6 +34,8 @@ pg_host=postgresql.db.svc.cluster.local pg_password=asayerPostgres pg_port=5432 pg_user=postgres +pg_timeout=30 +pg_minconn=45 put_S3_TTL=20 sentryURL= sessions_bucket=mobs diff --git a/api/Dockerfile.alerts b/api/Dockerfile.alerts new file mode 100644 index 000000000..7bff6a9dc --- /dev/null +++ b/api/Dockerfile.alerts @@ -0,0 +1,18 @@ +FROM python:3.9.7-slim +LABEL Maintainer="Rajesh Rajendran" +LABEL Maintainer="KRAIEM Taha Yassine" +WORKDIR /work +COPY . . 
+RUN pip install -r requirements.txt +RUN mv .env.default .env && mv app_alerts.py app.py +ENV pg_minconn 2 + +# Add Tini +# Startup daemon +ENV TINI_VERSION v0.19.0 +ARG envarg +ENV ENTERPRISE_BUILD ${envarg} +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini +RUN chmod +x /tini +ENTRYPOINT ["/tini", "--"] +CMD ./entrypoint.sh \ No newline at end of file diff --git a/api/app.py b/api/app.py index 47b200aef..d261dadac 100644 --- a/api/app.py +++ b/api/app.py @@ -1,4 +1,7 @@ +import logging + from apscheduler.schedulers.asyncio import AsyncIOScheduler +from decouple import config from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from starlette.responses import StreamingResponse @@ -60,5 +63,8 @@ Schedule.start() for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs: Schedule.add_job(id=job["func"].__name__, **job) -# for job in Schedule.get_jobs(): -# print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)}) +for job in Schedule.get_jobs(): + print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)}) + +logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO)) +logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO)) diff --git a/api/app_alerts.py b/api/app_alerts.py new file mode 100644 index 000000000..57bfcd55d --- /dev/null +++ b/api/app_alerts.py @@ -0,0 +1,27 @@ +import logging + +from apscheduler.schedulers.asyncio import AsyncIOScheduler +from decouple import config +from fastapi import FastAPI + +from chalicelib.core import alerts_processor + +app = FastAPI() +print("============= ALERTS =============") + + +@app.get("/") +async def root(): + return {"status": "Running"} + + +app.schedule = AsyncIOScheduler() +app.schedule.start() +app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval", + "minutes": 
config("ALERTS_INTERVAL", cast=int, default=5), + "misfire_grace_time": 20}) +for job in app.schedule.get_jobs(): + print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)}) + +logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO)) +logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO)) diff --git a/api/build.sh b/api/build.sh index d66a54ab9..29b8911ca 100644 --- a/api/build.sh +++ b/api/build.sh @@ -22,7 +22,6 @@ function build_api(){ # Copy enterprise code [[ $1 == "ee" ]] && { cp -rf ../ee/api/* ./ - cp -rf ../ee/api/.chalice/* ./.chalice/ envarg="default-ee" tag="ee-" } @@ -31,8 +30,9 @@ function build_api(){ docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1} docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest - } +} } check_prereq build_api $1 +IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1 \ No newline at end of file diff --git a/api/build_alerts.sh b/api/build_alerts.sh new file mode 100644 index 000000000..51504a276 --- /dev/null +++ b/api/build_alerts.sh @@ -0,0 +1,70 @@ +#!/bin/bash + +# Script to build alerts module +# flags to accept: +# envarg: build for enterprise edition. +# Default will be OSS build. 
+ +# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh + +function make_submodule() { + [[ $1 != "ee" ]] && { + # -- this part was generated by modules_lister.py -- + mkdir alerts + cp -R ./{app_alerts,schemas}.py ./alerts/ + mkdir -p ./alerts/chalicelib/ + cp -R ./chalicelib/__init__.py ./alerts/chalicelib/ + mkdir -p ./alerts/chalicelib/core/ + cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,assist,events_ios,sessions_mobs,errors,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/ + mkdir -p ./alerts/chalicelib/utils/ + cp -R ./chalicelib/utils/{__init__,TimeUTC,pg_client,helper,event_filter_definition,dev,email_helper,email_handler,smtp,s3,metrics_helper}.py ./alerts/chalicelib/utils/ + # -- end of generated part + } + [[ $1 == "ee" ]] && { + # -- this part was generated by modules_lister.py -- + mkdir alerts + cp -R ./{app_alerts,schemas,schemas_ee}.py ./alerts/ + mkdir -p ./alerts/chalicelib/ + cp -R ./chalicelib/__init__.py ./alerts/chalicelib/ + mkdir -p ./alerts/chalicelib/core/ + cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,roles,assist,events_ios,sessions_mobs,errors,dashboard,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/ + mkdir -p ./alerts/chalicelib/utils/ + cp -R ./chalicelib/utils/{__init__,TimeUTC,pg_client,helper,event_filter_definition,dev,SAML2_helper,email_helper,email_handler,smtp,s3,args_transformer,ch_client,metrics_helper}.py ./alerts/chalicelib/utils/ + # -- end of generated part + } + cp -R ./{Dockerfile.alerts,requirements.txt,.env.default,entrypoint.sh} ./alerts/ + cp -R ./chalicelib/utils/html ./alerts/chalicelib/utils/html +} + 
+git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)} +envarg="default-foss" +check_prereq() { + which docker || { + echo "Docker not installed, please install docker." + exit=1 + } + [[ exit -eq 1 ]] && exit 1 +} + +function build_api(){ + tag="" + # Copy enterprise code + [[ $1 == "ee" ]] && { + cp -rf ../ee/api/* ./ + envarg="default-ee" + tag="ee-" + } + make_submodule $1 + cd alerts + docker build -f ./Dockerfile.alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} . + cd .. + rm -rf alerts + [[ $PUSH_IMAGE -eq 1 ]] && { + docker push ${DOCKER_REPO:-'local'}/alerts:${git_sha1} + docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest + docker push ${DOCKER_REPO:-'local'}/alerts:${tag}latest + } +} + +check_prereq +build_api $1 \ No newline at end of file diff --git a/api/chalicelib/core/alerts.py b/api/chalicelib/core/alerts.py index 4c8bb151d..6fe799c19 100644 --- a/api/chalicelib/core/alerts.py +++ b/api/chalicelib/core/alerts.py @@ -1,14 +1,12 @@ import json +import logging import time -from fastapi import BackgroundTasks - +import schemas from chalicelib.core import notifications, slack, webhook from chalicelib.utils import pg_client, helper, email_helper from chalicelib.utils.TimeUTC import TimeUTC -ALLOW_UPDATE = ["name", "description", "active", "detectionMethod", "query", "options"] - def get(id): with pg_client.PostgresClient() as cur: @@ -38,34 +36,6 @@ def get_all(project_id): return all -SUPPORTED_THRESHOLD = [15, 30, 60, 120, 240, 1440] - - -def __transform_structure(data): - if data.get("options") is None: - return f"Missing 'options'", None - if data["options"].get("currentPeriod") not in SUPPORTED_THRESHOLD: - return f"Unsupported currentPeriod, please provide one of these values {SUPPORTED_THRESHOLD}", None - if data["options"].get("previousPeriod", 15) not in SUPPORTED_THRESHOLD: - return f"Unsupported previousPeriod, please provide one of these values {SUPPORTED_THRESHOLD}", None - 
if data["options"].get("renotifyInterval") is None: - data["options"]["renotifyInterval"] = 720 - data["query"]["right"] = float(data["query"]["right"]) - data["query"] = json.dumps(data["query"]) - data["description"] = data["description"] if data.get("description") is not None and len( - data["description"]) > 0 else None - if data.get("options"): - messages = [] - for m in data["options"].get("message", []): - if m.get("value") is None: - continue - m["value"] = str(m["value"]) - messages.append(m) - data["options"]["message"] = messages - data["options"] = json.dumps(data["options"]) - return None, data - - def __process_circular(alert): if alert is None: return None @@ -74,15 +44,16 @@ def __process_circular(alert): return alert -def create(project_id, data): - err, data = __transform_structure(data) - if err is not None: - return {"errors": [err]} +def create(project_id, data: schemas.AlertSchema): + data = data.dict() + data["query"] = json.dumps(data["query"]) + data["options"] = json.dumps(data["options"]) + with pg_client.PostgresClient() as cur: cur.execute( cur.mogrify("""\ - INSERT INTO public.alerts(project_id, name, description, detection_method, query, options) - VALUES (%(project_id)s, %(name)s, %(description)s, %(detectionMethod)s, %(query)s, %(options)s::jsonb) + INSERT INTO public.alerts(project_id, name, description, detection_method, query, options, series_id) + VALUES (%(project_id)s, %(name)s, %(description)s, %(detection_method)s, %(query)s, %(options)s::jsonb, %(series_id)s) RETURNING *;""", {"project_id": project_id, **data}) ) @@ -90,29 +61,30 @@ def create(project_id, data): return {"data": helper.dict_to_camel_case(__process_circular(a))} -def update(id, changes): - changes = {k: changes[k] for k in changes.keys() if k in ALLOW_UPDATE} - err, changes = __transform_structure(changes) - if err is not None: - return {"errors": [err]} - updateq = [] - for k in changes.keys(): - updateq.append(f"{helper.key_to_snake_case(k)} = %({k})s") - 
if len(updateq) == 0: - return {"errors": ["nothing to update"]} +def update(id, data: schemas.AlertSchema): + data = data.dict() + data["query"] = json.dumps(data["query"]) + data["options"] = json.dumps(data["options"]) + with pg_client.PostgresClient() as cur: - query = cur.mogrify(f"""\ + query = cur.mogrify("""\ UPDATE public.alerts - SET {", ".join(updateq)} + SET name = %(name)s, + description = %(description)s, + active = TRUE, + detection_method = %(detection_method)s, + query = %(query)s, + options = %(options)s, + series_id = %(series_id)s WHERE alert_id =%(id)s AND deleted_at ISNULL RETURNING *;""", - {"id": id, **changes}) + {"id": id, **data}) cur.execute(query=query) a = helper.dict_to_camel_case(cur.fetchone()) return {"data": __process_circular(a)} -def process_notifications(data, background_tasks: BackgroundTasks): +def process_notifications(data): full = {} for n in data: if "message" in n["options"]: @@ -133,15 +105,26 @@ def process_notifications(data, background_tasks: BackgroundTasks): BATCH_SIZE = 200 for t in full.keys(): for i in range(0, len(full[t]), BATCH_SIZE): - # helper.async_post(config('alert_ntf') % t, {"notifications": full[t][i:i + BATCH_SIZE]}) notifications_list = full[t][i:i + BATCH_SIZE] if t == "slack": - background_tasks.add_task(slack.send_batch, notifications_list=notifications_list) + try: + slack.send_batch(notifications_list=notifications_list) + except Exception as e: + logging.error("!!!Error while sending slack notifications batch") + logging.error(str(e)) elif t == "email": - background_tasks.add_task(send_by_email_batch, notifications_list=notifications_list) + try: + send_by_email_batch(notifications_list=notifications_list) + except Exception as e: + logging.error("!!!Error while sending email notifications batch") + logging.error(str(e)) elif t == "webhook": - background_tasks.add_task(webhook.trigger_batch, data_list=notifications_list) + try: + webhook.trigger_batch(data_list=notifications_list) + except 
Exception as e: + logging.error("!!!Error while sending webhook notifications batch") + logging.error(str(e)) def send_by_email(notification, destination): @@ -175,3 +158,13 @@ def delete(project_id, alert_id): {"alert_id": alert_id, "project_id": project_id}) ) return {"data": {"state": "success"}} + + +def get_predefined_values(): + values = [e.value for e in schemas.AlertColumn] + values = [{"name": v, "value": v, + "unit": "count" if v.endswith(".count") else "ms", + "predefined": True, + "metricId": None, + "seriesId": None} for v in values] + return values diff --git a/api/chalicelib/core/alerts_listener.py b/api/chalicelib/core/alerts_listener.py new file mode 100644 index 000000000..419f0326d --- /dev/null +++ b/api/chalicelib/core/alerts_listener.py @@ -0,0 +1,27 @@ +from chalicelib.utils import pg_client, helper + + +def get_all_alerts(): + with pg_client.PostgresClient(long_query=True) as cur: + query = """SELECT -1 AS tenant_id, + alert_id, + project_id, + detection_method, + query, + options, + (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at, + alerts.name, + alerts.series_id, + filter + FROM public.alerts + LEFT JOIN metric_series USING (series_id) + INNER JOIN projects USING (project_id) + WHERE alerts.deleted_at ISNULL + AND alerts.active + AND projects.active + AND projects.deleted_at ISNULL + AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL) + ORDER BY alerts.created_at;""" + cur.execute(query=query) + all_alerts = helper.list_to_camel_case(cur.fetchall()) + return all_alerts diff --git a/api/chalicelib/core/alerts_processor.py b/api/chalicelib/core/alerts_processor.py new file mode 100644 index 000000000..80973fadd --- /dev/null +++ b/api/chalicelib/core/alerts_processor.py @@ -0,0 +1,250 @@ +import decimal +import logging + +import schemas +from chalicelib.core import alerts_listener +from chalicelib.core import sessions, alerts +from chalicelib.utils import pg_client +from chalicelib.utils.TimeUTC import 
TimeUTC + +LeftToDb = { + schemas.AlertColumn.performance__dom_content_loaded__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"}, + schemas.AlertColumn.performance__first_meaningful_paint__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"}, + schemas.AlertColumn.performance__page_load_time__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(load_time ,0))"}, + schemas.AlertColumn.performance__dom_build_time__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(dom_building_time,0))"}, + schemas.AlertColumn.performance__speed_index__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(speed_index,0))"}, + schemas.AlertColumn.performance__page_response_time__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(response_time,0))"}, + schemas.AlertColumn.performance__ttfb__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(first_paint_time,0))"}, + schemas.AlertColumn.performance__time_to_render__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(visually_complete,0))"}, + schemas.AlertColumn.performance__image_load_time__average: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='img'"}, + schemas.AlertColumn.performance__request_load_time__average: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='fetch'"}, + 
schemas.AlertColumn.resources__load_time__average: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(resources.duration,0))"}, + schemas.AlertColumn.resources__missing__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE"}, + schemas.AlertColumn.errors__4xx_5xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)", + "condition": "status/100!=2"}, + schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=4"}, + schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=5"}, + schemas.AlertColumn.errors__javascript__impacted_sessions__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"}, + schemas.AlertColumn.performance__crashes__count: { + "table": "(SELECT *, start_ts AS timestamp FROM public.sessions WHERE errors_count > 0) AS sessions", + "formula": "COUNT(DISTINCT session_id)", "condition": "errors_count > 0"}, + schemas.AlertColumn.errors__javascript__count: { + "table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", + "formula": "COUNT(DISTINCT session_id)", "condition": "source='js_exception'", "joinSessions": False}, + schemas.AlertColumn.errors__backend__count: { + "table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", + "formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False}, +} + +# This is the frequency of execution for each threshold +TimeInterval = { + 15: 3, + 30: 5, + 60: 10, + 120: 
20, + 240: 30, + 1440: 60, +} + + +def can_check(a) -> bool: + now = TimeUTC.now() + + repetitionBase = a["options"]["currentPeriod"] \ + if a["detectionMethod"] == schemas.AlertDetectionMethod.change \ + and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \ + else a["options"]["previousPeriod"] + + if TimeInterval.get(repetitionBase) is None: + logging.error(f"repetitionBase: {repetitionBase} NOT FOUND") + return False + + return (a["options"]["renotifyInterval"] <= 0 or + a["options"].get("lastNotification") is None or + a["options"]["lastNotification"] <= 0 or + ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \ + and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 + + +def Build(a): + params = {"project_id": a["projectId"]} + full_args = {} + j_s = True + if a["seriesId"] is not None: + a["filter"]["sort"] = "session_id" + a["filter"]["order"] = "DESC" + a["filter"]["startDate"] = -1 + a["filter"]["endDate"] = TimeUTC.now() + full_args, query_part, sort = sessions.search_query_parts( + data=schemas.SessionsSearchPayloadSchema.parse_obj(a["filter"]), + error_status=None, errors_only=False, + favorite_only=False, issue=None, project_id=a["projectId"], + user_id=None) + subQ = f"""SELECT COUNT(session_id) AS value + {query_part}""" + else: + colDef = LeftToDb[a["query"]["left"]] + subQ = f"""SELECT {colDef["formula"]} AS value + FROM {colDef["table"]} + WHERE project_id = %(project_id)s + {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}""" + j_s = colDef.get("joinSessions", True) + + q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid""" + + # if len(colDef.group) > 0 { + # subQ = subQ.Column(colDef.group + " AS group_value") + # subQ = subQ.GroupBy(colDef.group) + # q = q.Column("group_value") + # } + + if a["detectionMethod"] == schemas.AlertDetectionMethod.threshold: + if 
a["seriesId"] is not None: + q += f""" FROM ({subQ}) AS stat""" + else: + q += f""" FROM ({subQ} AND timestamp>=%(startDate)s + {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}) AS stat""" + params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000} + else: + if a["options"]["change"] == schemas.AlertDetectionChangeType.change: + # if len(colDef.group) > 0: + # subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod * 60)) + # sub2, args2, _ := subQ.Where( + # sq.And{ + # sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod * 60), + # sq.Expr("timestamp>=$4 ", time.Now().Unix()-2 * a.Options.CurrentPeriod * 60), + # }).ToSql() + # sub1 := sq.Select("group_value", "(stat1.value-stat2.value) AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...) + # q = q.FromSelect(sub1, "stat") + # else: + if a["seriesId"] is not None: + sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s") + sub1 = f"SELECT (({subQ})-({sub2})) AS value" + q += f" FROM ( {sub1} ) AS stat" + params = {**params, **full_args, + "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000, + "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000} + else: + sub1 = f"""{subQ} AND timestamp>=%(startDate)s + {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}""" + params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000 + sub2 = f"""{subQ} AND timestamp<%(startDate)s + AND timestamp>=%(timestamp_sub2)s + {"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}""" + params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000 + sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value" + q += f" FROM ( {sub1} ) AS stat" + + else: + # if len(colDef.group) >0 { + # subq1 := 
subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod * 60)) + # sub2, args2, _ := subQ.Where( + # sq.And{ + # sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod * 60), + # sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod * 60-a.Options.CurrentPeriod * 60), + # }).ToSql() + # sub1 := sq.Select("group_value", "(stat1.value/stat2.value-1)*100 AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...) + # q = q.FromSelect(sub1, "stat") + # } else { + if a["seriesId"] is not None: + sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s") + sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value" + q += f" FROM ({sub1}) AS stat" + params = {**params, **full_args, + "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000, + "timestamp_sub2": TimeUTC.now() \ + - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \ + * 60 * 1000} + else: + sub1 = f"""{subQ} AND timestamp>=%(startDate)s + {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}""" + params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000 + sub2 = f"""{subQ} AND timestamp<%(startDate)s + AND timestamp>=%(timestamp_sub2)s + {"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}""" + params["timestamp_sub2"] = TimeUTC.now() \ + - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000 + sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value" + q += f" FROM ({sub1}) AS stat" + + return q, params + + +def process(): + notifications = [] + all_alerts = alerts_listener.get_all_alerts() + with pg_client.PostgresClient() as cur: + for alert in all_alerts: + if can_check(alert): + logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}") + query, params = Build(alert) + query = cur.mogrify(query, params) + 
logging.debug(alert) + logging.debug(query) + try: + cur.execute(query) + result = cur.fetchone() + if result["valid"]: + logging.info("Valid alert, notifying users") + notifications.append({ + "alertId": alert["alertId"], + "tenantId": alert["tenantId"], + "title": alert["name"], + "description": f"has been triggered, {alert['query']['left']} = {round(result['value'], 2)} ({alert['query']['operator']} {alert['query']['right']}).", + "buttonText": "Check metrics for more details", + "buttonUrl": f"/{alert['projectId']}/metrics", + "imageUrl": None, + "options": {"source": "ALERT", "sourceId": alert["alertId"], + "sourceMeta": alert["detectionMethod"], + "message": alert["options"]["message"], "projectId": alert["projectId"], + "data": {"title": alert["name"], + "limitValue": alert["query"]["right"], + "actualValue": float(result["value"]) \ + if isinstance(result["value"], decimal.Decimal) \ + else result["value"], + "operator": alert["query"]["operator"], + "trigger": alert["query"]["left"], + "alertId": alert["alertId"], + "detectionMethod": alert["detectionMethod"], + "currentPeriod": alert["options"]["currentPeriod"], + "previousPeriod": alert["options"]["previousPeriod"], + "createdAt": TimeUTC.now()}}, + }) + except Exception as e: + logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}") + logging.error(str(e)) + logging.error(query) + if len(notifications) > 0: + cur.execute( + cur.mogrify(f"""UPDATE public.Alerts + SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb + WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])})) + if len(notifications) > 0: + alerts.process_notifications(notifications) diff --git a/api/chalicelib/core/assist.py b/api/chalicelib/core/assist.py index 8242e69ff..cd76d0be4 100644 --- a/api/chalicelib/core/assist.py +++ b/api/chalicelib/core/assist.py @@ -1,3 +1,4 @@ +import schemas from chalicelib.utils import pg_client, helper from chalicelib.core import 
projects, sessions, sessions_metas import requests @@ -44,7 +45,7 @@ def get_live_sessions(project_id, filters=None): continue filter_type = f["type"].upper() f["value"] = sessions.__get_sql_value_multiple(f["value"]) - if filter_type == sessions_metas.meta_type.USERID: + if filter_type == schemas.FilterType.user_id: op = sessions.__get_sql_operator(f["operator"]) extra_constraints.append(f"user_id {op} %(value_{i})s") extra_params[f"value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) diff --git a/api/chalicelib/core/custom_metrics.py b/api/chalicelib/core/custom_metrics.py new file mode 100644 index 000000000..10e86024c --- /dev/null +++ b/api/chalicelib/core/custom_metrics.py @@ -0,0 +1,232 @@ +import json + +import schemas +from chalicelib.core import sessions +from chalicelib.utils import helper, pg_client +from chalicelib.utils.TimeUTC import TimeUTC + + +def try_live(project_id, data: schemas.TryCustomMetricsSchema): + results = [] + for s in data.series: + s.filter.startDate = data.startDate + s.filter.endDate = data.endDate + results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density, + view_type=data.viewType)) + if data.viewType == schemas.MetricViewType.progress: + r = {"count": results[-1]} + diff = s.filter.endDate - s.filter.startDate + s.filter.startDate = data.endDate + s.filter.endDate = data.endDate - diff + r["previousCount"] = sessions.search2_series(data=s.filter, project_id=project_id, density=data.density, + view_type=data.viewType) + r["countProgress"] = helper.__progress(old_val=r["previousCount"], new_val=r["count"]) + results[-1] = r + return results + + +def make_chart(project_id, user_id, metric_id, data: schemas.CustomMetricChartPayloadSchema): + metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False) + if metric is None: + return None + metric: schemas.TryCustomMetricsSchema = schemas.TryCustomMetricsSchema.parse_obj({**data.dict(), **metric}) + 
return try_live(project_id=project_id, data=metric) + + +def create(project_id, user_id, data: schemas.CreateCustomMetricsSchema): + with pg_client.PostgresClient() as cur: + _data = {} + for i, s in enumerate(data.series): + for k in s.dict().keys(): + _data[f"{k}_{i}"] = s.__getattribute__(k) + _data[f"index_{i}"] = i + _data[f"filter_{i}"] = s.filter.json() + series_len = len(data.series) + data.series = None + params = {"user_id": user_id, "project_id": project_id, **data.dict(), **_data} + query = cur.mogrify(f"""\ + WITH m AS (INSERT INTO metrics (project_id, user_id, name) + VALUES (%(project_id)s, %(user_id)s, %(name)s) + RETURNING *) + INSERT + INTO metric_series(metric_id, index, name, filter) + VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)" + for i in range(series_len)])} + RETURNING metric_id;""", params) + + cur.execute( + query + ) + r = cur.fetchone() + return {"data": get(metric_id=r["metric_id"], project_id=project_id, user_id=user_id)} + + +def __get_series_id(metric_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT series_id + FROM metric_series + WHERE metric_series.metric_id = %(metric_id)s + AND metric_series.deleted_at ISNULL;""", + {"metric_id": metric_id} + ) + ) + rows = cur.fetchall() + return [r["series_id"] for r in rows] + + +def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSchema): + series_ids = __get_series_id(metric_id) + n_series = [] + d_series_ids = [] + u_series = [] + u_series_ids = [] + params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name, + "user_id": user_id, "project_id": project_id} + for i, s in enumerate(data.series): + prefix = "u_" + if s.series_id is None: + n_series.append({"i": i, "s": s}) + prefix = "n_" + else: + u_series.append({"i": i, "s": s}) + u_series_ids.append(s.series_id) + ns = s.dict() + for k in ns.keys(): + if k == "filter": + ns[k] = json.dumps(ns[k]) + 
params[f"{prefix}{k}_{i}"] = ns[k] + for i in series_ids: + if i not in u_series_ids: + d_series_ids.append(i) + params["d_series_ids"] = tuple(d_series_ids) + + with pg_client.PostgresClient() as cur: + sub_queries = [] + if len(n_series) > 0: + sub_queries.append(f"""\ + n AS (INSERT INTO metric_series (metric_id, index, name, filter) + VALUES {",".join([f"(%(metric_id)s, %(n_index_{s['i']})s, %(n_name_{s['i']})s, %(n_filter_{s['i']})s::jsonb)" + for s in n_series])} + RETURNING 1)""") + if len(u_series) > 0: + sub_queries.append(f"""\ + u AS (UPDATE metric_series + SET name=series.name, + filter=series.filter, + index=series.index + FROM (VALUES {",".join([f"(%(u_series_id_{s['i']})s,%(u_index_{s['i']})s,%(u_name_{s['i']})s,%(u_filter_{s['i']})s::jsonb)" + for s in u_series])}) AS series(series_id, index, name, filter) + WHERE metric_series.metric_id =%(metric_id)s AND metric_series.series_id=series.series_id + RETURNING 1)""") + if len(d_series_ids) > 0: + sub_queries.append("""\ + d AS (DELETE FROM metric_series WHERE metric_id =%(metric_id)s AND series_id IN %(d_series_ids)s + RETURNING 1)""") + query = cur.mogrify(f"""\ + {"WITH " if len(sub_queries) > 0 else ""}{",".join(sub_queries)} + UPDATE metrics + SET name = %(name)s, is_public= %(is_public)s + WHERE metric_id = %(metric_id)s + AND project_id = %(project_id)s + AND (user_id = %(user_id)s OR is_public) + RETURNING metric_id;""", params) + cur.execute( + query + ) + return get(metric_id=metric_id, project_id=project_id, user_id=user_id) + + +def get_all(project_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT * + FROM metrics + LEFT JOIN LATERAL (SELECT jsonb_agg(metric_series.* ORDER BY index) AS series + FROM metric_series + WHERE metric_series.metric_id = metrics.metric_id + AND metric_series.deleted_at ISNULL + ) AS metric_series ON (TRUE) + WHERE metrics.project_id = %(project_id)s + AND metrics.deleted_at ISNULL + AND (user_id = %(user_id)s OR 
is_public) + ORDER BY created_at;""", + {"project_id": project_id, "user_id": user_id} + ) + ) + rows = cur.fetchall() + for r in rows: + r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) + for s in r["series"]: + s["filter"] = helper.old_search_payload_to_flat(s["filter"]) + rows = helper.list_to_camel_case(rows) + return rows + + +def delete(project_id, metric_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify("""\ + UPDATE public.metrics + SET deleted_at = timezone('utc'::text, now()) + WHERE project_id = %(project_id)s + AND metric_id = %(metric_id)s + AND (user_id = %(user_id)s OR is_public);""", + {"metric_id": metric_id, "project_id": project_id, "user_id": user_id}) + ) + + return {"state": "success"} + + +def get(metric_id, project_id, user_id, flatten=True): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT * + FROM metrics + LEFT JOIN LATERAL (SELECT jsonb_agg(metric_series.* ORDER BY index) AS series + FROM metric_series + WHERE metric_series.metric_id = metrics.metric_id + AND metric_series.deleted_at ISNULL + ) AS metric_series ON (TRUE) + WHERE metrics.project_id = %(project_id)s + AND metrics.deleted_at ISNULL + AND (metrics.user_id = %(user_id)s OR metrics.is_public) + AND metrics.metric_id = %(metric_id)s + ORDER BY created_at;""", + {"metric_id": metric_id, "project_id": project_id, "user_id": user_id} + ) + ) + row = cur.fetchone() + if row is None: + return None + row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"]) + if flatten: + for s in row["series"]: + s["filter"] = helper.old_search_payload_to_flat(s["filter"]) + return helper.dict_to_camel_case(row) + + +def get_series_for_alert(project_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT series_id AS value, + metrics.name || '.' 
|| (COALESCE(metric_series.name, 'series ' || index)) || '.count' AS name, + 'count' AS unit, + FALSE AS predefined, + metric_id, + series_id + FROM metric_series + INNER JOIN metrics USING (metric_id) + WHERE metrics.deleted_at ISNULL + AND metrics.project_id = %(project_id)s + AND (user_id = %(user_id)s OR is_public) + ORDER BY name;""", + {"project_id": project_id, "user_id": user_id} + ) + ) + rows = cur.fetchall() + return helper.list_to_camel_case(rows) diff --git a/api/chalicelib/core/dashboard.py b/api/chalicelib/core/dashboard.py index 919d6aa5a..9cd88eb6a 100644 --- a/api/chalicelib/core/dashboard.py +++ b/api/chalicelib/core/dashboard.py @@ -1,3 +1,4 @@ +import schemas from chalicelib.core import metadata from chalicelib.utils import args_transformer from chalicelib.utils import helper, dev @@ -94,25 +95,25 @@ def __get_meta_constraint(project_id, data): else: filter_type = f["key"].upper() filter_type = [filter_type, "USER" + filter_type, filter_type[4:]] - if any(item in [sessions_metas.meta_type.USERBROWSER] \ + if any(item in [schemas.FilterType.user_browser] \ for item in filter_type): constraints.append(f"sessions.user_browser = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USEROS, sessions_metas.meta_type.USEROS_IOS] \ + elif any(item in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios] \ for item in filter_type): constraints.append(f"sessions.user_os = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USERDEVICE, sessions_metas.meta_type.USERDEVICE_IOS] \ + elif any(item in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios] \ for item in filter_type): constraints.append(f"sessions.user_device = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USERCOUNTRY, sessions_metas.meta_type.USERCOUNTRY_IOS] \ + elif any(item in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios] \ for item in filter_type): constraints.append(f"sessions.user_country = 
%({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS] \ + elif any(item in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios] \ for item in filter_type): constraints.append(f"sessions.user_id = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USERANONYMOUSID, sessions_metas.meta_type.USERANONYMOUSID_IOS] \ + elif any(item in [schemas.FilterType.user_anonymous_id, schemas.FilterType.user_anonymous_id_ios] \ for item in filter_type): constraints.append(f"sessions.user_anonymous_id = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.REVID, sessions_metas.meta_type.REVID_IOS] \ + elif any(item in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios] \ for item in filter_type): constraints.append(f"sessions.rev_id = %({f['key']}_{i})s") return constraints diff --git a/api/chalicelib/core/events.py b/api/chalicelib/core/events.py index 69213a079..0a330d625 100644 --- a/api/chalicelib/core/events.py +++ b/api/chalicelib/core/events.py @@ -1,6 +1,7 @@ -from chalicelib.utils import pg_client, helper -from chalicelib.core import sessions_metas, metadata +import schemas from chalicelib.core import issues +from chalicelib.core import sessions_metas, metadata +from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC from chalicelib.utils.event_filter_definition import SupportedFilter, Event @@ -235,23 +236,23 @@ def __generic_autocomplete(event: Event): class event_type: - CLICK = Event(ui_type="CLICK", table="events.clicks", column="label") - INPUT = Event(ui_type="INPUT", table="events.inputs", column="label") - LOCATION = Event(ui_type="LOCATION", table="events.pages", column="base_path") - CUSTOM = Event(ui_type="CUSTOM", table="events_common.customs", column="name") - REQUEST = Event(ui_type="REQUEST", table="events_common.requests", column="url") - GRAPHQL = Event(ui_type="GRAPHQL", table="events.graphql", column="name") - STATEACTION 
= Event(ui_type="STATEACTION", table="events.state_actions", column="name") - ERROR = Event(ui_type="ERROR", table="events.errors", + CLICK = Event(ui_type=schemas.EventType.click, table="events.clicks", column="label") + INPUT = Event(ui_type=schemas.EventType.input, table="events.inputs", column="label") + LOCATION = Event(ui_type=schemas.EventType.location, table="events.pages", column="base_path") + CUSTOM = Event(ui_type=schemas.EventType.custom, table="events_common.customs", column="name") + REQUEST = Event(ui_type=schemas.EventType.request, table="events_common.requests", column="url") + GRAPHQL = Event(ui_type=schemas.EventType.graphql, table="events.graphql", column="name") + STATEACTION = Event(ui_type=schemas.EventType.state_action, table="events.state_actions", column="name") + ERROR = Event(ui_type=schemas.EventType.error, table="events.errors", column=None) # column=None because errors are searched by name or message - METADATA = Event(ui_type="METADATA", table="public.sessions", column=None) + METADATA = Event(ui_type=schemas.EventType.metadata, table="public.sessions", column=None) # IOS - CLICK_IOS = Event(ui_type="CLICK_IOS", table="events_ios.clicks", column="label") - INPUT_IOS = Event(ui_type="INPUT_IOS", table="events_ios.inputs", column="label") - VIEW_IOS = Event(ui_type="VIEW_IOS", table="events_ios.views", column="name") - CUSTOM_IOS = Event(ui_type="CUSTOM_IOS", table="events_common.customs", column="name") - REQUEST_IOS = Event(ui_type="REQUEST_IOS", table="events_common.requests", column="url") - ERROR_IOS = Event(ui_type="ERROR_IOS", table="events_ios.crashes", + CLICK_IOS = Event(ui_type=schemas.EventType.click_ios, table="events_ios.clicks", column="label") + INPUT_IOS = Event(ui_type=schemas.EventType.input_ios, table="events_ios.inputs", column="label") + VIEW_IOS = Event(ui_type=schemas.EventType.view_ios, table="events_ios.views", column="name") + CUSTOM_IOS = Event(ui_type=schemas.EventType.custom_ios, 
table="events_common.customs", column="name") + REQUEST_IOS = Event(ui_type=schemas.EventType.request_ios, table="events_common.requests", column="url") + ERROR_IOS = Event(ui_type=schemas.EventType.error_ios, table="events_ios.crashes", column=None) # column=None because errors are searched by name or message @@ -389,18 +390,18 @@ def search_pg2(text, event_type, project_id, source, key): if not event_type: return {"data": __get_autocomplete_table(text, project_id)} - if event_type.upper() in SUPPORTED_TYPES.keys(): - rows = SUPPORTED_TYPES[event_type.upper()].get(project_id=project_id, value=text, key=key, source=source) - if event_type.upper() + "_IOS" in SUPPORTED_TYPES.keys(): - rows += SUPPORTED_TYPES[event_type.upper() + "_IOS"].get(project_id=project_id, value=text, key=key, - source=source) - elif event_type.upper() + "_IOS" in SUPPORTED_TYPES.keys(): - rows = SUPPORTED_TYPES[event_type.upper() + "_IOS"].get(project_id=project_id, value=text, key=key, - source=source) - elif event_type.upper() in sessions_metas.SUPPORTED_TYPES.keys(): + if event_type in SUPPORTED_TYPES.keys(): + rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source) + if event_type + "_IOS" in SUPPORTED_TYPES.keys(): + rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key, + source=source) + elif event_type + "_IOS" in SUPPORTED_TYPES.keys(): + rows = SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key, + source=source) + elif event_type in sessions_metas.SUPPORTED_TYPES.keys(): return sessions_metas.search(text, event_type, project_id) - elif event_type.upper().endswith("_IOS") \ - and event_type.upper()[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys(): + elif event_type.endswith("_IOS") \ + and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys(): return sessions_metas.search(text, event_type, project_id) else: return {"errors": ["unsupported event"]} diff 
--git a/api/chalicelib/core/funnels.py b/api/chalicelib/core/funnels.py index 7d0bcee7d..c33bed586 100644 --- a/api/chalicelib/core/funnels.py +++ b/api/chalicelib/core/funnels.py @@ -1,7 +1,8 @@ import json import chalicelib.utils.helper -from chalicelib.core import events, significance, sessions +import schemas +from chalicelib.core import significance, sessions from chalicelib.utils import dev from chalicelib.utils import helper, pg_client from chalicelib.utils.TimeUTC import TimeUTC @@ -11,23 +12,24 @@ REMOVE_KEYS = ["key", "_key", "startDate", "endDate"] ALLOW_UPDATE_FOR = ["name", "filter"] -def filter_stages(stages): - ALLOW_TYPES = [events.event_type.CLICK.ui_type, events.event_type.INPUT.ui_type, - events.event_type.LOCATION.ui_type, events.event_type.CUSTOM.ui_type, - events.event_type.CLICK_IOS.ui_type, events.event_type.INPUT_IOS.ui_type, - events.event_type.VIEW_IOS.ui_type, events.event_type.CUSTOM_IOS.ui_type, ] - return [s for s in stages if s["type"] in ALLOW_TYPES and s.get("value") is not None] +# def filter_stages(stages): +# ALLOW_TYPES = [events.event_type.CLICK.ui_type, events.event_type.INPUT.ui_type, +# events.event_type.LOCATION.ui_type, events.event_type.CUSTOM.ui_type, +# events.event_type.CLICK_IOS.ui_type, events.event_type.INPUT_IOS.ui_type, +# events.event_type.VIEW_IOS.ui_type, events.event_type.CUSTOM_IOS.ui_type, ] +# return [s for s in stages if s["type"] in ALLOW_TYPES and s.get("value") is not None] -def create(project_id, user_id, name, filter, is_public): +def create(project_id, user_id, name, filter: schemas.FunnelSearchPayloadSchema, is_public): helper.delete_keys_from_dict(filter, REMOVE_KEYS) - filter["events"] = filter_stages(stages=filter.get("events", [])) + # filter.events = filter_stages(stages=filter.events) with pg_client.PostgresClient() as cur: query = cur.mogrify("""\ INSERT INTO public.funnels (project_id, user_id, name, filter,is_public) VALUES (%(project_id)s, %(user_id)s, %(name)s, 
%(filter)s::jsonb,%(is_public)s) RETURNING *;""", - {"user_id": user_id, "project_id": project_id, "name": name, "filter": json.dumps(filter), + {"user_id": user_id, "project_id": project_id, "name": name, + "filter": json.dumps(filter.dict()), "is_public": is_public}) cur.execute( @@ -40,7 +42,7 @@ def create(project_id, user_id, name, filter, is_public): return {"data": r} -def update(funnel_id, user_id, name=None, filter=None, is_public=None): +def update(funnel_id, user_id, project_id, name=None, filter=None, is_public=None): s_query = [] if filter is not None: helper.delete_keys_from_dict(filter, REMOVE_KEYS) @@ -56,9 +58,11 @@ def update(funnel_id, user_id, name=None, filter=None, is_public=None): UPDATE public.funnels SET {" , ".join(s_query)} WHERE funnel_id=%(funnel_id)s - RETURNING *;""", - {"user_id": user_id, "funnel_id": funnel_id, "name": name, - "filter": json.dumps(filter) if filter is not None else None, "is_public": is_public}) + AND project_id = %(project_id)s + AND (user_id = %(user_id)s OR is_public) + RETURNING *;""", {"user_id": user_id, "funnel_id": funnel_id, "name": name, + "filter": json.dumps(filter) if filter is not None else None, "is_public": is_public, + "project_id": project_id}) # print("--------------------") # print(query) # print("--------------------") @@ -74,13 +78,12 @@ def update(funnel_id, user_id, name=None, filter=None, is_public=None): def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date=None, details=False): with pg_client.PostgresClient() as cur: - team_query = "" cur.execute( cur.mogrify( f"""\ - SELECT DISTINCT ON (funnels.funnel_id) funnel_id,project_id, user_id, name, created_at, deleted_at, is_public + SELECT funnel_id, project_id, user_id, name, created_at, deleted_at, is_public {",filter" if details else ""} - FROM public.funnels {team_query} + FROM public.funnels WHERE project_id = %(project_id)s AND funnels.deleted_at IS NULL AND (funnels.user_id = %(user_id)s OR 
funnels.is_public);""", @@ -93,12 +96,14 @@ def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date for row in rows: row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"]) if details: - row["filter"]["events"] = filter_stages(row["filter"]["events"]) + # row["filter"]["events"] = filter_stages(row["filter"]["events"]) get_start_end_time(filter_d=row["filter"], range_value=range_value, start_date=start_date, end_date=end_date) - counts = sessions.search2_pg(data=row["filter"], project_id=project_id, user_id=None, count_only=True) + counts = sessions.search2_pg(data=schemas.SessionsSearchPayloadSchema.parse_obj(row["filter"]), + project_id=project_id, user_id=None, count_only=True) row["sessionsCount"] = counts["countSessions"] row["usersCount"] = counts["countUsers"] + filter_clone = dict(row["filter"]) overview = significance.get_overview(filter_d=row["filter"], project_id=project_id) row["stages"] = overview["stages"] row.pop("filter") @@ -107,6 +112,7 @@ def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date row["criticalIssuesCount"] = overview["criticalIssuesCount"] row["missedConversions"] = 0 if len(row["stages"]) < 2 \ else row["stages"][0]["sessionsCount"] - row["stages"][-1]["sessionsCount"] + row["filter"] = helper.old_search_payload_to_flat(filter_clone) return rows @@ -135,7 +141,8 @@ def delete(project_id, funnel_id, user_id): UPDATE public.funnels SET deleted_at = timezone('utc'::text, now()) WHERE project_id = %(project_id)s - AND funnel_id = %(funnel_id)s;""", + AND funnel_id = %(funnel_id)s + AND (user_id = %(user_id)s OR is_public);""", {"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id}) ) @@ -143,28 +150,29 @@ def delete(project_id, funnel_id, user_id): def get_sessions(project_id, funnel_id, user_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, project_id=project_id) + f = get(funnel_id=funnel_id, project_id=project_id, 
user_id=user_id, flatten=False) if f is None: return {"errors": ["funnel not found"]} get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) - return sessions.search2_pg(data=f["filter"], project_id=project_id, user_id=user_id) + return sessions.search2_pg(data=schemas.SessionsSearchPayloadSchema.parse_obj(f["filter"]), project_id=project_id, + user_id=user_id) -def get_sessions_on_the_fly(funnel_id, project_id, user_id, data): - data["events"] = filter_stages(data.get("events", [])) - if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) +def get_sessions_on_the_fly(funnel_id, project_id, user_id, data: schemas.FunnelSearchPayloadSchema): + # data.events = filter_stages(data.events) + if len(data.events) == 0: + f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id) if f is None: return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None), - start_date=data.get('startDate', None), - end_date=data.get('endDate', None)) - data = f["filter"] - return sessions.search2_pg(data=data, project_id=project_id, user_id=user_id) + get_start_end_time(filter_d=f["filter"], range_value=data.range_value, + start_date=data.startDate, end_date=data.endDate) + data = schemas.FunnelSearchPayloadSchema.parse_obj(f["filter"]) + return sessions.search2_pg(data=data, project_id=project_id, + user_id=user_id) -def get_top_insights(project_id, funnel_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, project_id=project_id) +def get_top_insights(project_id, user_id, funnel_id, range_value=None, start_date=None, end_date=None): + f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id, flatten=False) if f is None: return {"errors": ["funnel not found"]} get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) @@ -174,10 +182,10 @@ def 
get_top_insights(project_id, funnel_id, range_value=None, start_date=None, e "totalDropDueToIssues": total_drop_due_to_issues}} -def get_top_insights_on_the_fly(funnel_id, project_id, data): - data["events"] = filter_stages(data.get("events", [])) +def get_top_insights_on_the_fly(funnel_id, user_id, project_id, data): + # data["events"] = filter_stages(data.get("events", [])) if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) + f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id) if f is None: return {"errors": ["funnel not found"]} get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None), @@ -191,8 +199,8 @@ def get_top_insights_on_the_fly(funnel_id, project_id, data): "totalDropDueToIssues": total_drop_due_to_issues}} -def get_issues(project_id, funnel_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, project_id=project_id) +def get_issues(project_id, user_id, funnel_id, range_value=None, start_date=None, end_date=None): + f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id, flatten=False) if f is None: return {"errors": ["funnel not found"]} get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) @@ -202,12 +210,12 @@ def get_issues(project_id, funnel_id, range_value=None, start_date=None, end_dat @dev.timed -def get_issues_on_the_fly(funnel_id, project_id, data): +def get_issues_on_the_fly(funnel_id, user_id, project_id, data): first_stage = data.get("firstStage") last_stage = data.get("lastStage") - data["events"] = filter_stages(data.get("events", [])) + # data["events"] = filter_stages(data.get("events", [])) if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) + f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id) if f is None: return {"errors": ["funnel not found"]} get_start_end_time(filter_d=f["filter"], 
range_value=data.get("rangeValue", None), @@ -220,7 +228,7 @@ def get_issues_on_the_fly(funnel_id, project_id, data): last_stage=last_stage))} -def get(funnel_id, project_id): +def get(funnel_id, project_id, user_id, flatten=True): with pg_client.PostgresClient() as cur: cur.execute( cur.mogrify( @@ -230,8 +238,9 @@ def get(funnel_id, project_id): FROM public.funnels WHERE project_id = %(project_id)s AND deleted_at IS NULL - AND funnel_id = %(funnel_id)s;""", - {"funnel_id": funnel_id, "project_id": project_id} + AND funnel_id = %(funnel_id)s + AND (user_id = %(user_id)s OR is_public);""", + {"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id} ) ) @@ -240,22 +249,27 @@ def get(funnel_id, project_id): return None f["createdAt"] = TimeUTC.datetime_to_timestamp(f["createdAt"]) - f["filter"]["events"] = filter_stages(stages=f["filter"]["events"]) + # f["filter"]["events"] = filter_stages(stages=f["filter"]["events"]) + if flatten: + f["filter"] = helper.old_search_payload_to_flat(f["filter"]) return f @dev.timed -def search_by_issue(user_id, project_id, funnel_id, issue_id, data, range_value=None, start_date=None, end_date=None): - if len(data.get("events", [])) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) +def search_by_issue(user_id, project_id, funnel_id, issue_id, data: schemas.FunnelSearchPayloadSchema, range_value=None, + start_date=None, end_date=None): + if len(data.events) == 0: + f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id) if f is None: return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=data.get('startDate', start_date), - end_date=data.get('endDate', end_date)) - data = f["filter"] + data.startDate = data.startDate if data.startDate is not None else start_date + data.endDate = data.endDate if data.endDate is not None else end_date + get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=data.startDate, + 
end_date=data.endDate) + data = schemas.FunnelSearchPayloadSchema.parse_obj(f["filter"]) - # insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id) - issues = get_issues_on_the_fly(funnel_id=funnel_id, project_id=project_id, data=data).get("issues", {}) + issues = get_issues_on_the_fly(funnel_id=funnel_id, user_id=user_id, project_id=project_id, data=data.dict()) \ + .get("issues", {}) issues = issues.get("significant", []) + issues.get("insignificant", []) issue = None for i in issues: diff --git a/api/chalicelib/core/insights.py b/api/chalicelib/core/insights.py index 79b32a4b1..08adfd3ca 100644 --- a/api/chalicelib/core/insights.py +++ b/api/chalicelib/core/insights.py @@ -1,3 +1,4 @@ +import schemas from chalicelib.core import sessions_metas from chalicelib.utils import helper, dev from chalicelib.utils import pg_client @@ -45,7 +46,7 @@ def journey(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp= elif f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): event_table = JOURNEY_TYPES[f["value"]]["table"] event_column = JOURNEY_TYPES[f["value"]]["column"] - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query_subset.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] @@ -300,7 +301,7 @@ def feature_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), en elif f["type"] == "EVENT_VALUE": event_value = f["value"] default = False - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] event_table = JOURNEY_TYPES[event_type]["table"] @@ -390,7 +391,7 @@ def feature_acquisition(project_id, 
startTimestamp=TimeUTC.now(delta_days=-70), elif f["type"] == "EVENT_VALUE": event_value = f["value"] default = False - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] event_table = JOURNEY_TYPES[event_type]["table"] @@ -477,7 +478,7 @@ def feature_popularity_frequency(project_id, startTimestamp=TimeUTC.now(delta_da if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): event_table = JOURNEY_TYPES[f["value"]]["table"] event_column = JOURNEY_TYPES[f["value"]]["column"] - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] @@ -543,7 +544,7 @@ def feature_adoption(project_id, startTimestamp=TimeUTC.now(delta_days=-70), end elif f["type"] == "EVENT_VALUE": event_value = f["value"] default = False - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] event_table = JOURNEY_TYPES[event_type]["table"] @@ -613,7 +614,7 @@ def feature_adoption_top_users(project_id, startTimestamp=TimeUTC.now(delta_days elif f["type"] == "EVENT_VALUE": event_value = f["value"] default = False - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] event_table = JOURNEY_TYPES[event_type]["table"] @@ -674,7 +675,7 @@ def 
feature_adoption_daily_usage(project_id, startTimestamp=TimeUTC.now(delta_da elif f["type"] == "EVENT_VALUE": event_value = f["value"] default = False - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query_chart.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] event_table = JOURNEY_TYPES[event_type]["table"] @@ -737,7 +738,7 @@ def feature_intensity(project_id, startTimestamp=TimeUTC.now(delta_days=-70), en if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): event_table = JOURNEY_TYPES[f["value"]]["table"] event_column = JOURNEY_TYPES[f["value"]]["column"] - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] pg_sub_query.append(f"length({event_column})>2") @@ -772,7 +773,7 @@ def users_active(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTime for f in filters: if f["type"] == "PERIOD" and f["value"] in ["DAY", "WEEK"]: period = f["value"] - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query_chart.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] @@ -844,7 +845,7 @@ def users_slipping(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTi elif f["type"] == "EVENT_VALUE": event_value = f["value"] default = False - elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: pg_sub_query.append(f"sessions.user_id = %(user_id)s") extra_values["user_id"] = f["value"] event_table = 
JOURNEY_TYPES[event_type]["table"] diff --git a/api/chalicelib/core/performance_event.py b/api/chalicelib/core/performance_event.py new file mode 100644 index 000000000..76633ce40 --- /dev/null +++ b/api/chalicelib/core/performance_event.py @@ -0,0 +1,15 @@ +import schemas + + +def get_col(perf: schemas.PerformanceEventType): + return { + schemas.PerformanceEventType.location_dom_complete: {"column": "dom_building_time", "extraJoin": None}, + schemas.PerformanceEventType.location_ttfb: {"column": "ttfb", "extraJoin": None}, + schemas.PerformanceEventType.location_avg_cpu_load: {"column": "avg_cpu", "extraJoin": "events.performance"}, + schemas.PerformanceEventType.location_avg_memory_usage: {"column": "avg_used_js_heap_size", + "extraJoin": "events.performance"}, + schemas.PerformanceEventType.fetch_failed: {"column": "success", "extraJoin": None}, + # schemas.PerformanceEventType.fetch_duration: {"column": "duration", "extraJoin": None}, + schemas.PerformanceEventType.location_largest_contentful_paint_time: {"column": "first_contentful_paint_time", + "extraJoin": None} + }.get(perf) diff --git a/api/chalicelib/core/saved_search.py b/api/chalicelib/core/saved_search.py new file mode 100644 index 000000000..d1e8fe15f --- /dev/null +++ b/api/chalicelib/core/saved_search.py @@ -0,0 +1,122 @@ +import json + +import schemas +from chalicelib.utils import helper, pg_client +from chalicelib.utils.TimeUTC import TimeUTC + + +def create(project_id, user_id, data: schemas.SavedSearchSchema): + with pg_client.PostgresClient() as cur: + data = data.dict() + data["filter"] = json.dumps(data["filter"]) + query = cur.mogrify("""\ + INSERT INTO public.searches (project_id, user_id, name, filter,is_public) + VALUES (%(project_id)s, %(user_id)s, %(name)s, %(filter)s::jsonb,%(is_public)s) + RETURNING *;""", {"user_id": user_id, "project_id": project_id, **data}) + cur.execute( + query + ) + r = cur.fetchone() + r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) + 
r["filter"] = helper.old_search_payload_to_flat(r["filter"]) + r = helper.dict_to_camel_case(r) + return {"data": r} + + +def update(search_id, project_id, user_id, data: schemas.SavedSearchSchema): + with pg_client.PostgresClient() as cur: + data = data.dict() + data["filter"] = json.dumps(data["filter"]) + query = cur.mogrify(f"""\ + UPDATE public.searches + SET name = %(name)s, + filter = %(filter)s, + is_public = %(is_public)s + WHERE search_id=%(search_id)s + AND project_id= %(project_id)s + AND (user_id = %(user_id)s OR is_public) + RETURNING *;""", {"search_id": search_id, "project_id": project_id, "user_id": user_id, **data}) + cur.execute( + query + ) + r = cur.fetchone() + r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) + r["filter"] = helper.old_search_payload_to_flat(r["filter"]) + r = helper.dict_to_camel_case(r) + # r["filter"]["startDate"], r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"]) + return r + + +def get_all(project_id, user_id, details=False): + with pg_client.PostgresClient() as cur: + print(cur.mogrify( + f"""\ + SELECT search_id, project_id, user_id, name, created_at, deleted_at, is_public + {",filter" if details else ""} + FROM public.searches + WHERE project_id = %(project_id)s + AND deleted_at IS NULL + AND (user_id = %(user_id)s OR is_public);""", + {"project_id": project_id, "user_id": user_id} + )) + cur.execute( + cur.mogrify( + f"""\ + SELECT search_id, project_id, user_id, name, created_at, deleted_at, is_public + {",filter" if details else ""} + FROM public.searches + WHERE project_id = %(project_id)s + AND deleted_at IS NULL + AND (user_id = %(user_id)s OR is_public);""", + {"project_id": project_id, "user_id": user_id} + ) + ) + + rows = cur.fetchall() + rows = helper.list_to_camel_case(rows) + for row in rows: + row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"]) + if details: + if isinstance(row["filter"], list) and len(row["filter"]) == 0: + row["filter"] = 
{} + row["filter"] = helper.old_search_payload_to_flat(row["filter"]) + return rows + + +def delete(project_id, search_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify("""\ + UPDATE public.searches + SET deleted_at = timezone('utc'::text, now()) + WHERE project_id = %(project_id)s + AND search_id = %(search_id)s + AND (user_id = %(user_id)s OR is_public);""", + {"search_id": search_id, "project_id": project_id, "user_id": user_id}) + ) + + return {"state": "success"} + + +def get(search_id, project_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT + * + FROM public.searches + WHERE project_id = %(project_id)s + AND deleted_at IS NULL + AND search_id = %(search_id)s + AND (user_id = %(user_id)s OR is_public);""", + {"search_id": search_id, "project_id": project_id, "user_id": user_id} + ) + ) + + f = helper.dict_to_camel_case(cur.fetchone()) + if f is None: + return None + + f["createdAt"] = TimeUTC.datetime_to_timestamp(f["createdAt"]) + f["filter"] = helper.old_search_payload_to_flat(f["filter"]) + return f diff --git a/api/chalicelib/core/sessions.py b/api/chalicelib/core/sessions.py index a6d5a50e0..5f7cd23af 100644 --- a/api/chalicelib/core/sessions.py +++ b/api/chalicelib/core/sessions.py @@ -1,7 +1,7 @@ import schemas -from chalicelib.core import events, sessions_metas, metadata, events_ios, \ - sessions_mobs, issues, projects, errors, resources, assist -from chalicelib.utils import pg_client, helper, dev +from chalicelib.core import events, metadata, events_ios, \ + sessions_mobs, issues, projects, errors, resources, assist, performance_event +from chalicelib.utils import pg_client, helper, dev, metrics_helper SESSION_PROJECTION_COLS = """s.project_id, s.session_id::text AS session_id, @@ -104,30 +104,25 @@ def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_ return None -def __is_multivalue(op: schemas.SearchEventOperator): - return op in 
[schemas.SearchEventOperator._is_any, schemas.SearchEventOperator._on_any] - - def __get_sql_operator(op: schemas.SearchEventOperator): - op = op.lower() return { schemas.SearchEventOperator._is: "=", schemas.SearchEventOperator._is_any: "IN", schemas.SearchEventOperator._on: "=", schemas.SearchEventOperator._on_any: "IN", - schemas.SearchEventOperator._isnot: "!=", - schemas.SearchEventOperator._noton: "!=", + schemas.SearchEventOperator._is_not: "!=", + schemas.SearchEventOperator._not_on: "!=", schemas.SearchEventOperator._contains: "ILIKE", - schemas.SearchEventOperator._notcontains: "NOT ILIKE", + schemas.SearchEventOperator._not_contains: "NOT ILIKE", schemas.SearchEventOperator._starts_with: "ILIKE", schemas.SearchEventOperator._ends_with: "ILIKE", }.get(op, "=") def __is_negation_operator(op: schemas.SearchEventOperator): - return op in [schemas.SearchEventOperator._isnot, - schemas.SearchEventOperator._noton, - schemas.SearchEventOperator._notcontains] + return op in [schemas.SearchEventOperator._is_not, + schemas.SearchEventOperator._not_on, + schemas.SearchEventOperator._not_contains] def __reverse_sql_operator(op): @@ -135,8 +130,8 @@ def __reverse_sql_operator(op): def __get_sql_operator_multiple(op: schemas.SearchEventOperator): - # op == schemas.SearchEventOperator._is is for filter support - return " IN " if __is_multivalue(op) or op == schemas.SearchEventOperator._is else " NOT IN " + return " IN " if op not in [schemas.SearchEventOperator._is_not, schemas.SearchEventOperator._not_on, + schemas.SearchEventOperator._not_contains] else " NOT IN " def __get_sql_value_multiple(values): @@ -145,323 +140,33 @@ def __get_sql_value_multiple(values): return tuple(values) if isinstance(values, list) else (values,) +def _multiple_conditions(condition, values, value_key="value", is_not=False): + query = [] + for i in range(len(values)): + k = f"{value_key}_{i}" + query.append(condition.replace(value_key, k)) + return "(" + (" AND " if is_not else " OR 
").join(query) + ")" + + +def _multiple_values(values, value_key="value"): + query_values = {} + for i in range(len(values)): + k = f"{value_key}_{i}" + query_values[k] = values[i] + return query_values + + +def _isAny_opreator(op: schemas.SearchEventOperator): + return op in [schemas.SearchEventOperator._on_any, schemas.SearchEventOperator._is_any] + + @dev.timed def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, favorite_only=False, errors_only=False, - error_status="ALL", - count_only=False, issue=None): - generic_args = {"startDate": data.startDate, "endDate": data.endDate, - "projectId": project_id, - "userId": user_id} + error_status="ALL", count_only=False, issue=None): + full_args, query_part, sort = search_query_parts(data, error_status, errors_only, favorite_only, issue, project_id, + user_id) + with pg_client.PostgresClient() as cur: - ss_constraints = [] - extra_constraints = [ - cur.mogrify("s.project_id = %(project_id)s", {"project_id": project_id}), - cur.mogrify("s.duration IS NOT NULL", {}) - ] - extra_from = "" - fav_only_join = "" - if favorite_only and not errors_only: - fav_only_join = "LEFT JOIN public.user_favorite_sessions AS fs ON fs.session_id = s.session_id" - extra_constraints.append(cur.mogrify("fs.user_id = %(userId)s", {"userId": user_id})) - events_query_part = "" - - if len(data.filters) > 0: - meta_keys = metadata.get(project_id=project_id) - meta_keys = {m["key"]: m["index"] for m in meta_keys} - for f in data.filters: - if not isinstance(f.value, list): - f.value = [f.value] - if len(f.value) == 0 or f.value[0] is None: - continue - filter_type = f.type.upper() - f.value = __get_sql_value_multiple(f.value) - if filter_type == sessions_metas.meta_type.USERBROWSER: - op = __get_sql_operator_multiple(f.operator) - extra_constraints.append(cur.mogrify(f's.user_browser {op} %(value)s', {"value": f.value})) - ss_constraints.append(cur.mogrify(f'ms.user_browser {op} %(value)s', {"value": f.value})) - - elif 
filter_type in [sessions_metas.meta_type.USEROS, sessions_metas.meta_type.USEROS_IOS]: - op = __get_sql_operator_multiple(f.operator) - extra_constraints.append(cur.mogrify(f's.user_os {op} %(value)s', {"value": f.value})) - ss_constraints.append(cur.mogrify(f'ms.user_os {op} %(value)s', {"value": f.value})) - - elif filter_type in [sessions_metas.meta_type.USERDEVICE, sessions_metas.meta_type.USERDEVICE_IOS]: - op = __get_sql_operator_multiple(f.operator) - extra_constraints.append(cur.mogrify(f's.user_device {op} %(value)s', {"value": f.value})) - ss_constraints.append(cur.mogrify(f'ms.user_device {op} %(value)s', {"value": f.value})) - - elif filter_type in [sessions_metas.meta_type.USERCOUNTRY, sessions_metas.meta_type.USERCOUNTRY_IOS]: - op = __get_sql_operator_multiple(f.operator) - extra_constraints.append(cur.mogrify(f's.user_country {op} %(value)s', {"value": f.value})) - ss_constraints.append(cur.mogrify(f'ms.user_country {op} %(value)s', {"value": f.value})) - elif filter_type == "duration".upper(): - if len(f.value) > 0 and f.value[0] is not None: - extra_constraints.append( - cur.mogrify("s.duration >= %(minDuration)s", {"minDuration": f.value[0]})) - ss_constraints.append( - cur.mogrify("ms.duration >= %(minDuration)s", {"minDuration": f.value[0]})) - if len(f.value) > 1 and f.value[1] is not None and f.value[1] > 0: - extra_constraints.append( - cur.mogrify("s.duration <= %(maxDuration)s", {"maxDuration": f.value[1]})) - ss_constraints.append( - cur.mogrify("ms.duration <= %(maxDuration)s", {"maxDuration": f.value[1]})) - elif filter_type == sessions_metas.meta_type.REFERRER: - # events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)" - extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)" - op = __get_sql_operator_multiple(f.operator) - extra_constraints.append( - cur.mogrify(f"p.base_referrer {op} %(referrer)s", {"referrer": f.value})) - elif filter_type == 
events.event_type.METADATA.ui_type: - op = __get_sql_operator(f.operator) - if f.key in meta_keys.keys(): - extra_constraints.append( - cur.mogrify(f"s.{metadata.index_to_colname(meta_keys[f.key])} {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f.value[0], op)})) - ss_constraints.append( - cur.mogrify(f"ms.{metadata.index_to_colname(meta_keys[f.key])} {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f.value[0], op)})) - elif filter_type in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: - op = __get_sql_operator(f.operator) - extra_constraints.append( - cur.mogrify(f"s.user_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f.value[0], op)}) - ) - ss_constraints.append( - cur.mogrify(f"ms.user_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f.value[0], op)}) - ) - elif filter_type in [sessions_metas.meta_type.USERANONYMOUSID, - sessions_metas.meta_type.USERANONYMOUSID_IOS]: - op = __get_sql_operator(f.operator) - extra_constraints.append( - cur.mogrify(f"s.user_anonymous_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f.value[0], op)}) - ) - ss_constraints.append( - cur.mogrify(f"ms.user_anonymous_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f.value[0], op)}) - ) - elif filter_type in [sessions_metas.meta_type.REVID, sessions_metas.meta_type.REVID_IOS]: - op = __get_sql_operator(f.operator) - extra_constraints.append( - cur.mogrify(f"s.rev_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f.value[0], op)}) - ) - ss_constraints.append( - cur.mogrify(f"ms.rev_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f.value[0], op)}) - ) - - # --------------------------------------------------------------------------- - if len(data.events) > 0: - ss_constraints = [s.decode('UTF-8') for s in ss_constraints] - events_query_from = [] - event_index = 0 - - for event in data.events: - event_type = event.type.upper() - op = 
__get_sql_operator(event.operator) - is_not = False - if __is_negation_operator(event.operator): - is_not = True - op = __reverse_sql_operator(op) - if event_index == 0: - event_from = "%s INNER JOIN public.sessions AS ms USING (session_id)" - event_where = ["ms.project_id = %(projectId)s", "main.timestamp >= %(startDate)s", - "main.timestamp <= %(endDate)s", "ms.start_ts >= %(startDate)s", - "ms.start_ts <= %(endDate)s", "ms.duration IS NOT NULL"] - else: - event_from = "%s" - event_where = ["main.timestamp >= %(startDate)s", "main.timestamp <= %(endDate)s", - f"event_{event_index - 1}.timestamp <= main.timestamp", - "main.session_id=event_0.session_id"] - if __is_multivalue(event.operator): - event_args = {"value": __get_sql_value_multiple(event.value)} - else: - event.value = helper.string_to_op(value=event.value, op=event.operator) - event_args = {"value": helper.string_to_sql_like_with_op(event.value, op)} - if event_type not in list(events.SUPPORTED_TYPES.keys()) \ - or event.value in [None, "", "*"] \ - and (event_type != events.event_type.ERROR.ui_type \ - or event_type != events.event_type.ERROR_IOS.ui_type): - continue - if event_type == events.event_type.CLICK.ui_type: - event_from = event_from % f"{events.event_type.CLICK.table} AS main " - event_where.append(f"main.{events.event_type.CLICK.column} {op} %(value)s") - - elif event_type == events.event_type.INPUT.ui_type: - event_from = event_from % f"{events.event_type.INPUT.table} AS main " - event_where.append(f"main.{events.event_type.INPUT.column} {op} %(value)s") - if len(event.custom) > 0: - event_where.append("main.value ILIKE %(custom)s") - event_args["custom"] = helper.string_to_sql_like_with_op(event.custom, "ILIKE") - elif event_type == events.event_type.LOCATION.ui_type: - event_from = event_from % f"{events.event_type.LOCATION.table} AS main " - event_where.append(f"main.{events.event_type.LOCATION.column} {op} %(value)s") - elif event_type == events.event_type.CUSTOM.ui_type: - event_from = 
event_from % f"{events.event_type.CUSTOM.table} AS main " - event_where.append(f"main.{events.event_type.CUSTOM.column} {op} %(value)s") - elif event_type == events.event_type.REQUEST.ui_type: - event_from = event_from % f"{events.event_type.REQUEST.table} AS main " - event_where.append(f"main.{events.event_type.REQUEST.column} {op} %(value)s") - elif event_type == events.event_type.GRAPHQL.ui_type: - event_from = event_from % f"{events.event_type.GRAPHQL.table} AS main " - event_where.append(f"main.{events.event_type.GRAPHQL.column} {op} %(value)s") - elif event_type == events.event_type.STATEACTION.ui_type: - event_from = event_from % f"{events.event_type.STATEACTION.table} AS main " - event_where.append(f"main.{events.event_type.STATEACTION.column} {op} %(value)s") - elif event_type == events.event_type.ERROR.ui_type: - if event.source in [None, "*", ""]: - event.source = "js_exception" - event_from = event_from % f"{events.event_type.ERROR.table} AS main INNER JOIN public.errors AS main1 USING(error_id)" - if event.value not in [None, "*", ""]: - event_where.append(f"(main1.message {op} %(value)s OR main1.name {op} %(value)s)") - if event.source not in [None, "*", ""]: - event_where.append(f"main1.source = %(source)s") - event_args["source"] = event.source - elif event.source not in [None, "*", ""]: - event_where.append(f"main1.source = %(source)s") - event_args["source"] = event.source - - # ----- IOS - elif event_type == events.event_type.CLICK_IOS.ui_type: - event_from = event_from % f"{events.event_type.CLICK_IOS.table} AS main " - event_where.append(f"main.{events.event_type.CLICK_IOS.column} {op} %(value)s") - - elif event_type == events.event_type.INPUT_IOS.ui_type: - event_from = event_from % f"{events.event_type.INPUT_IOS.table} AS main " - event_where.append(f"main.{events.event_type.INPUT_IOS.column} {op} %(value)s") - - if len(event.custom) > 0: - event_where.append("main.value ILIKE %(custom)s") - event_args["custom"] = 
helper.string_to_sql_like_with_op(event.custom, "ILIKE") - elif event_type == events.event_type.VIEW_IOS.ui_type: - event_from = event_from % f"{events.event_type.VIEW_IOS.table} AS main " - event_where.append(f"main.{events.event_type.VIEW_IOS.column} {op} %(value)s") - elif event_type == events.event_type.CUSTOM_IOS.ui_type: - event_from = event_from % f"{events.event_type.CUSTOM_IOS.table} AS main " - event_where.append(f"main.{events.event_type.CUSTOM_IOS.column} {op} %(value)s") - elif event_type == events.event_type.REQUEST_IOS.ui_type: - event_from = event_from % f"{events.event_type.REQUEST_IOS.table} AS main " - event_where.append(f"main.{events.event_type.REQUEST_IOS.column} {op} %(value)s") - elif event_type == events.event_type.ERROR_IOS.ui_type: - event_from = event_from % f"{events.event_type.ERROR_IOS.table} AS main INNER JOIN public.crashes_ios AS main1 USING(crash_id)" - if event.value not in [None, "*", ""]: - event_where.append(f"(main1.reason {op} %(value)s OR main1.name {op} %(value)s)") - - else: - continue - if event_index == 0: - event_where += ss_constraints - if is_not: - if event_index == 0: - events_query_from.append(cur.mogrify(f"""\ - (SELECT - session_id, - 0 AS timestamp, - {event_index} AS funnel_step - FROM sessions - WHERE EXISTS(SELECT session_id - FROM {event_from} - WHERE {" AND ".join(event_where)} - AND sessions.session_id=ms.session_id) IS FALSE - AND project_id = %(projectId)s - AND start_ts >= %(startDate)s - AND start_ts <= %(endDate)s - AND duration IS NOT NULL - ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\ - """, {**generic_args, **event_args}).decode('UTF-8')) - else: - events_query_from.append(cur.mogrify(f"""\ - (SELECT - event_0.session_id, - event_{event_index - 1}.timestamp AS timestamp, - {event_index} AS funnel_step - WHERE EXISTS(SELECT session_id FROM {event_from} WHERE {" AND ".join(event_where)}) IS FALSE - ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\ - """, 
{**generic_args, **event_args}).decode('UTF-8')) - else: - events_query_from.append(cur.mogrify(f"""\ - (SELECT main.session_id, MIN(timestamp) AS timestamp,{event_index} AS funnel_step - FROM {event_from} - WHERE {" AND ".join(event_where)} - GROUP BY 1 - ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\ - """, {**generic_args, **event_args}).decode('UTF-8')) - event_index += 1 - if event_index > 0: - events_query_part = f"""SELECT - event_0.session_id, - MIN(event_0.timestamp) AS first_event_ts, - MAX(event_{event_index - 1}.timestamp) AS last_event_ts - FROM {(" INNER JOIN LATERAL ").join(events_query_from)} - GROUP BY 1 - {fav_only_join}""" - else: - data.events = [] - - # --------------------------------------------------------------------------- - - if data.startDate is not None: - extra_constraints.append(cur.mogrify("s.start_ts >= %(startDate)s", {"startDate": data.startDate})) - else: - data.startDate = None - if data.endDate is not None: - extra_constraints.append(cur.mogrify("s.start_ts <= %(endDate)s", {"endDate": data.endDate})) - else: - data.endDate = None - - if data.platform is not None: - if data.platform == schemas.PlatformType.mobile: - extra_constraints.append(b"s.user_os in ('Android','BlackBerry OS','iOS','Tizen','Windows Phone')") - elif data.platform == schemas.PlatformType.desktop: - extra_constraints.append( - b"s.user_os in ('Chrome OS','Fedora','Firefox OS','Linux','Mac OS X','Ubuntu','Windows')") - - order = "DESC" - if data.order is not None: - order = data.order - sort = 'session_id' - if data.sort is not None and data.sort != "session_id": - sort += " " + order + "," + helper.key_to_snake_case(data.sort) - else: - sort = 'session_id' - - if errors_only: - extra_from += f" INNER JOIN {events.event_type.ERROR.table} AS er USING (session_id) INNER JOIN public.errors AS ser USING (error_id)" - extra_constraints.append(b"ser.source = 'js_exception'") - if error_status != "ALL": - 
extra_constraints.append(cur.mogrify("ser.status = %(status)s", {"status": error_status.lower()})) - if favorite_only: - extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)" - extra_constraints.append(cur.mogrify("ufe.user_id = %(user_id)s", {"user_id": user_id})) - - extra_constraints = [extra.decode('UTF-8') + "\n" for extra in extra_constraints] - if not favorite_only and not errors_only: - extra_from += """LEFT JOIN (SELECT user_id, session_id - FROM public.user_favorite_sessions - WHERE user_id = %(userId)s) AS favorite_sessions - USING (session_id)""" - extra_join = "" - if issue is not None: - extra_join = cur.mogrify(""" - INNER JOIN LATERAL(SELECT TRUE FROM events_common.issues INNER JOIN public.issues AS p_issues USING (issue_id) - WHERE issues.session_id=f.session_id - AND p_issues.type=%(type)s - AND p_issues.context_string=%(contextString)s - AND timestamp >= f.first_event_ts - AND timestamp <= f.last_event_ts) AS issues ON(TRUE) - """, {"contextString": issue["contextString"], "type": issue["type"]}).decode('UTF-8') - - query_part = f"""\ - FROM {f"({events_query_part}) AS f" if len(events_query_part) > 0 else "public.sessions AS s"} - {extra_join} - {"INNER JOIN public.sessions AS s USING(session_id)" if len(events_query_part) > 0 else ""} - {extra_from} - WHERE - - {" AND ".join(extra_constraints)}""" - if errors_only: main_query = cur.mogrify(f"""SELECT DISTINCT er.error_id, ser.status, ser.parent_error_id, ser.payload, COALESCE((SELECT TRUE @@ -472,38 +177,47 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, f FROM public.user_viewed_errors AS ve WHERE er.error_id = ve.error_id AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed - {query_part};""", - generic_args) + {query_part};""", full_args) elif count_only: - main_query = cur.mogrify( - f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions, COUNT(DISTINCT s.user_uuid) AS count_users - {query_part};""", - generic_args) + 
main_query = cur.mogrify(f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions, + COUNT(DISTINCT s.user_uuid) AS count_users + {query_part};""", full_args) else: - main_query = cur.mogrify(f"""SELECT * FROM - (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS} - {query_part} - ORDER BY s.session_id desc) AS filtred_sessions - ORDER BY favorite DESC, issue_score DESC, {sort} {order};""", - generic_args) + main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count, COALESCE(JSONB_AGG(full_sessions) FILTER (WHERE rn <= 200), '[]'::JSONB) AS sessions + FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY favorite DESC, issue_score DESC, session_id desc, start_ts desc) AS rn FROM + (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS} + {query_part} + ORDER BY s.session_id desc) AS filtred_sessions + ORDER BY favorite DESC, issue_score DESC, {sort} {data.order}) AS full_sessions;""", + full_args) + + # main_query = cur.mogrify(f"""SELECT * FROM + # (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS} + # {query_part} + # ORDER BY s.session_id desc) AS filtred_sessions + # ORDER BY favorite DESC, issue_score DESC, {sort} {order};""", + # full_args) # print("--------------------") # print(main_query) cur.execute(main_query) - + # print("--------------------") if count_only: return helper.dict_to_camel_case(cur.fetchone()) - sessions = [] - total = cur.rowcount - row = cur.fetchone() - limit = 200 - while row is not None and len(sessions) < limit: - if row.get("favorite"): - limit += 1 - sessions.append(row) - row = cur.fetchone() + sessions = cur.fetchone() + total = sessions["count"] + sessions = sessions["sessions"] + # sessions = [] + # total = cur.rowcount + # row = cur.fetchone() + # limit = 200 + # while row is not None and len(sessions) < limit: + # if row.get("favorite"): + # limit += 1 + # sessions.append(row) + # row = cur.fetchone() if errors_only: return sessions @@ -516,6 +230,551 @@ def search2_pg(data: 
schemas.SessionsSearchPayloadSchema, project_id, user_id, f } +@dev.timed +def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, density: int, + view_type: schemas.MetricViewType): + step_size = metrics_helper.__get_step_size(endTimestamp=data.endDate, startTimestamp=data.startDate, + density=density, factor=1) + full_args, query_part, sort = search_query_parts(data=data, error_status=None, errors_only=False, + favorite_only=False, issue=None, project_id=project_id, + user_id=None) + full_args["step_size"] = step_size + with pg_client.PostgresClient() as cur: + if view_type == schemas.MetricViewType.line_chart: + main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT DISTINCT ON(s.session_id) s.session_id, s.start_ts + {query_part}) + SELECT generated_timestamp AS timestamp, + COUNT(s) AS count + FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp + LEFT JOIN LATERAL ( SELECT 1 AS s + FROM full_sessions + WHERE start_ts >= generated_timestamp + AND start_ts < generated_timestamp + %(step_size)s) AS sessions ON (TRUE) + GROUP BY generated_timestamp + ORDER BY generated_timestamp;""", full_args) + else: + main_query = cur.mogrify(f"""SELECT count(DISTINCT s.session_id) AS count + {query_part};""", full_args) + + # print("--------------------") + # print(main_query) + cur.execute(main_query) + # print("--------------------") + if view_type == schemas.MetricViewType.line_chart: + sessions = cur.fetchall() + else: + sessions = cur.fetchone()["count"] + return sessions + + +def search_query_parts(data, error_status, errors_only, favorite_only, issue, project_id, user_id): + ss_constraints = [] + full_args = {"project_id": project_id, "startDate": data.startDate, "endDate": data.endDate, + "projectId": project_id, "userId": user_id} + extra_constraints = [ + "s.project_id = %(project_id)s", + "s.duration IS NOT NULL" + ] + extra_from = "" + fav_only_join = "" + if favorite_only and not errors_only: + 
fav_only_join = "LEFT JOIN public.user_favorite_sessions AS fs ON fs.session_id = s.session_id" + extra_constraints.append("fs.user_id = %(userId)s") + full_args["userId"] = user_id + events_query_part = "" + if len(data.filters) > 0: + meta_keys = None + for i, f in enumerate(data.filters): + if not isinstance(f.value, list): + f.value = [f.value] + if len(f.value) == 0 or f.value[0] is None: + continue + filter_type = f.type + # f.value = __get_sql_value_multiple(f.value) + f.value = helper.values_for_operator(value=f.value, op=f.operator) + f_k = f"f_value{i}" + full_args = {**full_args, **_multiple_values(f.value, value_key=f_k)} + op = __get_sql_operator(f.operator) \ + if filter_type not in [schemas.FilterType.events_count] else f.operator + is_any = _isAny_opreator(f.operator) + if not is_any and len(f.value) == 0: + continue + is_not = False + if __is_negation_operator(f.operator): + is_not = True + # op = __reverse_sql_operator(op) + if filter_type == schemas.FilterType.user_browser: + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + + elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]: + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + + elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]: + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + 
_multiple_conditions(f'ms.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + + elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]: + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + + elif filter_type in [schemas.FilterType.utm_source]: + if is_any: + extra_constraints.append('s.utm_source IS NOT NULL') + ss_constraints.append('ms.utm_source IS NOT NULL') + else: + extra_constraints.append( + _multiple_conditions(f's.utm_source {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.utm_source {op} %({f_k})s', f.value, is_not=is_not, + value_key=f_k)) + elif filter_type in [schemas.FilterType.utm_medium]: + if is_any: + extra_constraints.append('s.utm_medium IS NOT NULL') + ss_constraints.append('ms.utm_medium IS NOT NULL') + else: + extra_constraints.append( + _multiple_conditions(f's.utm_medium {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.utm_medium {op} %({f_k})s', f.value, is_not=is_not, + value_key=f_k)) + elif filter_type in [schemas.FilterType.utm_campaign]: + if is_any: + extra_constraints.append('s.utm_campaign IS NOT NULL') + ss_constraints.append('ms.utm_campaign IS NOT NULL') + else: + extra_constraints.append( + _multiple_conditions(f's.utm_campaign {op} %({f_k})s', f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.utm_campaign {op} %({f_k})s', f.value, is_not=is_not, + value_key=f_k)) + + elif filter_type == schemas.FilterType.duration: + if len(f.value) > 0 and f.value[0] is not None: + extra_constraints.append("s.duration >= %(minDuration)s") + ss_constraints.append("ms.duration >= 
%(minDuration)s") + full_args["minDuration"] = f.value[0] + if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0: + extra_constraints.append("s.duration <= %(maxDuration)s") + ss_constraints.append("ms.duration <= %(maxDuration)s") + full_args["maxDuration"] = f.value[1] + elif filter_type == schemas.FilterType.referrer: + # events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)" + extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)" + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f"p.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + elif filter_type == events.event_type.METADATA.ui_type: + # get metadata list only if you need it + if meta_keys is None: + meta_keys = metadata.get(project_id=project_id) + meta_keys = {m["key"]: m["index"] for m in meta_keys} + # op = __get_sql_operator(f.operator) + if f.source in meta_keys.keys(): + extra_constraints.append( + _multiple_conditions(f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s", + f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s", + f.value, is_not=is_not, value_key=f_k)) + elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + # op = __get_sql_operator(f.operator) + extra_constraints.append( + _multiple_conditions(f"s.user_id {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.user_id {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + elif filter_type in [schemas.FilterType.user_anonymous_id, + schemas.FilterType.user_anonymous_id_ios]: + # op = __get_sql_operator(f.operator) + extra_constraints.append( + _multiple_conditions(f"s.user_anonymous_id {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + 
_multiple_conditions(f"ms.user_anonymous_id {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]: + # op = __get_sql_operator(f.operator) + extra_constraints.append( + _multiple_conditions(f"s.rev_id {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.rev_id {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + elif filter_type == schemas.FilterType.platform: + # op = __get_sql_operator(f.operator) + extra_constraints.append( + _multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.user_device_type {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + elif filter_type == schemas.FilterType.issue: + extra_constraints.append( + _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"%({f_k})s {op} ANY (ms.issue_types)", f.value, is_not=is_not, + value_key=f_k)) + elif filter_type == schemas.FilterType.events_count: + extra_constraints.append( + _multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.events_count {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + # --------------------------------------------------------------------------- + if len(data.events) > 0: + # ss_constraints = [s.decode('UTF-8') for s in ss_constraints] + events_query_from = [] + event_index = 0 + or_events = data.events_order == schemas.SearchEventOrder._or + # events_joiner = " FULL JOIN " if or_events else " INNER JOIN LATERAL " + events_joiner = " UNION " if or_events else " INNER JOIN LATERAL " + for i, event in enumerate(data.events): + event_type = event.type + is_any = _isAny_opreator(event.operator) + if not isinstance(event.value, 
list): + event.value = [event.value] + if not is_any and len(event.value) == 0: + continue + op = __get_sql_operator(event.operator) + is_not = False + if __is_negation_operator(event.operator): + is_not = True + op = __reverse_sql_operator(op) + if event_index == 0 or or_events: + event_from = "%s INNER JOIN public.sessions AS ms USING (session_id)" + event_where = ["ms.project_id = %(projectId)s", "main.timestamp >= %(startDate)s", + "main.timestamp <= %(endDate)s", "ms.start_ts >= %(startDate)s", + "ms.start_ts <= %(endDate)s", "ms.duration IS NOT NULL"] + else: + event_from = "%s" + event_where = ["main.timestamp >= %(startDate)s", "main.timestamp <= %(endDate)s", + "main.session_id=event_0.session_id"] + if data.events_order == schemas.SearchEventOrder._then: + event_where.append(f"event_{event_index - 1}.timestamp <= main.timestamp") + e_k = f"e_value{i}" + if event.type != schemas.PerformanceEventType.time_between_events: + event.value = helper.values_for_operator(value=event.value, op=event.operator) + full_args = {**full_args, **_multiple_values(event.value, value_key=e_k)} + + # if event_type not in list(events.SUPPORTED_TYPES.keys()) \ + # or event.value in [None, "", "*"] \ + # and (event_type != events.event_type.ERROR.ui_type \ + # or event_type != events.event_type.ERROR_IOS.ui_type): + # continue + if event_type == events.event_type.CLICK.ui_type: + event_from = event_from % f"{events.event_type.CLICK.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.CLICK.column} {op} %({e_k})s", event.value, + value_key=e_k)) + + elif event_type == events.event_type.INPUT.ui_type: + event_from = event_from % f"{events.event_type.INPUT.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.INPUT.column} {op} %({e_k})s", event.value, + value_key=e_k)) + if event.source is not None and len(event.source) > 0: + event_where.append(_multiple_conditions(f"main.value 
ILIKE %(custom{i})s", event.source, + value_key=f"custom{i}")) + full_args = {**full_args, **_multiple_values(event.source, value_key=f"custom{i}")} + + elif event_type == events.event_type.LOCATION.ui_type: + event_from = event_from % f"{events.event_type.LOCATION.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.LOCATION.column} {op} %({e_k})s", + event.value, value_key=e_k)) + elif event_type == events.event_type.CUSTOM.ui_type: + event_from = event_from % f"{events.event_type.CUSTOM.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.CUSTOM.column} {op} %({e_k})s", event.value, + value_key=e_k)) + elif event_type == events.event_type.REQUEST.ui_type: + event_from = event_from % f"{events.event_type.REQUEST.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s", event.value, + value_key=e_k)) + elif event_type == events.event_type.GRAPHQL.ui_type: + event_from = event_from % f"{events.event_type.GRAPHQL.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.GRAPHQL.column} {op} %({e_k})s", event.value, + value_key=e_k)) + elif event_type == events.event_type.STATEACTION.ui_type: + event_from = event_from % f"{events.event_type.STATEACTION.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.STATEACTION.column} {op} %({e_k})s", + event.value, value_key=e_k)) + elif event_type == events.event_type.ERROR.ui_type: + # if event.source in [None, "*", ""]: + # event.source = "js_exception" + event_from = event_from % f"{events.event_type.ERROR.table} AS main INNER JOIN public.errors AS main1 USING(error_id)" + if event.value not in [None, "*", ""]: + if not is_any: + event_where.append(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)") + if event.source not in [None, 
"*", ""]: + event_where.append(f"main1.source = %(source)s") + full_args["source"] = event.source + elif event.source not in [None, "*", ""]: + event_where.append(f"main1.source = %(source)s") + full_args["source"] = event.source + + # ----- IOS + elif event_type == events.event_type.CLICK_IOS.ui_type: + event_from = event_from % f"{events.event_type.CLICK_IOS.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.CLICK_IOS.column} {op} %({e_k})s", + event.value, value_key=e_k)) + + elif event_type == events.event_type.INPUT_IOS.ui_type: + event_from = event_from % f"{events.event_type.INPUT_IOS.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.INPUT_IOS.column} {op} %({e_k})s", + event.value, value_key=e_k)) + if event.source is not None and len(event.source) > 0: + event_where.append(_multiple_conditions(f"main.value ILIKE %(custom{i})s", event.source, + value_key="custom{i}")) + full_args = {**full_args, **_multiple_values(event.source, f"custom{i}")} + elif event_type == events.event_type.VIEW_IOS.ui_type: + event_from = event_from % f"{events.event_type.VIEW_IOS.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.VIEW_IOS.column} {op} %({e_k})s", + event.value, value_key=e_k)) + elif event_type == events.event_type.CUSTOM_IOS.ui_type: + event_from = event_from % f"{events.event_type.CUSTOM_IOS.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.CUSTOM_IOS.column} {op} %({e_k})s", + event.value, value_key=e_k)) + elif event_type == events.event_type.REQUEST_IOS.ui_type: + event_from = event_from % f"{events.event_type.REQUEST_IOS.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.REQUEST_IOS.column} {op} %({e_k})s", + event.value, value_key=e_k)) + elif event_type == 
events.event_type.ERROR_IOS.ui_type: + event_from = event_from % f"{events.event_type.ERROR_IOS.table} AS main INNER JOIN public.crashes_ios AS main1 USING(crash_id)" + if not is_any and event.value not in [None, "*", ""]: + event_where.append( + _multiple_conditions(f"(main1.reason {op} %({e_k})s OR main1.name {op} %({e_k})s)", + event.value, value_key=e_k)) + elif event_type == schemas.PerformanceEventType.fetch_failed: + event_from = event_from % f"{events.event_type.REQUEST.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s", + event.value, value_key=e_k)) + col = performance_event.get_col(event_type) + colname = col["column"] + event_where.append(f"main.{colname} = FALSE") + # elif event_type == schemas.PerformanceEventType.fetch_duration: + # event_from = event_from % f"{events.event_type.REQUEST.table} AS main " + # if not is_any: + # event_where.append( + # _multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s", + # event.value, value_key=e_k)) + # col = performance_event.get_col(event_type) + # colname = col["column"] + # tname = "main" + # e_k += "_custom" + # full_args = {**full_args, **_multiple_values(event.source, value_key=e_k)} + # event_where.append(f"{tname}.{colname} IS NOT NULL AND {tname}.{colname}>0 AND " + + # _multiple_conditions(f"{tname}.{colname} {event.sourceOperator} %({e_k})s", + # event.source, value_key=e_k)) + elif event_type in [schemas.PerformanceEventType.location_dom_complete, + schemas.PerformanceEventType.location_largest_contentful_paint_time, + schemas.PerformanceEventType.location_ttfb, + schemas.PerformanceEventType.location_avg_cpu_load, + schemas.PerformanceEventType.location_avg_memory_usage + ]: + event_from = event_from % f"{events.event_type.LOCATION.table} AS main " + col = performance_event.get_col(event_type) + colname = col["column"] + tname = "main" + if col.get("extraJoin") is not None: + tname = "ej" + 
event_from += f" INNER JOIN {col['extraJoin']} AS {tname} USING(session_id)" + event_where += [f"{tname}.timestamp >= main.timestamp", f"{tname}.timestamp >= %(startDate)s", + f"{tname}.timestamp <= %(endDate)s"] + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.LOCATION.column} {op} %({e_k})s", + event.value, value_key=e_k)) + e_k += "_custom" + full_args = {**full_args, **_multiple_values(event.source, value_key=e_k)} + + event_where.append(f"{tname}.{colname} IS NOT NULL AND {tname}.{colname}>0 AND " + + _multiple_conditions(f"{tname}.{colname} {event.sourceOperator} %({e_k})s", + event.source, value_key=e_k)) + elif event_type == schemas.PerformanceEventType.time_between_events: + event_from = event_from % f"{getattr(events.event_type, event.value[0].type).table} AS main INNER JOIN {getattr(events.event_type, event.value[1].type).table} AS main2 USING(session_id) " + if not isinstance(event.value[0].value, list): + event.value[0].value = [event.value[0].value] + if not isinstance(event.value[1].value, list): + event.value[1].value = [event.value[1].value] + event.value[0].value = helper.values_for_operator(value=event.value[0].value, + op=event.value[0].operator) + event.value[1].value = helper.values_for_operator(value=event.value[1].value, + op=event.value[0].operator) + e_k1 = e_k + "_e1" + e_k2 = e_k + "_e2" + full_args = {**full_args, + **_multiple_values(event.value[0].value, value_key=e_k1), + **_multiple_values(event.value[1].value, value_key=e_k2)} + s_op = __get_sql_operator(event.value[0].operator) + event_where += ["main2.timestamp >= %(startDate)s", "main2.timestamp <= %(endDate)s"] + if event_index > 0 and not or_events: + event_where.append("main2.session_id=event_0.session_id") + event_where.append( + _multiple_conditions( + f"main.{getattr(events.event_type, event.value[0].type).column} {s_op} %({e_k1})s", + event.value[0].value, value_key=e_k1)) + s_op = __get_sql_operator(event.value[1].operator) + 
event_where.append( + _multiple_conditions( + f"main2.{getattr(events.event_type, event.value[1].type).column} {s_op} %({e_k2})s", + event.value[1].value, value_key=e_k2)) + + e_k += "_custom" + full_args = {**full_args, **_multiple_values(event.source, value_key=e_k)} + event_where.append( + _multiple_conditions(f"main2.timestamp - main.timestamp {event.sourceOperator} %({e_k})s", + event.source, value_key=e_k)) + + + else: + continue + if event_index == 0 or or_events: + event_where += ss_constraints + if is_not: + if event_index == 0 or or_events: + events_query_from.append(f"""\ + (SELECT + session_id, + 0 AS timestamp + FROM sessions + WHERE EXISTS(SELECT session_id + FROM {event_from} + WHERE {" AND ".join(event_where)} + AND sessions.session_id=ms.session_id) IS FALSE + AND project_id = %(projectId)s + AND start_ts >= %(startDate)s + AND start_ts <= %(endDate)s + AND duration IS NOT NULL + ) {"" if or_events else (f"AS event_{event_index}" + ("ON(TRUE)" if event_index > 0 else ""))}\ + """) + else: + events_query_from.append(f"""\ + (SELECT + event_0.session_id, + event_{event_index - 1}.timestamp AS timestamp + WHERE EXISTS(SELECT session_id FROM {event_from} WHERE {" AND ".join(event_where)}) IS FALSE + ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\ + """) + else: + events_query_from.append(f"""\ + (SELECT main.session_id, MIN(main.timestamp) AS timestamp + FROM {event_from} + WHERE {" AND ".join(event_where)} + GROUP BY 1 + ) {"" if or_events else (f"AS event_{event_index} " + ("ON(TRUE)" if event_index > 0 else ""))}\ + """) + event_index += 1 + if event_index > 0: + if or_events: + events_query_part = f"""SELECT + session_id, + MIN(timestamp) AS first_event_ts, + MAX(timestamp) AS last_event_ts + FROM ({events_joiner.join(events_query_from)}) AS u + GROUP BY 1 + {fav_only_join}""" + else: + events_query_part = f"""SELECT + event_0.session_id, + MIN(event_0.timestamp) AS first_event_ts, + MAX(event_{event_index - 1}.timestamp) AS 
last_event_ts + FROM {events_joiner.join(events_query_from)} + GROUP BY 1 + {fav_only_join}""" + else: + data.events = [] + # --------------------------------------------------------------------------- + if data.startDate is not None: + extra_constraints.append("s.start_ts >= %(startDate)s") + if data.endDate is not None: + extra_constraints.append("s.start_ts <= %(endDate)s") + # if data.platform is not None: + # if data.platform == schemas.PlatformType.mobile: + # extra_constraints.append(b"s.user_os in ('Android','BlackBerry OS','iOS','Tizen','Windows Phone')") + # elif data.platform == schemas.PlatformType.desktop: + # extra_constraints.append( + # b"s.user_os in ('Chrome OS','Fedora','Firefox OS','Linux','Mac OS X','Ubuntu','Windows')") + if data.order is None: + data.order = "DESC" + sort = 'session_id' + if data.sort is not None and data.sort != "session_id": + sort += " " + data.order + "," + helper.key_to_snake_case(data.sort) + else: + sort = 'session_id' + if errors_only: + extra_from += f" INNER JOIN {events.event_type.ERROR.table} AS er USING (session_id) INNER JOIN public.errors AS ser USING (error_id)" + extra_constraints.append("ser.source = 'js_exception'") + if error_status != "ALL": + extra_constraints.append("ser.status = %(error_status)s") + full_args["error_status"] = error_status.lower() + if favorite_only: + extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)" + extra_constraints.append("ufe.user_id = %(user_id)s") + # extra_constraints = [extra.decode('UTF-8') + "\n" for extra in extra_constraints] + if not favorite_only and not errors_only and user_id is not None: + extra_from += """LEFT JOIN (SELECT user_id, session_id + FROM public.user_favorite_sessions + WHERE user_id = %(userId)s) AS favorite_sessions + USING (session_id)""" + extra_join = "" + if issue is not None: + extra_join = """ + INNER JOIN LATERAL(SELECT TRUE FROM events_common.issues INNER JOIN public.issues AS p_issues USING (issue_id) + WHERE 
issues.session_id=f.session_id + AND p_issues.type=%(issue_type)s + AND p_issues.context_string=%(issue_contextString)s + AND timestamp >= f.first_event_ts + AND timestamp <= f.last_event_ts) AS issues ON(TRUE) + """ + full_args["issue_contextString"] = issue["contextString"] + full_args["issue_type"] = issue["type"] + query_part = f"""\ + FROM {f"({events_query_part}) AS f" if len(events_query_part) > 0 else "public.sessions AS s"} + {extra_join} + {"INNER JOIN public.sessions AS s USING(session_id)" if len(events_query_part) > 0 else ""} + {extra_from} + WHERE + {" AND ".join(extra_constraints)}""" + return full_args, query_part, sort + + def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None): if project_id is None: all_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False) @@ -529,8 +788,8 @@ def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None): available_keys = metadata.get_keys_by_projects(project_ids) for i in available_keys: - available_keys[i]["user_id"] = sessions_metas.meta_type.USERID - available_keys[i]["user_anonymous_id"] = sessions_metas.meta_type.USERANONYMOUSID + available_keys[i]["user_id"] = schemas.FilterType.user_id + available_keys[i]["user_anonymous_id"] = schemas.FilterType.user_anonymous_id results = {} for i in project_ids: if m_key not in available_keys[i].values(): @@ -733,7 +992,7 @@ def get_session_ids_by_user_ids(project_id, user_ids): def delete_sessions_by_session_ids(session_ids): - with pg_client.PostgresClient() as cur: + with pg_client.PostgresClient(long_query=True) as cur: query = cur.mogrify( """\ DELETE FROM public.sessions @@ -747,7 +1006,7 @@ def delete_sessions_by_session_ids(session_ids): def delete_sessions_by_user_ids(project_id, user_ids): - with pg_client.PostgresClient() as cur: + with pg_client.PostgresClient(long_query=True) as cur: query = cur.mogrify( """\ DELETE FROM public.sessions @@ -761,6 +1020,6 @@ def delete_sessions_by_user_ids(project_id, 
user_ids): def count_all(): - with pg_client.PostgresClient() as cur: + with pg_client.PostgresClient(long_query=True) as cur: row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") return row.get("count", 0) diff --git a/api/chalicelib/core/sessions_metas.py b/api/chalicelib/core/sessions_metas.py index a21b78783..1d342d03f 100644 --- a/api/chalicelib/core/sessions_metas.py +++ b/api/chalicelib/core/sessions_metas.py @@ -1,3 +1,4 @@ +import schemas from chalicelib.utils import pg_client, helper from chalicelib.utils.event_filter_definition import SupportedFilter @@ -8,40 +9,47 @@ def get_key_values(project_id): cur.mogrify( f"""\ SELECT ARRAY_AGG(DISTINCT s.user_os - ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='web') AS {meta_type.USEROS}, + ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='web') AS {schemas.FilterType.user_os}, ARRAY_AGG(DISTINCT s.user_browser ORDER BY s.user_browser) - FILTER ( WHERE s.user_browser IS NOT NULL AND s.platform='web') AS {meta_type.USERBROWSER}, + FILTER ( WHERE s.user_browser IS NOT NULL AND s.platform='web') AS {schemas.FilterType.user_browser}, ARRAY_AGG(DISTINCT s.user_device ORDER BY s.user_device) - FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='web') AS {meta_type.USERDEVICE}, + FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='web') AS {schemas.FilterType.user_device}, ARRAY_AGG(DISTINCT s.user_country ORDER BY s.user_country) - FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='web')::text[] AS {meta_type.USERCOUNTRY}, + FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='web')::text[] AS {schemas.FilterType.user_country}, ARRAY_AGG(DISTINCT s.user_id - ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='web') AS {meta_type.USERID}, + ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id 
!= 'none' AND s.user_id != '' AND s.platform='web') AS {schemas.FilterType.user_id}, ARRAY_AGG(DISTINCT s.user_anonymous_id - ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='web') AS {meta_type.USERANONYMOUSID}, + ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='web') AS {schemas.FilterType.user_anonymous_id}, ARRAY_AGG(DISTINCT s.rev_id - ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='web') AS {meta_type.REVID}, + ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='web') AS {schemas.FilterType.rev_id}, ARRAY_AGG(DISTINCT p.referrer ORDER BY p.referrer) - FILTER ( WHERE p.referrer != '' ) AS {meta_type.REFERRER}, + FILTER ( WHERE p.referrer != '' ) AS {schemas.FilterType.referrer}, + + ARRAY_AGG(DISTINCT s.utm_source + ORDER BY s.utm_source) FILTER ( WHERE s.utm_source IS NOT NULL AND s.utm_source != 'none' AND s.utm_source != '') AS {schemas.FilterType.utm_source}, + ARRAY_AGG(DISTINCT s.utm_medium + ORDER BY s.utm_medium) FILTER ( WHERE s.utm_medium IS NOT NULL AND s.utm_medium != 'none' AND s.utm_medium != '') AS {schemas.FilterType.utm_medium}, + ARRAY_AGG(DISTINCT s.utm_campaign + ORDER BY s.utm_campaign) FILTER ( WHERE s.utm_campaign IS NOT NULL AND s.utm_campaign != 'none' AND s.utm_campaign != '') AS {schemas.FilterType.utm_campaign}, ARRAY_AGG(DISTINCT s.user_os - ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='ios' ) AS {meta_type.USEROS_IOS}, + ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='ios' ) AS {schemas.FilterType.user_os_ios}, ARRAY_AGG(DISTINCT s.user_device ORDER BY s.user_device) - FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='ios') AS {meta_type.USERDEVICE}, + FILTER ( WHERE s.user_device IS NOT NULL AND 
s.user_device != '' AND s.platform='ios') AS {schemas.FilterType.user_device_ios}, ARRAY_AGG(DISTINCT s.user_country ORDER BY s.user_country) - FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='ios')::text[] AS {meta_type.USERCOUNTRY_IOS}, + FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='ios')::text[] AS {schemas.FilterType.user_country_ios}, ARRAY_AGG(DISTINCT s.user_id - ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='ios') AS {meta_type.USERID_IOS}, + ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='ios') AS {schemas.FilterType.user_id_ios}, ARRAY_AGG(DISTINCT s.user_anonymous_id - ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='ios') AS {meta_type.USERANONYMOUSID_IOS}, + ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='ios') AS {schemas.FilterType.user_anonymous_id_ios}, ARRAY_AGG(DISTINCT s.rev_id - ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='ios') AS {meta_type.REVID_IOS} + ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='ios') AS {schemas.FilterType.rev_id_ios} FROM public.sessions AS s LEFT JOIN events.pages AS p USING (session_id) WHERE s.project_id = %(site_id)s;""", @@ -108,119 +116,137 @@ def __generic_autocomplete(typename): return f -class meta_type: - USEROS = "USEROS" - USERBROWSER = "USERBROWSER" - USERDEVICE = "USERDEVICE" - USERCOUNTRY = "USERCOUNTRY" - USERID = "USERID" - USERANONYMOUSID = "USERANONYMOUSID" - REFERRER = "REFERRER" - REVID = "REVID" - # IOS - USEROS_IOS = "USEROS_IOS" - USERDEVICE_IOS = "USERDEVICE_IOS" - USERCOUNTRY_IOS = "USERCOUNTRY_IOS" - USERID_IOS = "USERID_IOS" - USERANONYMOUSID_IOS = "USERANONYMOUSID_IOS" - 
REVID_IOS = "REVID_IOS" - - SUPPORTED_TYPES = { - meta_type.USEROS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USEROS), - query=__generic_query(typename=meta_type.USEROS), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), - meta_type.USERBROWSER: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERBROWSER), - query=__generic_query(typename=meta_type.USERBROWSER), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), - meta_type.USERDEVICE: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERDEVICE), - query=__generic_query(typename=meta_type.USERDEVICE), - value_limit=3, - starts_with="", - starts_limit=3, - ignore_if_starts_with=["/"]), - meta_type.USERCOUNTRY: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERCOUNTRY), - query=__generic_query(typename=meta_type.USERCOUNTRY), - value_limit=2, - starts_with="", - starts_limit=2, - ignore_if_starts_with=["/"]), - meta_type.USERID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERID), - query=__generic_query(typename=meta_type.USERID), - value_limit=2, - starts_with="", - starts_limit=2, - ignore_if_starts_with=["/"]), - meta_type.USERANONYMOUSID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERANONYMOUSID), - query=__generic_query(typename=meta_type.USERANONYMOUSID), - value_limit=3, - starts_with="", - starts_limit=3, - ignore_if_starts_with=["/"]), - meta_type.REVID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REVID), - query=__generic_query(typename=meta_type.REVID), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), - meta_type.REFERRER: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REFERRER), - query=__generic_query(typename=meta_type.REFERRER), - value_limit=5, - starts_with="/", - starts_limit=5, - ignore_if_starts_with=[]), + schemas.FilterType.user_os: SupportedFilter( + 
get=__generic_autocomplete(typename=schemas.FilterType.user_os), + query=__generic_query(typename=schemas.FilterType.user_os), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_browser: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_browser), + query=__generic_query(typename=schemas.FilterType.user_browser), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_device: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_device), + query=__generic_query(typename=schemas.FilterType.user_device), + value_limit=3, + starts_with="", + starts_limit=3, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_country: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_country), + query=__generic_query(typename=schemas.FilterType.user_country), + value_limit=2, + starts_with="", + starts_limit=2, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_id: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_id), + query=__generic_query(typename=schemas.FilterType.user_id), + value_limit=2, + starts_with="", + starts_limit=2, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_anonymous_id: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_anonymous_id), + query=__generic_query(typename=schemas.FilterType.user_anonymous_id), + value_limit=3, + starts_with="", + starts_limit=3, + ignore_if_starts_with=["/"]), + schemas.FilterType.rev_id: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.rev_id), + query=__generic_query(typename=schemas.FilterType.rev_id), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.referrer: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.referrer), + 
query=__generic_query(typename=schemas.FilterType.referrer), + value_limit=5, + starts_with="/", + starts_limit=5, + ignore_if_starts_with=[]), + schemas.FilterType.utm_campaign: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.utm_campaign), + query=__generic_query(typename=schemas.FilterType.utm_campaign), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.utm_medium: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.utm_medium), + query=__generic_query(typename=schemas.FilterType.utm_medium), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.utm_source: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.utm_source), + query=__generic_query(typename=schemas.FilterType.utm_source), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), # IOS - meta_type.USEROS_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USEROS_IOS), - query=__generic_query(typename=meta_type.USEROS_IOS), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), - meta_type.USERDEVICE_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERDEVICE_IOS), - query=__generic_query(typename=meta_type.USERDEVICE_IOS), - value_limit=3, - starts_with="", - starts_limit=3, - ignore_if_starts_with=["/"]), - meta_type.USERCOUNTRY_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERCOUNTRY_IOS), - query=__generic_query(typename=meta_type.USERCOUNTRY_IOS), - value_limit=2, - starts_with="", - starts_limit=2, - ignore_if_starts_with=["/"]), - meta_type.USERID_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERID_IOS), - query=__generic_query(typename=meta_type.USERID_IOS), - value_limit=2, - starts_with="", - starts_limit=2, - ignore_if_starts_with=["/"]), - meta_type.USERANONYMOUSID_IOS: 
SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERANONYMOUSID_IOS), - query=__generic_query(typename=meta_type.USERANONYMOUSID_IOS), - value_limit=3, - starts_with="", - starts_limit=3, - ignore_if_starts_with=["/"]), - meta_type.REVID_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REVID_IOS), - query=__generic_query(typename=meta_type.REVID_IOS), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), + schemas.FilterType.user_os_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_os_ios), + query=__generic_query(typename=schemas.FilterType.user_os_ios), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_device_ios: SupportedFilter( + get=__generic_autocomplete( + typename=schemas.FilterType.user_device_ios), + query=__generic_query(typename=schemas.FilterType.user_device_ios), + value_limit=3, + starts_with="", + starts_limit=3, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_country_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_country_ios), + query=__generic_query(typename=schemas.FilterType.user_country_ios), + value_limit=2, + starts_with="", + starts_limit=2, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_id_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_id_ios), + query=__generic_query(typename=schemas.FilterType.user_id_ios), + value_limit=2, + starts_with="", + starts_limit=2, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_anonymous_id_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_anonymous_id_ios), + query=__generic_query(typename=schemas.FilterType.user_anonymous_id_ios), + value_limit=3, + starts_with="", + starts_limit=3, + ignore_if_starts_with=["/"]), + schemas.FilterType.rev_id_ios: SupportedFilter( + 
get=__generic_autocomplete(typename=schemas.FilterType.rev_id_ios), + query=__generic_query(typename=schemas.FilterType.rev_id_ios), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), } def search(text, meta_type, project_id): rows = [] - if meta_type.upper() not in list(SUPPORTED_TYPES.keys()): + if meta_type not in list(SUPPORTED_TYPES.keys()): return {"errors": ["unsupported type"]} - rows += SUPPORTED_TYPES[meta_type.upper()].get(project_id=project_id, text=text) - if meta_type.upper() + "_IOS" in list(SUPPORTED_TYPES.keys()): - rows += SUPPORTED_TYPES[meta_type.upper() + "_IOS"].get(project_id=project_id, text=text) + rows += SUPPORTED_TYPES[meta_type].get(project_id=project_id, text=text) + if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()): + rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text) return {"data": rows} diff --git a/api/chalicelib/core/significance.py b/api/chalicelib/core/significance.py index 8bcda04a7..2580a7584 100644 --- a/api/chalicelib/core/significance.py +++ b/api/chalicelib/core/significance.py @@ -1,7 +1,8 @@ __author__ = "AZNAUROV David" __maintainer__ = "KRAIEM Taha Yassine" -from chalicelib.core import events, sessions_metas, metadata, sessions +import schemas +from chalicelib.core import events, metadata, sessions from chalicelib.utils import dev """ @@ -30,87 +31,109 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]: :param filter_d: dict contains events&filters&... 
:return: """ - stages = filter_d["events"] - filters = filter_d.get("filters", []) + stages: [dict] = filter_d["events"] + filters: [dict] = filter_d.get("filters", []) filter_issues = filter_d.get("issueTypes") if filter_issues is None or len(filter_issues) == 0: filter_issues = [] stage_constraints = ["main.timestamp <= %(endTimestamp)s"] first_stage_extra_constraints = ["s.project_id=%(project_id)s", "s.start_ts >= %(startTimestamp)s", "s.start_ts <= %(endTimestamp)s"] - extra_from = "" + filter_extra_from = [] n_stages_query = [] values = {} if len(filters) > 0: - meta_keys = metadata.get(project_id=project_id) - meta_keys = {m["key"]: m["index"] for m in meta_keys} + meta_keys = None for i, f in enumerate(filters): - if not isinstance(f.get("value"), list): - if isinstance(f.get("value"), tuple): - f["value"] = list(f.get("value")) - else: - f["value"] = [f.get("value")] - if len(f["value"]) == 0 or f["value"][0] is None: + if not isinstance(f["value"], list): + f.value = [f["value"]] + if len(f["value"]) == 0 or f["value"] is None: continue - filter_type = f["type"].upper() - values[f"f_value_{i}"] = sessions.__get_sql_value_multiple(f["value"]) - if filter_type == sessions_metas.meta_type.USERBROWSER: - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f's.user_browser {op} %({f"f_value_{i}"})s') + f["value"] = helper.values_for_operator(value=f["value"], op=f["operator"]) + # filter_args = _multiple_values(f["value"]) + op = sessions.__get_sql_operator(f["operator"]) - elif filter_type in [sessions_metas.meta_type.USEROS, sessions_metas.meta_type.USEROS_IOS]: - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f's.user_os {op} %({f"f_value_{i}"})s') + filter_type = f["type"] + # values[f_k] = sessions.__get_sql_value_multiple(f["value"]) + f_k = f"f_value{i}" + values = {**values, + **sessions._multiple_values(helper.values_for_operator(value=f["value"], 
op=f["operator"]), + value_key=f_k)} + if filter_type == schemas.FilterType.user_browser: + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_browser {op} %({f_k})s', f["value"], value_key=f_k)) - elif filter_type in [sessions_metas.meta_type.USERDEVICE, sessions_metas.meta_type.USERDEVICE_IOS]: - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f's.user_device {op} %({f"f_value_{i}"})s') + elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]: + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_os {op} %({f_k})s', f["value"], value_key=f_k)) - elif filter_type in [sessions_metas.meta_type.USERCOUNTRY, sessions_metas.meta_type.USERCOUNTRY_IOS]: - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f's.user_country {op} %({f"f_value_{i}"})s') - elif filter_type == "duration".upper(): + elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]: + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_device {op} %({f_k})s', f["value"], value_key=f_k)) + + elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]: + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_country {op} %({f_k})s', f["value"], value_key=f_k)) + elif filter_type == schemas.FilterType.duration: if len(f["value"]) > 0 and f["value"][0] is not None: - first_stage_extra_constraints.append(f's.duration >= %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = f["value"][0] - if len(f["value"]) > 1 and f["value"][1] is not None and f["value"][1] > 0: - 
first_stage_extra_constraints.append('s.duration <= %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = f["value"][1] - elif filter_type == sessions_metas.meta_type.REFERRER: + first_stage_extra_constraints.append(f's.duration >= %(minDuration)s') + values["minDuration"] = f["value"][0] + if len(f["value"]) > 1 and f["value"][1] is not None and int(f["value"][1]) > 0: + first_stage_extra_constraints.append('s.duration <= %(maxDuration)s') + values["maxDuration"] = f["value"][1] + elif filter_type == schemas.FilterType.referrer: # events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)" - extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)" - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f"p.base_referrer {op} %(referrer)s") + filter_extra_from = [f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"] + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f"p.base_referrer {op} %({f_k})s", f["value"], value_key=f_k)) elif filter_type == events.event_type.METADATA.ui_type: - op = sessions.__get_sql_operator(f["operator"]) + if meta_keys is None: + meta_keys = metadata.get(project_id=project_id) + meta_keys = {m["key"]: m["index"] for m in meta_keys} + # op = sessions.__get_sql_operator(f["operator"]) if f.get("key") in meta_keys.keys(): first_stage_extra_constraints.append( - f's.{metadata.index_to_colname(meta_keys[f["key"]])} {op} %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) - elif filter_type in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: - op = sessions.__get_sql_operator(f["operator"]) - first_stage_extra_constraints.append(f's.user_id {op} %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) - elif filter_type in 
[sessions_metas.meta_type.USERANONYMOUSID, - sessions_metas.meta_type.USERANONYMOUSID_IOS]: - op = sessions.__get_sql_operator(f["operator"]) - first_stage_extra_constraints.append(f's.user_anonymous_id {op} %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) - elif filter_type in [sessions_metas.meta_type.REVID, sessions_metas.meta_type.REVID_IOS]: - op = sessions.__get_sql_operator(f["operator"]) - first_stage_extra_constraints.append(f's.rev_id {op} %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) + sessions._multiple_conditions( + f's.{metadata.index_to_colname(meta_keys[f["key"]])} {op} %({f_k})s', f["value"], + value_key=f_k)) + # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op) + elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + # op = sessions.__get_sql_operator(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_id {op} %({f_k})s', f["value"], value_key=f_k)) + # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op) + elif filter_type in [schemas.FilterType.user_anonymous_id, + schemas.FilterType.user_anonymous_id_ios]: + # op = sessions.__get_sql_operator(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_anonymous_id {op} %({f_k})s', f["value"], value_key=f_k)) + # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op) + elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]: + # op = sessions.__get_sql_operator(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.rev_id {op} %({f_k})s', f["value"], value_key=f_k)) + # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op) for i, s in enumerate(stages): if i == 0: - extra_from = ["INNER JOIN public.sessions AS s USING (session_id)"] + extra_from = filter_extra_from 
+ ["INNER JOIN public.sessions AS s USING (session_id)"] else: extra_from = [] if s.get("operator") is None: s["operator"] = "is" + + if not isinstance(s["value"], list): + s["value"] = [s["value"]] + is_any = sessions._isAny_opreator(s["operator"]) + if not is_any and isinstance(s["value"], list) and len(s["value"]) == 0: + continue op = sessions.__get_sql_operator(s["operator"]) event_type = s["type"].upper() - next_label = s["value"] if event_type == events.event_type.CLICK.ui_type: next_table = events.event_type.CLICK.table next_col_name = events.event_type.CLICK.column @@ -140,7 +163,8 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]: print("=================UNDEFINED") continue - values[f"value{i + 1}"] = helper.string_to_sql_like_with_op(next_label, op) + values = {**values, **sessions._multiple_values(helper.values_for_operator(value=s["value"], op=s["operator"]), + value_key=f"value{i + 1}")} if sessions.__is_negation_operator(op) and i > 0: op = sessions.__reverse_sql_operator(op) main_condition = "left_not.session_id ISNULL" @@ -150,7 +174,11 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]: AND s_main.timestamp >= T{i}.stage{i}_timestamp AND s_main.session_id = T1.session_id) AS left_not ON (TRUE)""") else: - main_condition = f"""main.{next_col_name} {op} %(value{i + 1})s""" + if is_any: + main_condition = "TRUE" + else: + main_condition = sessions._multiple_conditions(f"main.{next_col_name} {op} %(value{i + 1})s", + values=s["value"], value_key=f"value{i + 1}") n_stages_query.append(f""" (SELECT main.session_id, {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp, @@ -535,7 +563,8 @@ def get_top_insights(filter_d, project_id): "dropDueToIssues": 0 }] - counts = sessions.search2_pg(data=filter_d, project_id=project_id, user_id=None, count_only=True) + counts = sessions.search2_pg(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d), project_id=project_id, 
+ user_id=None, count_only=True) output[0]["sessionsCount"] = counts["countSessions"] output[0]["usersCount"] = counts["countUsers"] return output, 0 @@ -590,6 +619,15 @@ def get_overview(filter_d, project_id, first_stage=None, last_stage=None): # The result of the multi-stage query rows = get_stages_and_events(filter_d=filter_d, project_id=project_id) if len(rows) == 0: + # PS: not sure what to return if rows are empty + output["stages"] = [{ + "type": stages[0]["type"], + "value": stages[0]["value"], + "sessionsCount": None, + "dropPercentage": None, + "usersCount": None + }] + output['criticalIssuesCount'] = 0 return output # Obtain the first part of the output stages_list = get_stages(stages, rows) diff --git a/api/chalicelib/core/webhook.py b/api/chalicelib/core/webhook.py index 653a2b513..d0b3e2adc 100644 --- a/api/chalicelib/core/webhook.py +++ b/api/chalicelib/core/webhook.py @@ -1,6 +1,9 @@ +import logging + +import requests + from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC -import requests def get_by_id(webhook_id): @@ -76,12 +79,6 @@ def update(tenant_id, webhook_id, changes, replace_none=False): allow_update = ["name", "index", "authHeader", "endpoint"] with pg_client.PostgresClient() as cur: sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys() if k in allow_update] - print(cur.mogrify(f"""\ - UPDATE public.webhooks - SET {','.join(sub_query)} - WHERE webhook_id =%(id)s AND deleted_at ISNULL - RETURNING webhook_id AS integration_id, webhook_id AS id,*;""", - {"id": webhook_id, **changes})) cur.execute( cur.mogrify(f"""\ UPDATE public.webhooks @@ -150,28 +147,24 @@ def trigger_batch(data_list): for w in data_list: if w["destination"] not in webhooks_map: webhooks_map[w["destination"]] = get_by_id(webhook_id=w["destination"]) - __trigger(hook=webhooks_map[w["destination"]], data=w["data"]) + if webhooks_map[w["destination"]] is None: + logging.error(f"!!Error webhook not found: 
webhook_id={w['destination']}") + else: + __trigger(hook=webhooks_map[w["destination"]], data=w["data"]) def __trigger(hook, data): - if hook["type"] == 'webhook': + if hook is not None and hook["type"] == 'webhook': headers = {} if hook["authHeader"] is not None and len(hook["authHeader"]) > 0: headers = {"Authorization": hook["authHeader"]} - # body = { - # "webhookId": hook["id"], - # "createdAt": TimeUTC.now(), - # "event": event, - # "data": data - # } - r = requests.post(url=hook["endpoint"], json=data, headers=headers) if r.status_code != 200: - print("=======> webhook: something went wrong") - print(r) - print(r.status_code) - print(r.text) + logging.error("=======> webhook: something went wrong") + logging.error(r) + logging.error(r.status_code) + logging.error(r.text) return response = None try: @@ -180,5 +173,5 @@ def __trigger(hook, data): try: response = r.text except: - print("no response found") + logging.info("no response found") return response diff --git a/api/chalicelib/core/weekly_report.py b/api/chalicelib/core/weekly_report.py index ecdd24f34..3d857ccc0 100644 --- a/api/chalicelib/core/weekly_report.py +++ b/api/chalicelib/core/weekly_report.py @@ -29,7 +29,7 @@ def edit_config(user_id, weekly_report): def cron(): - with pg_client.PostgresClient() as cur: + with pg_client.PostgresClient(long_query=True) as cur: params = {"3_days_ago": TimeUTC.midnight(delta_days=-3), "1_week_ago": TimeUTC.midnight(delta_days=-7), "2_week_ago": TimeUTC.midnight(delta_days=-14), diff --git a/api/chalicelib/utils/helper.py b/api/chalicelib/utils/helper.py index b8b571d03..6887fa5da 100644 --- a/api/chalicelib/utils/helper.py +++ b/api/chalicelib/utils/helper.py @@ -1,6 +1,7 @@ import random import re import string +from typing import Union import math import requests @@ -168,39 +169,56 @@ def string_to_sql_like(value): def string_to_sql_like_with_op(value, op): - if isinstance(value, list) and len(value) > 0: - _value = value[0] + if isinstance(value, list): + r 
= [] + for v in value: + r.append(string_to_sql_like_with_op(v, op)) + return r else: _value = value - if _value is None: - return _value - if op.upper() != 'ILIKE': + if _value is None: + return _value + if op.upper() != 'ILIKE': + return _value.replace("%", "%%") + _value = _value.replace("*", "%") + if _value.startswith("^"): + _value = _value[1:] + elif not _value.startswith("%"): + _value = '%' + _value + + if _value.endswith("$"): + _value = _value[:-1] + elif not _value.endswith("%"): + _value = _value + '%' return _value.replace("%", "%%") - _value = _value.replace("*", "%") - if _value.startswith("^"): - _value = _value[1:] - elif not _value.startswith("%"): - _value = '%' + _value - - if _value.endswith("$"): - _value = _value[:-1] - elif not _value.endswith("%"): - _value = _value + '%' - return _value.replace("%", "%%") -def string_to_op(value: str, op: schemas.SearchEventOperator): - if isinstance(value, list) and len(value) > 0: - _value = value[0] +likable_operators = [schemas.SearchEventOperator._starts_with, schemas.SearchEventOperator._ends_with, + schemas.SearchEventOperator._contains, schemas.SearchEventOperator._not_contains] + + +def is_likable(op: schemas.SearchEventOperator): + return op in likable_operators + + +def values_for_operator(value: Union[str, list], op: schemas.SearchEventOperator): + if not is_likable(op): + return value + if isinstance(value, list): + r = [] + for v in value: + r.append(values_for_operator(v, op)) + return r else: - _value = value - if _value is None: - return _value - if op == schemas.SearchEventOperator._starts_with: - _value = '^' + _value - elif op == schemas.SearchEventOperator._ends_with: - _value = _value + '$' - return _value + if value is None: + return value + if op == schemas.SearchEventOperator._starts_with: + return value + '%' + elif op == schemas.SearchEventOperator._ends_with: + return '%' + value + elif op == schemas.SearchEventOperator._contains: + return '%' + value + '%' + return value def 
is_valid_email(email): @@ -348,3 +366,14 @@ def has_smtp(): def get_edition(): return "ee" if "ee" in config("ENTERPRISE_BUILD", default="").lower() else "foss" + + +def old_search_payload_to_flat(values): + # in case the old search body was passed + if values.get("events") is not None: + for v in values["events"]: + v["isEvent"] = True + for v in values.get("filters", []): + v["isEvent"] = False + values["filters"] = values.pop("events") + values.get("filters", []) + return values diff --git a/api/chalicelib/utils/html/alert_notification.html b/api/chalicelib/utils/html/alert_notification.html index 881d6ffb0..2d63341f3 100644 --- a/api/chalicelib/utils/html/alert_notification.html +++ b/api/chalicelib/utils/html/alert_notification.html @@ -38,7 +38,7 @@

- Sent with ♡ from OpenReplay © 2021 - All rights reserved.

+ Sent with ♡ from OpenReplay © 2022 - All rights reserved.

https://openreplay.com/

diff --git a/api/chalicelib/utils/pg_client.py b/api/chalicelib/utils/pg_client.py index 8fb869367..c598d8971 100644 --- a/api/chalicelib/utils/pg_client.py +++ b/api/chalicelib/utils/pg_client.py @@ -1,15 +1,17 @@ +from threading import Semaphore + import psycopg2 import psycopg2.extras from decouple import config +from psycopg2 import pool PG_CONFIG = {"host": config("pg_host"), "database": config("pg_dbname"), "user": config("pg_user"), "password": config("pg_password"), "port": config("pg_port", cast=int)} - -from psycopg2 import pool -from threading import Semaphore +if config("pg_timeout", cast=int, default=0) > 0: + PG_CONFIG["options"] = f"-c statement_timeout={config('pg_timeout', cast=int) * 1000}" class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool): @@ -19,28 +21,51 @@ class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool): def getconn(self, *args, **kwargs): self._semaphore.acquire() - return super().getconn(*args, **kwargs) + try: + return super().getconn(*args, **kwargs) + except psycopg2.pool.PoolError as e: + if str(e) == "connection pool is closed": + make_pool() + raise e def putconn(self, *args, **kwargs): super().putconn(*args, **kwargs) self._semaphore.release() -try: - postgreSQL_pool = ORThreadedConnectionPool(50, 100, **PG_CONFIG) - if (postgreSQL_pool): - print("Connection pool created successfully") -except (Exception, psycopg2.DatabaseError) as error: - print("Error while connecting to PostgreSQL", error) - raise error +postgreSQL_pool: ORThreadedConnectionPool = None + + +def make_pool(): + global postgreSQL_pool + if postgreSQL_pool is not None: + try: + postgreSQL_pool.closeall() + except (Exception, psycopg2.DatabaseError) as error: + print("Error while closing all connexions to PostgreSQL", error) + try: + postgreSQL_pool = ORThreadedConnectionPool(config("pg_minconn", cast=int, default=20), 100, **PG_CONFIG) + if (postgreSQL_pool): + print("Connection pool created successfully") + except (Exception, 
psycopg2.DatabaseError) as error: + print("Error while connecting to PostgreSQL", error) + raise error + + +make_pool() class PostgresClient: connection = None cursor = None + long_query = False - def __init__(self): - self.connection = postgreSQL_pool.getconn() + def __init__(self, long_query=False): + self.long_query = long_query + if long_query: + self.connection = psycopg2.connect(**PG_CONFIG) + else: + self.connection = postgreSQL_pool.getconn() def __enter__(self): if self.cursor is None: @@ -51,11 +76,18 @@ class PostgresClient: try: self.connection.commit() self.cursor.close() + if self.long_query: + self.connection.close() except Exception as error: print("Error while committing/closing PG-connection", error) - raise error + if str(error) == "connection already closed": + print("Recreating the connexion pool") + make_pool() + else: + raise error finally: - postgreSQL_pool.putconn(self.connection) + if not self.long_query: + postgreSQL_pool.putconn(self.connection) def close(): diff --git a/api/entrypoint.sh b/api/entrypoint.sh index 60fefb5c0..a092737be 100755 --- a/api/entrypoint.sh +++ b/api/entrypoint.sh @@ -1,2 +1,2 @@ #!/bin/bash -uvicorn app:app --host 0.0.0.0 +uvicorn app:app --host 0.0.0.0 --reload diff --git a/api/routers/core.py b/api/routers/core.py index 0400bbfac..81ed07cca 100644 --- a/api/routers/core.py +++ b/api/routers/core.py @@ -1,4 +1,4 @@ -from typing import Union, Optional +from typing import Union from decouple import config from fastapi import Depends, Body @@ -10,7 +10,8 @@ from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assig log_tool_stackdriver, reset_password, sessions_favorite_viewed, \ log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, errors, sessions, \ log_tool_newrelic, announcements, log_tool_bugsnag, weekly_report, integration_jira_cloud, integration_github, \ - assist, heatmaps, mobile, signup, tenants, errors_favorite_viewed, boarding, notifications, webhook, slack, users 
+ assist, heatmaps, mobile, signup, tenants, errors_favorite_viewed, boarding, notifications, webhook, users, \ + custom_metrics, saved_search from chalicelib.core.collaboration_slack import Slack from chalicelib.utils import email_helper from chalicelib.utils.TimeUTC import TimeUTC @@ -98,17 +99,16 @@ def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schem @app.get('/{projectId}/events/search', tags=["events"]) -def events_search(projectId: int, q: str, type: str = None, key: str = None, source: str = None, - context: schemas.CurrentContext = Depends(OR_context)): +def events_search(projectId: int, q: str, type: Union[schemas.FilterType, schemas.EventType] = None, key: str = None, + source: str = None, context: schemas.CurrentContext = Depends(OR_context)): if len(q) == 0: return {"data": []} - result = events.search_pg2(text=q, event_type=type, project_id=projectId, source=source, - key=key) + result = events.search_pg2(text=q, event_type=type, project_id=projectId, source=source, key=key) return result @app.post('/{projectId}/sessions/search2', tags=["sessions"]) -def sessions_search2(projectId: int, data: schemas.SessionsSearchPayloadSchema = Body(...), +def sessions_search2(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): data = sessions.search2_pg(data, projectId, user_id=context.user_id) return {'data': data} @@ -613,7 +613,7 @@ def errors_merge(context: schemas.CurrentContext = Depends(OR_context)): @app.put('/{projectId}/alerts', tags=["alerts"]) def create_alert(projectId: int, data: schemas.AlertSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): - return alerts.create(projectId, data.dict()) + return alerts.create(projectId, data) @app.get('/{projectId}/alerts', tags=["alerts"]) @@ -621,6 +621,12 @@ def get_all_alerts(projectId: int, context: schemas.CurrentContext = Depends(OR_ return {"data": alerts.get_all(projectId)} 
+@app.get('/{projectId}/alerts/triggers', tags=["alerts", "customMetrics"]) +def get_alerts_triggers(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": alerts.get_predefined_values() \ + + custom_metrics.get_series_for_alert(project_id=projectId, user_id=context.user_id)} + + @app.get('/{projectId}/alerts/{alertId}', tags=["alerts"]) def get_alert(projectId: int, alertId: int, context: schemas.CurrentContext = Depends(OR_context)): return {"data": alerts.get(alertId)} @@ -630,7 +636,7 @@ def get_alert(projectId: int, alertId: int, context: schemas.CurrentContext = De @app.put('/{projectId}/alerts/{alertId}', tags=["alerts"]) def update_alert(projectId: int, alertId: int, data: schemas.AlertSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): - return alerts.update(alertId, data.dict()) + return alerts.update(alertId, data) @app.delete('/{projectId}/alerts/{alertId}', tags=["alerts"]) @@ -645,7 +651,7 @@ def add_funnel(projectId: int, data: schemas.FunnelSchema = Body(...), return funnels.create(project_id=projectId, user_id=context.user_id, name=data.name, - filter=data.filter.dict(), + filter=data.filter, is_public=data.is_public) @@ -678,32 +684,31 @@ def get_possible_issue_types(projectId: int, context: schemas.CurrentContext = D @app.get('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"]) def get_funnel_insights(projectId: int, funnelId: int, rangeValue: str = None, startDate: int = None, endDate: int = None, context: schemas.CurrentContext = Depends(OR_context)): - return funnels.get_top_insights(funnel_id=funnelId, project_id=projectId, - range_value=rangeValue, - start_date=startDate, - end_date=endDate) + return funnels.get_top_insights(funnel_id=funnelId, user_id=context.user_id, project_id=projectId, + range_value=rangeValue, start_date=startDate, end_date=endDate) @app.post('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"]) 
@app.put('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"]) def get_funnel_insights_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelInsightsPayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): - return funnels.get_top_insights_on_the_fly(funnel_id=funnelId, project_id=projectId, data=data.dict()) + return funnels.get_top_insights_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId, + data=data.dict()) @app.get('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"]) def get_funnel_issues(projectId: int, funnelId, rangeValue: str = None, startDate: int = None, endDate: int = None, context: schemas.CurrentContext = Depends(OR_context)): - return funnels.get_issues(funnel_id=funnelId, project_id=projectId, - range_value=rangeValue, - start_date=startDate, end_date=endDate) + return funnels.get_issues(funnel_id=funnelId, user_id=context.user_id, project_id=projectId, + range_value=rangeValue, start_date=startDate, end_date=endDate) @app.post('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"]) @app.put('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"]) def get_funnel_issues_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelSearchPayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): - return {"data": funnels.get_issues_on_the_fly(funnel_id=funnelId, project_id=projectId, data=data.dict())} + return {"data": funnels.get_issues_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId, + data=data.dict())} @app.get('/{projectId}/funnels/{funnelId}/sessions', tags=["funnels"]) @@ -720,7 +725,7 @@ def get_funnel_sessions(projectId: int, funnelId: int, rangeValue: str = None, s def get_funnel_sessions_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelSearchPayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): return {"data": funnels.get_sessions_on_the_fly(funnel_id=funnelId, 
user_id=context.user_id, project_id=projectId, - data=data.dict())} + data=data)} @app.get('/{projectId}/funnels/issues/{issueId}/sessions', tags=["funnels"]) @@ -740,7 +745,7 @@ def get_funnel_issue_sessions(projectId: int, funnelId: int, issueId: str, data: schemas.FunnelSearchPayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): data = funnels.search_by_issue(project_id=projectId, user_id=context.user_id, issue_id=issueId, - funnel_id=funnelId, data=data.dict()) + funnel_id=funnelId, data=data) if "errors" in data: return data if data.get("issue") is None: @@ -752,7 +757,7 @@ def get_funnel_issue_sessions(projectId: int, funnelId: int, issueId: str, @app.get('/{projectId}/funnels/{funnelId}', tags=["funnels"]) def get_funnel(projectId: int, funnelId: int, context: schemas.CurrentContext = Depends(OR_context)): - data = funnels.get(funnel_id=funnelId, project_id=projectId) + data = funnels.get(funnel_id=funnelId, project_id=projectId, user_id=context.user_id) if data is None: return {"errors": ["funnel not found"]} return {"data": data} @@ -766,7 +771,8 @@ def edit_funnel(projectId: int, funnelId: int, data: schemas.UpdateFunnelSchema user_id=context.user_id, name=data.name, filter=data.filter.dict(), - is_public=data.is_public) + is_public=data.is_public, + project_id=projectId) @app.delete('/{projectId}/funnels/{funnelId}', tags=["funnels"]) @@ -838,13 +844,6 @@ def signup_handler(data: schemas.UserSignupSchema = Body(...)): return signup.create_step1(data) -@app.get('/projects', tags=['projects']) -def get_projects(last_tracker_version: Optional[str] = None, context: schemas.CurrentContext = Depends(OR_context)): - return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True, - stack_integrations=True, version=True, - last_tracker_version=last_tracker_version)} - - @app.post('/projects', tags=['projects']) @app.put('/projects', tags=['projects']) def create_project(data: 
schemas.CreateProjectSchema = Body(...), @@ -864,18 +863,6 @@ def delete_project(projectId, context: schemas.CurrentContext = Depends(OR_conte return projects.delete(tenant_id=context.tenant_id, user_id=context.user_id, project_id=projectId) -@app.get('/client', tags=['projects']) -def get_client(context: schemas.CurrentContext = Depends(OR_context)): - r = tenants.get_by_tenant_id(context.tenant_id) - if r is not None: - r.pop("createdAt") - r["projects"] = projects.get_projects(tenant_id=context.tenant_id, recording_state=True, recorded=True, - stack_integrations=True, version=True) - return { - 'data': r - } - - @app.get('/client/new_api_key', tags=['client']) def generate_new_tenant_token(context: schemas.CurrentContext = Depends(OR_context)): return { @@ -953,19 +940,6 @@ def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDa return {"errors": ["undefined action"]} -@public_app.post('/async/alerts/notifications/{step}', tags=["async", "alerts"]) -@public_app.put('/async/alerts/notifications/{step}', tags=["async", "alerts"]) -def send_alerts_notification_async(step: str, data: schemas.AlertNotificationSchema = Body(...)): - if data.auth != config("async_Token"): - return {"errors": ["missing auth"]} - if step == "slack": - slack.send_batch(notifications_list=data.notifications) - elif step == "email": - alerts.send_by_email_batch(notifications_list=data.notifications) - elif step == "webhook": - webhook.trigger_batch(data_list=data.notifications) - - @app.get('/notifications', tags=['notifications']) def get_notifications(context: schemas.CurrentContext = Depends(OR_context)): return {"data": notifications.get_all(tenant_id=context.tenant_id, user_id=context.user_id)} @@ -1087,3 +1061,84 @@ def change_client_password(data: schemas.EditUserPasswordSchema = Body(...), return users.change_password(email=context.email, old_password=data.old_password, new_password=data.new_password, tenant_id=context.tenant_id, user_id=context.user_id) + + 
+@app.post('/{projectId}/custom_metrics/try', tags=["customMetrics"]) +@app.put('/{projectId}/custom_metrics/try', tags=["customMetrics"]) +def try_custom_metric(projectId: int, data: schemas.TryCustomMetricsSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.try_live(project_id=projectId, data=data)} + + +@app.post('/{projectId}/custom_metrics/chart', tags=["customMetrics"]) +@app.put('/{projectId}/custom_metrics/chart', tags=["customMetrics"]) +def get_custom_metric_chart(projectId: int, data: schemas.CustomMetricChartPayloadSchema2 = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.make_chart(project_id=projectId, user_id=context.user_id, metric_id=data.metric_id, + data=data)} + + +@app.post('/{projectId}/custom_metrics', tags=["customMetrics"]) +@app.put('/{projectId}/custom_metrics', tags=["customMetrics"]) +def add_custom_metric(projectId: int, data: schemas.CreateCustomMetricsSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return custom_metrics.create(project_id=projectId, user_id=context.user_id, data=data) + + +@app.get('/{projectId}/custom_metrics', tags=["customMetrics"]) +def get_custom_metrics(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)} + + +@app.get('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"]) +def get_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.get(project_id=projectId, user_id=context.user_id, metric_id=metric_id)} + + +@app.post('/{projectId}/custom_metrics/{metric_id}/chart', tags=["customMetrics"]) +def get_custom_metric_chart(projectId: int, metric_id: int, data: schemas.CustomMetricChartPayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + 
return {"data": custom_metrics.make_chart(project_id=projectId, user_id=context.user_id, metric_id=metric_id, + data=data)} + + +@app.post('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"]) +@app.put('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"]) +def update_custom_metric(projectId: int, metric_id: int, data: schemas.UpdateCustomMetricsSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return { + "data": custom_metrics.update(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)} + + +@app.delete('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"]) +def delete_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.delete(project_id=projectId, user_id=context.user_id, metric_id=metric_id)} + + +@app.post('/{projectId}/saved_search', tags=["savedSearch"]) +@app.put('/{projectId}/saved_search', tags=["savedSearch"]) +def add_saved_search(projectId: int, data: schemas.SavedSearchSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return saved_search.create(project_id=projectId, user_id=context.user_id, data=data) + + +@app.get('/{projectId}/saved_search', tags=["savedSearch"]) +def get_saved_searches(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": saved_search.get_all(project_id=projectId, user_id=context.user_id, details=True)} + + +@app.get('/{projectId}/saved_search/{search_id}', tags=["savedSearch"]) +def get_saved_search(projectId: int, search_id: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": saved_search.get(project_id=projectId, search_id=search_id, user_id=context.user_id)} + + +@app.post('/{projectId}/saved_search/{search_id}', tags=["savedSearch"]) +@app.put('/{projectId}/saved_search/{search_id}', tags=["savedSearch"]) +def update_saved_search(projectId: int, search_id: int, 
data: schemas.SavedSearchSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": saved_search.update(user_id=context.user_id, search_id=search_id, data=data, project_id=projectId)} + + +@app.delete('/{projectId}/saved_search/{search_id}', tags=["savedSearch"]) +def delete_saved_search(projectId: int, search_id: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": saved_search.delete(project_id=projectId, user_id=context.user_id, search_id=search_id)} diff --git a/api/routers/core_dynamic.py b/api/routers/core_dynamic.py index 0cd1b98da..c149266b5 100644 --- a/api/routers/core_dynamic.py +++ b/api/routers/core_dynamic.py @@ -8,7 +8,7 @@ import schemas from chalicelib.core import assist from chalicelib.core import integrations_manager from chalicelib.core import sessions -from chalicelib.core import tenants, users, metadata, projects, license, alerts +from chalicelib.core import tenants, users, metadata, projects, license from chalicelib.core import webhook from chalicelib.core.collaboration_slack import Slack from chalicelib.utils import captcha @@ -209,13 +209,25 @@ def get_current_plan(context: schemas.CurrentContext = Depends(OR_context)): } -@public_app.post('/alerts/notifications', tags=["alerts"]) -@public_app.put('/alerts/notifications', tags=["alerts"]) -def send_alerts_notifications(background_tasks: BackgroundTasks, data: schemas.AlertNotificationSchema = Body(...)): - # TODO: validate token - return {"data": alerts.process_notifications(data.notifications, background_tasks=background_tasks)} - - @public_app.get('/general_stats', tags=["private"], include_in_schema=False) def get_general_stats(): return {"data": {"sessions:": sessions.count_all()}} + + +@app.get('/client', tags=['projects']) +def get_client(context: schemas.CurrentContext = Depends(OR_context)): + r = tenants.get_by_tenant_id(context.tenant_id) + if r is not None: + r.pop("createdAt") + r["projects"] = 
projects.get_projects(tenant_id=context.tenant_id, recording_state=True, recorded=True, + stack_integrations=True, version=True) + return { + 'data': r + } + + +@app.get('/projects', tags=['projects']) +def get_projects(last_tracker_version: Optional[str] = None, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True, + stack_integrations=True, version=True, + last_tracker_version=last_tracker_version)} diff --git a/api/schemas.py b/api/schemas.py index dac62e612..11d882e79 100644 --- a/api/schemas.py +++ b/api/schemas.py @@ -1,7 +1,7 @@ from enum import Enum -from typing import Optional, List, Literal, Union +from typing import Optional, List, Union, Literal -from pydantic import BaseModel, Field, EmailStr, HttpUrl +from pydantic import BaseModel, Field, EmailStr, HttpUrl, root_validator from chalicelib.utils.TimeUTC import TimeUTC @@ -88,16 +88,6 @@ class SearchErrorsSchema(BaseModel): order: Optional[str] = Field(None) -class EmailNotificationSchema(BaseModel): - notification: str = Field(...) - destination: str = Field(...) - - -class AlertNotificationSchema(BaseModel): - auth: str = Field(...) - notifications: List[EmailNotificationSchema] = Field(...) - - class CreateNotificationSchema(BaseModel): token: str = Field(...) notifications: List = Field(...) @@ -276,12 +266,18 @@ class _AlertMessageSchema(BaseModel): value: str = Field(...) +class AlertDetectionChangeType(str, Enum): + percent = "percent" + change = "change" + + class _AlertOptionSchema(BaseModel): message: List[_AlertMessageSchema] = Field([]) - currentPeriod: int = Field(...) - previousPeriod: int = Field(...) + currentPeriod: Literal[15, 30, 60, 120, 240, 1440] = Field(...) 
+ previousPeriod: Literal[15, 30, 60, 120, 240, 1440] = Field(15) lastNotification: Optional[int] = Field(None) renotifyInterval: Optional[int] = Field(720) + change: Optional[AlertDetectionChangeType] = Field(None) class AlertColumn(str, Enum): @@ -304,35 +300,133 @@ class AlertColumn(str, Enum): performance__crashes__count = "performance.crashes.count" errors__javascript__count = "errors.javascript.count" errors__backend__count = "errors.backend.count" + custom = "CUSTOM" + + +class MathOperator(str, Enum): + _equal = "=" + _less = "<" + _greater = ">" + _less_eq = "<=" + _greater_eq = ">=" class _AlertQuerySchema(BaseModel): left: AlertColumn = Field(...) right: float = Field(...) - operator: Literal["<", ">", "<=", ">="] = Field(...) + # operator: Literal["<", ">", "<=", ">="] = Field(...) + operator: MathOperator = Field(...) + + +class AlertDetectionMethod(str, Enum): + threshold = "threshold" + change = "change" class AlertSchema(BaseModel): name: str = Field(...) - detectionMethod: str = Field(...) + detection_method: AlertDetectionMethod = Field(...) description: Optional[str] = Field(None) options: _AlertOptionSchema = Field(...) query: _AlertQuerySchema = Field(...) 
+ series_id: Optional[int] = Field(None) + + @root_validator + def alert_validator(cls, values): + if values.get("query") is not None and values["query"].left == AlertColumn.custom: + assert values.get("series_id") is not None, "series_id should not be null for CUSTOM alert" + if values.get("detectionMethod") is not None \ + and values["detectionMethod"] == AlertDetectionMethod.change \ + and values.get("options") is not None: + assert values["options"].change is not None, \ + "options.change should not be null for detection method 'change'" + return values + + class Config: + alias_generator = attribute_to_camel_case class SourcemapUploadPayloadSchema(BaseModel): urls: List[str] = Field(..., alias="URL") +class ErrorSource(str, Enum): + js_exception = "js_exception" + bugsnag = "bugsnag" + cloudwatch = "cloudwatch" + datadog = "datadog" + newrelic = "newrelic" + rollbar = "rollbar" + sentry = "sentry" + stackdriver = "stackdriver" + sumologic = "sumologic" + + +class EventType(str, Enum): + click = "CLICK" + input = "INPUT" + location = "LOCATION" + custom = "CUSTOM" + request = "REQUEST" + graphql = "GRAPHQL" + state_action = "STATEACTION" + error = "ERROR" + metadata = "METADATA" + click_ios = "CLICK_IOS" + input_ios = "INPUT_IOS" + view_ios = "VIEW_IOS" + custom_ios = "CUSTOM_IOS" + request_ios = "REQUEST_IOS" + error_ios = "ERROR_IOS" + + +class PerformanceEventType(str, Enum): + location_dom_complete = "DOM_COMPLETE" + location_largest_contentful_paint_time = "LARGEST_CONTENTFUL_PAINT_TIME" + time_between_events = "TIME_BETWEEN_EVENTS" + location_ttfb = "TTFB" + location_avg_cpu_load = "AVG_CPU_LOAD" + location_avg_memory_usage = "AVG_MEMORY_USAGE" + fetch_failed = "FETCH_FAILED" + # fetch_duration = "FETCH_DURATION" + + +class FilterType(str, Enum): + user_os = "USEROS" + user_browser = "USERBROWSER" + user_device = "USERDEVICE" + user_country = "USERCOUNTRY" + user_id = "USERID" + user_anonymous_id = "USERANONYMOUSID" + referrer = "REFERRER" + rev_id = 
"REVID" + # IOS + user_os_ios = "USEROS_IOS" + user_device_ios = "USERDEVICE_IOS" + user_country_ios = "USERCOUNTRY_IOS" + user_id_ios = "USERID_IOS" + user_anonymous_id_ios = "USERANONYMOUSID_IOS" + rev_id_ios = "REVID_IOS" + # + duration = "DURATION" + platform = "PLATFORM" + metadata = "METADATA" + issue = "ISSUE" + events_count = "EVENTS_COUNT" + utm_source = "UTM_SOURCE" + utm_medium = "UTM_MEDIUM" + utm_campaign = "UTM_CAMPAIGN" + + class SearchEventOperator(str, Enum): _is = "is" _is_any = "isAny" _on = "on" _on_any = "onAny" - _isnot = "isNot" - _noton = "notOn" + _is_not = "isNot" + _not_on = "notOn" _contains = "contains" - _notcontains = "notContains" + _not_contains = "notContains" _starts_with = "startsWith" _ends_with = "endsWith" @@ -340,34 +434,146 @@ class SearchEventOperator(str, Enum): class PlatformType(str, Enum): mobile = "mobile" desktop = "desktop" + tablet = "tablet" -class _SessionSearchEventSchema(BaseModel): - custom: Optional[str] = Field(None) - key: Optional[str] = Field(None) - value: Union[Optional[str], Optional[List[str]]] = Field(...) - type: str = Field(...) +class SearchEventOrder(str, Enum): + _then = "then" + _or = "or" + _and = "and" + + +class IssueType(str, Enum): + click_rage = 'click_rage' + dead_click = 'dead_click' + excessive_scrolling = 'excessive_scrolling' + bad_request = 'bad_request' + missing_resource = 'missing_resource' + memory = 'memory' + cpu = 'cpu' + slow_resource = 'slow_resource' + slow_page_load = 'slow_page_load' + crash = 'crash' + custom = 'custom' + js_exception = 'js_exception' + + +class __MixedSearchFilter(BaseModel): + is_event: bool = Field(...) + + class Config: + alias_generator = attribute_to_camel_case + + +class _SessionSearchEventRaw(__MixedSearchFilter): + is_event: bool = Field(True, const=True) + value: Union[str, List[str]] = Field(...) + type: Union[EventType, PerformanceEventType] = Field(...) operator: SearchEventOperator = Field(...) - source: Optional[str] = Field(...) 
+ source: Optional[Union[ErrorSource,List[Union[int, str]]]] = Field(default=ErrorSource.js_exception) + sourceOperator: Optional[MathOperator] = Field(None) + + @root_validator + def event_validator(cls, values): + if isinstance(values.get("type"), PerformanceEventType): + if values.get("type") == PerformanceEventType.fetch_failed: + return values + assert values.get("source") is not None, "source should not be null for PerformanceEventType" + assert values.get("sourceOperator") is not None \ + , "sourceOperator should not be null for PerformanceEventType" + if values["type"] == PerformanceEventType.time_between_events: + assert len(values.get("value", [])) == 2, \ + f"must provide 2 Events as value for {PerformanceEventType.time_between_events}" + assert isinstance(values["value"][0], _SessionSearchEventRaw) \ + and isinstance(values["value"][1], _SessionSearchEventRaw) \ + , f"event should be of type _SessionSearchEventRaw for {PerformanceEventType.time_between_events}" + else: + for c in values["source"]: + assert isinstance(c, int), f"source value should be of type int for {values.get('type')}" + return values -class _SessionSearchFilterSchema(_SessionSearchEventSchema): - value: List[str] = Field(...) +class _SessionSearchEventSchema(_SessionSearchEventRaw): + value: Union[List[_SessionSearchEventRaw], str, List[str]] = Field(...) + + +class _SessionSearchFilterSchema(__MixedSearchFilter): + is_event: bool = Field(False, const=False) + value: Union[Optional[Union[IssueType, PlatformType, int, str]], + Optional[List[Union[IssueType, PlatformType, int, str]]]] = Field(...) + type: FilterType = Field(...) + operator: Union[SearchEventOperator, MathOperator] = Field(...) 
+ source: Optional[Union[ErrorSource, str]] = Field(default=ErrorSource.js_exception) + + @root_validator + def filter_validator(cls, values): + if values.get("type") == FilterType.metadata: + assert values.get("source") is not None and len(values["source"]) > 0, \ + "must specify a valid 'source' for metadata filter" + elif values.get("type") == FilterType.issue: + for v in values.get("value"): + assert isinstance(v, IssueType), f"value should be of type IssueType for {values.get('type')} filter" + elif values.get("type") == FilterType.platform: + for v in values.get("value"): + assert isinstance(v, PlatformType), \ + f"value should be of type PlatformType for {values.get('type')} filter" + elif values.get("type") == FilterType.events_count: + assert isinstance(values.get("operator"), MathOperator), \ + f"operator should be of type MathOperator for {values.get('type')} filter" + for v in values.get("value"): + assert isinstance(v, int), f"value should be of type int for {values.get('type')} filter" + else: + assert isinstance(values.get("operator"), SearchEventOperator), \ + f"operator should be of type SearchEventOperator for {values.get('type')} filter" + return values class SessionsSearchPayloadSchema(BaseModel): events: List[_SessionSearchEventSchema] = Field([]) filters: List[_SessionSearchFilterSchema] = Field([]) - # custom:dict=Field(...) - # rangeValue:str=Field(...) startDate: int = Field(None) endDate: int = Field(None) - sort: str = Field(...) 
+ sort: str = Field(default="startTs") order: str = Field(default="DESC") - platform: Optional[PlatformType] = Field(None) + events_order: Optional[SearchEventOrder] = Field(default=SearchEventOrder._then) + + class Config: + alias_generator = attribute_to_camel_case -class FunnelSearchPayloadSchema(SessionsSearchPayloadSchema): +class FlatSessionsSearchPayloadSchema(SessionsSearchPayloadSchema): + events: Optional[List[_SessionSearchEventSchema]] = Field([]) + filters: List[Union[_SessionSearchFilterSchema, _SessionSearchEventSchema]] = Field([]) + + @root_validator(pre=True) + def flat_to_original(cls, values): + # in case the old search body was passed + if len(values.get("events", [])) > 0: + for v in values["events"]: + v["isEvent"] = True + for v in values.get("filters", []): + v["isEvent"] = False + else: + n_filters = [] + n_events = [] + for v in values.get("filters", []): + if v.get("isEvent"): + n_events.append(v) + else: + n_filters.append(v) + values["events"] = n_events + values["filters"] = n_filters + return values + + +class SessionsSearchCountSchema(FlatSessionsSearchPayloadSchema): + # class SessionsSearchCountSchema(SessionsSearchPayloadSchema): + sort: Optional[str] = Field(default=None) + order: Optional[str] = Field(default=None) + + +class FunnelSearchPayloadSchema(FlatSessionsSearchPayloadSchema): + # class FunnelSearchPayloadSchema(SessionsSearchPayloadSchema): range_value: Optional[str] = Field(None) sort: Optional[str] = Field(None) order: Optional[str] = Field(None) @@ -391,7 +597,8 @@ class UpdateFunnelSchema(FunnelSchema): is_public: Optional[bool] = Field(None) -class FunnelInsightsPayloadSchema(SessionsSearchPayloadSchema): +class FunnelInsightsPayloadSchema(FlatSessionsSearchPayloadSchema): + # class FunnelInsightsPayloadSchema(SessionsSearchPayloadSchema): sort: Optional[str] = Field(None) order: Optional[str] = Field(None) @@ -419,3 +626,64 @@ class SentrySchema(BaseModel): class MobileSignPayloadSchema(BaseModel): keys: 
List[str] = Field(...) + + +class CustomMetricSeriesFilterSchema(FlatSessionsSearchPayloadSchema): + # class CustomMetricSeriesFilterSchema(SessionsSearchPayloadSchema): + startDate: Optional[int] = Field(None) + endDate: Optional[int] = Field(None) + sort: Optional[str] = Field(None) + order: Optional[str] = Field(None) + + +class CustomMetricCreateSeriesSchema(BaseModel): + name: Optional[str] = Field(None) + index: Optional[int] = Field(None) + filter: Optional[CustomMetricSeriesFilterSchema] = Field([]) + + +class CreateCustomMetricsSchema(BaseModel): + name: str = Field(...) + series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1) + is_public: Optional[bool] = Field(False) + + class Config: + alias_generator = attribute_to_camel_case + + +class MetricViewType(str, Enum): + line_chart = "lineChart" + progress = "progress" + + +class CustomMetricChartPayloadSchema(BaseModel): + startDate: int = Field(TimeUTC.now(-7)) + endDate: int = Field(TimeUTC.now()) + density: int = Field(7) + viewType: MetricViewType = Field(MetricViewType.line_chart) + + class Config: + alias_generator = attribute_to_camel_case + + +class CustomMetricChartPayloadSchema2(CustomMetricChartPayloadSchema): + metric_id: int = Field(...) 
+ + +class TryCustomMetricsSchema(CreateCustomMetricsSchema, CustomMetricChartPayloadSchema): + name: Optional[str] = Field(None) + + +class CustomMetricUpdateSeriesSchema(CustomMetricCreateSeriesSchema): + series_id: Optional[int] = Field(None) + + class Config: + alias_generator = attribute_to_camel_case + + +class UpdateCustomMetricsSchema(CreateCustomMetricsSchema): + series: List[CustomMetricUpdateSeriesSchema] = Field(..., min_items=1) + + +class SavedSearchSchema(FunnelSchema): + pass diff --git a/backend/pkg/db/cache/messages_common.go b/backend/pkg/db/cache/messages_common.go index c05422cb2..dcf860835 100644 --- a/backend/pkg/db/cache/messages_common.go +++ b/backend/pkg/db/cache/messages_common.go @@ -38,7 +38,7 @@ func (c *PGCache) InsertUserID(sessionID uint64, userID *IOSUserID) error { if err != nil { return err } - session.UserID = &userID.Value + session.UserID = userID.Value return nil } diff --git a/backend/pkg/db/cache/messages_web.go b/backend/pkg/db/cache/messages_web.go index 3afae8592..b259e49da 100644 --- a/backend/pkg/db/cache/messages_web.go +++ b/backend/pkg/db/cache/messages_web.go @@ -30,6 +30,7 @@ func (c *PGCache) InsertWebSessionStart(sessionID uint64, s *SessionStart) error UserDeviceType: s.UserDeviceType, UserDeviceMemorySize: s.UserDeviceMemorySize, UserDeviceHeapSize: s.UserDeviceHeapSize, + UserID: s.UserID, } if err := c.Conn.InsertSessionStart(sessionID, c.sessions[ sessionID ]); err != nil { c.sessions[ sessionID ] = nil diff --git a/backend/pkg/db/postgres/messages_common.go b/backend/pkg/db/postgres/messages_common.go index a6f624651..df539e05c 100644 --- a/backend/pkg/db/postgres/messages_common.go +++ b/backend/pkg/db/postgres/messages_common.go @@ -47,7 +47,8 @@ func (conn *Conn) InsertSessionStart(sessionID uint64, s *types.Session) error { rev_id, tracker_version, issue_score, platform, - user_agent, user_browser, user_browser_version, user_device_memory_size, user_device_heap_size + user_agent, user_browser, 
user_browser_version, user_device_memory_size, user_device_heap_size, + user_id ) VALUES ( $1, $2, $3, $4, $5, $6, $7, @@ -55,7 +56,8 @@ func (conn *Conn) InsertSessionStart(sessionID uint64, s *types.Session) error { NULLIF($10, ''), $11, $12, $13, - NULLIF($14, ''), NULLIF($15, ''), NULLIF($16, ''), NULLIF($17, 0), NULLIF($18, 0::bigint) + NULLIF($14, ''), NULLIF($15, ''), NULLIF($16, ''), NULLIF($17, 0), NULLIF($18, 0::bigint), + NULLIF($19, '') )`, sessionID, s.ProjectID, s.Timestamp, s.UserUUID, s.UserDevice, s.UserDeviceType, s.UserCountry, @@ -64,6 +66,7 @@ func (conn *Conn) InsertSessionStart(sessionID uint64, s *types.Session) error { s.TrackerVersion, s.Timestamp/1000, s.Platform, s.UserAgent, s.UserBrowser, s.UserBrowserVersion, s.UserDeviceMemorySize, s.UserDeviceHeapSize, + s.UserID, ); err != nil { return err; } diff --git a/backend/pkg/db/types/session.go b/backend/pkg/db/types/session.go index 0205f76f3..d354b0cd2 100644 --- a/backend/pkg/db/types/session.go +++ b/backend/pkg/db/types/session.go @@ -16,7 +16,7 @@ type Session struct { PagesCount int EventsCount int ErrorsCount int - UserID *string + UserID string // pointer?? 
UserAnonymousID *string Metadata1 *string Metadata2 *string diff --git a/backend/pkg/messages/messages.go b/backend/pkg/messages/messages.go index 3d8bae7f6..cdff71e1d 100644 --- a/backend/pkg/messages/messages.go +++ b/backend/pkg/messages/messages.go @@ -63,9 +63,10 @@ UserDeviceType string UserDeviceMemorySize uint64 UserDeviceHeapSize uint64 UserCountry string +UserID string } func (msg *SessionStart) Encode() []byte{ - buf := make([]byte, 151 + len(msg.TrackerVersion)+ len(msg.RevID)+ len(msg.UserUUID)+ len(msg.UserAgent)+ len(msg.UserOS)+ len(msg.UserOSVersion)+ len(msg.UserBrowser)+ len(msg.UserBrowserVersion)+ len(msg.UserDevice)+ len(msg.UserDeviceType)+ len(msg.UserCountry)) + buf := make([]byte, 161 + len(msg.TrackerVersion)+ len(msg.RevID)+ len(msg.UserUUID)+ len(msg.UserAgent)+ len(msg.UserOS)+ len(msg.UserOSVersion)+ len(msg.UserBrowser)+ len(msg.UserBrowserVersion)+ len(msg.UserDevice)+ len(msg.UserDeviceType)+ len(msg.UserCountry)+ len(msg.UserID)) buf[0] = 1 p := 1 p = WriteUint(msg.Timestamp, buf, p) @@ -83,6 +84,7 @@ p = WriteString(msg.UserDeviceType, buf, p) p = WriteUint(msg.UserDeviceMemorySize, buf, p) p = WriteUint(msg.UserDeviceHeapSize, buf, p) p = WriteString(msg.UserCountry, buf, p) +p = WriteString(msg.UserID, buf, p) return buf[:p] } diff --git a/backend/pkg/messages/read_message.go b/backend/pkg/messages/read_message.go index d0148bbc6..c226df728 100644 --- a/backend/pkg/messages/read_message.go +++ b/backend/pkg/messages/read_message.go @@ -42,6 +42,7 @@ if msg.UserDeviceType, err = ReadString(reader); err != nil { return nil, err } if msg.UserDeviceMemorySize, err = ReadUint(reader); err != nil { return nil, err } if msg.UserDeviceHeapSize, err = ReadUint(reader); err != nil { return nil, err } if msg.UserCountry, err = ReadString(reader); err != nil { return nil, err } +if msg.UserID, err = ReadString(reader); err != nil { return nil, err } return msg, nil case 2: diff --git a/backend/services/alerts/main.go 
b/backend/services/alerts/main.go deleted file mode 100644 index b11d3ae04..000000000 --- a/backend/services/alerts/main.go +++ /dev/null @@ -1,86 +0,0 @@ -package main - -import ( - "database/sql" - "log" - "os" - "os/signal" - "syscall" - "time" - - "openreplay/backend/pkg/db/postgres" - "openreplay/backend/pkg/env" - _ "github.com/lib/pq" -) - -func main() { - log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) - POSTGRES_STRING := env.String("POSTGRES_STRING") - NOTIFICATIONS_STRING := env.String("ALERT_NOTIFICATION_STRING") - log.Printf("Notifications: %s \nPG: %s\n", NOTIFICATIONS_STRING, POSTGRES_STRING) - pg := postgres.NewConn(POSTGRES_STRING) - defer pg.Close() - - pgs, err := sql.Open("postgres", POSTGRES_STRING+ "?sslmode=disable") - if err != nil { - log.Fatal(err) - } - defer pgs.Close() - - manager := NewManager(NOTIFICATIONS_STRING, POSTGRES_STRING, pgs, pg) - if err := pg.IterateAlerts(func(a *postgres.Alert, err error) { - if err != nil { - log.Printf("Postgres error: %v\n", err) - return - } - log.Printf("Alert initialization: %+v\n", *a) - //log.Printf("CreatedAt: %s\n", *a.CreatedAt) - err = manager.Update(a) - if err != nil { - log.Printf("Alert parse error: %v | Alert: %+v\n", err, *a) - return - } - }); err != nil { - log.Fatalf("Postgres error: %v\n", err) - } - - listener, err := postgres.NewAlertsListener(POSTGRES_STRING) - if err != nil { - log.Fatalf("Postgres listener error: %v\n", err) - } - defer listener.Close() - - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) - - tickAlert := time.Tick(1 * time.Minute) - - log.Printf("Alert service started\n") - manager.RequestAll() - //return - for { - select { - case sig := <-sigchan: - log.Printf("Caught signal %v: terminating\n", sig) - listener.Close() - pg.Close() - os.Exit(0) - case <-tickAlert: - log.Printf("Requesting all...%d alerts\n", manager.Length()) - manager.RequestAll() - case iPointer := <-listener.Alerts: - log.Printf("Alert 
update: %+v\n", *iPointer) - //log.Printf("CreatedAt: %s\n", *iPointer.CreatedAt) - //log.Printf("Notification received for AlertId: %d\n", iPointer.AlertID) - err := manager.Update(iPointer) - if err != nil { - log.Printf("Alert parse error: %+v | Alert: %v\n", err, *iPointer) - } - case err := <-listener.Errors: - log.Printf("listener error: %v\n", err) - if err.Error() == "conn closed" { - panic("Listener conn lost") - } - } - } -} diff --git a/backend/services/alerts/manager.go b/backend/services/alerts/manager.go deleted file mode 100644 index 11ddb9363..000000000 --- a/backend/services/alerts/manager.go +++ /dev/null @@ -1,171 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "sync" - "sync/atomic" - "time" - - "openreplay/backend/pkg/db/postgres" -) - -const PGParallelLimit = 2 - -var pgCount int64 - -type manager struct { - postgresString string - notificationsUrl string - alertsCache map[uint32]*postgres.Alert - cacheMutex sync.Mutex - pgParallel chan bool - pgs *sql.DB - pg *postgres.Conn - pgMutex sync.Mutex - notifications map[uint32]*postgres.TenantNotification - notificationsGo *sync.WaitGroup - notificationsMutex sync.Mutex -} - -func NewManager(notificationsUrl string, postgresString string, pgs *sql.DB, pg *postgres.Conn) *manager { - return &manager{ - postgresString: postgresString, - notificationsUrl: notificationsUrl, - alertsCache: make(map[uint32]*postgres.Alert), - cacheMutex: sync.Mutex{}, - pgParallel: make(chan bool, PGParallelLimit), - pgs: pgs, - pg: pg, - pgMutex: sync.Mutex{}, - notifications: make(map[uint32]*postgres.TenantNotification), - notificationsGo: &sync.WaitGroup{}, - notificationsMutex: sync.Mutex{}, - } - -} - -func (m *manager) Length() int { - return len(m.alertsCache) -} - -func (m *manager) Update(a *postgres.Alert) error { - m.cacheMutex.Lock() - defer m.cacheMutex.Unlock() - _, exists := m.alertsCache[a.AlertID] - if exists && a.DeletedAt != nil { - log.Println("deleting alert from memory") - 
delete(m.alertsCache, a.AlertID) - return nil - } else { - m.alertsCache[a.AlertID] = a - } - return nil -} -func (m *manager) processAlert(a *postgres.Alert) { - defer func() { - defer m.notificationsGo.Done() - <-m.pgParallel - }() - if !a.CanCheck() { - log.Printf("cannot check %s", a.Name) - return - } - //log.Printf("checking %+v", a) - log.Printf("quering %s", a.Name) - //--- For stats: - atomic.AddInt64(&pgCount, 1) - q, err := a.Build() - if err != nil { - log.Println(err) - return - } - - rows, err := q.RunWith(m.pgs).Query() - - if err != nil { - log.Println(err) - return - } - defer rows.Close() - - for rows.Next() { - var ( - value sql.NullFloat64 - valid bool - ) - if err := rows.Scan(&value, &valid); err != nil { - log.Println(err) - continue - } - - if valid && value.Valid { - log.Printf("%s: valid", a.Name) - m.notificationsMutex.Lock() - m.notifications[a.AlertID] = &postgres.TenantNotification{ - TenantId: a.TenantId, - Title: a.Name, - Description: fmt.Sprintf("has been triggered, %s = %.0f (%s %.0f).", a.Query.Left, value.Float64, a.Query.Operator, a.Query.Right), - ButtonText: "Check metrics for more details", - ButtonUrl: fmt.Sprintf("/%d/metrics", a.ProjectID), - ImageUrl: nil, - Options: map[string]interface{}{"source": "ALERT", "sourceId": a.AlertID, "sourceMeta": a.DetectionMethod, "message": a.Options.Message, "projectId": a.ProjectID, "data": map[string]interface{}{"title": a.Name, "limitValue": a.Query.Right, "actualValue": value.Float64, "operator": a.Query.Operator, "trigger": a.Query.Left, "alertId": a.AlertID, "detectionMethod": a.DetectionMethod, "currentPeriod": a.Options.CurrentPeriod, "previousPeriod": a.Options.PreviousPeriod, "createdAt": time.Now().Unix() * 1000}}, - } - m.notificationsMutex.Unlock() - } - } - -} -func (m *manager) RequestAll() { - now := time.Now().Unix() - m.cacheMutex.Lock() - for _, a := range m.alertsCache { - m.pgParallel <- true - m.notificationsGo.Add(1) - go m.processAlert(a) - //m.processAlert(a) - 
} - //log.Println("releasing cache") - m.cacheMutex.Unlock() - //log.Println("waiting for all alerts to finish") - m.notificationsGo.Wait() - log.Printf("done %d PG queries in: %ds", pgCount, time.Now().Unix()-now) - pgCount = 0 - //log.Printf("Processing %d Notifications", len(m.notifications)) - m.notificationsMutex.Lock() - go m.ProcessNotifications(m.notifications) - m.notificationsMutex.Unlock() - m.notifications = make(map[uint32]*postgres.TenantNotification) - //log.Printf("Notifications purged: %d", len(m.notifications)) -} - -func (m *manager) ProcessNotifications(allNotifications map[uint32]*postgres.TenantNotification) { - if len(allNotifications) == 0 { - log.Println("No notifications to process") - return - } - log.Printf("sending %d notifications", len(allNotifications)) - allIds := make([]uint32, 0, len(allNotifications)) - toSend := postgres.Notifications{ - Notifications: []*postgres.TenantNotification{}, - } - for k, n := range allNotifications { - //log.Printf("notification for %d", k) - allIds = append(allIds, k) - toSend.Notifications = append(toSend.Notifications, n) - } - toSend.Send(m.notificationsUrl) - if err := m.pg.SaveLastNotification(allIds); err != nil { - log.Printf("Error saving LastNotification time: %v", err) - if err.Error() == "conn closed" { - m.pg = postgres.NewConn(m.postgresString) - //if err != nil { - // panic(fmt.Sprintf("Postgres renew notifications connection error: %v\n", err)) - //} - if err := m.pg.SaveLastNotification(allIds); err != nil { - panic(fmt.Sprintf("Error saving LastNotification time, suicide: %v", err)) - } - } - } -} diff --git a/backend/services/http/handlers_web.go b/backend/services/http/handlers_web.go index 5e144f1cc..09d2511d8 100644 --- a/backend/services/http/handlers_web.go +++ b/backend/services/http/handlers_web.go @@ -27,6 +27,7 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { JsHeapSizeLimit uint64 `json:"jsHeapSizeLimit"` ProjectKey *string `json:"projectKey"` Reset 
bool `json:"reset"` + UserID string `json:"userID"` } type response struct { Timestamp int64 `json:"timestamp"` @@ -101,6 +102,7 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) { UserCountry: country, UserDeviceMemorySize: req.DeviceMemory, UserDeviceHeapSize: req.JsHeapSizeLimit, + UserID: req.UserID, })) } diff --git a/ee/LICENSE.md b/ee/LICENSE.md index d99b63d76..5f6043f8f 100644 --- a/ee/LICENSE.md +++ b/ee/LICENSE.md @@ -1,36 +1,4 @@ The OpenReplay Enterprise license (the “Enterprise License”) -Copyright (c) 2021 Asayer SAS. +Copyright (c) 2022 Asayer SAS. -With regard to the OpenReplay Software: - -This software and associated documentation files (the "Software") may only be -used in production, if you (and any entity that you represent) have agreed to, -and are in compliance with, the OpenReplay Subscription Terms of Service, available -at https://openreplay.com/terms.html (the “Enterprise Edition”), or other -agreement governing the use of the Software, as agreed by you and OpenReplay, -and otherwise have a valid OpenReplay Enterprise license for the -correct usage. Subject to the foregoing sentence, you are free to -modify this Software and publish patches to the Software. You agree that OpenReplay -and/or its licensors (as applicable) retain all right, title and interest in and -to all such modifications and/or patches, and all such modifications and/or -patches may only be used, copied, modified, displayed, distributed, or otherwise -exploited with a valid OpenReplay Enterprise license for the correct -number of user seats and profiles. Notwithstanding the foregoing, you may copy and modify -the Software for development and testing purposes, without requiring a -subscription. You agree that OpenReplay and/or its licensors (as applicable) retain -all right, title and interest in and to all such modifications. You are not -granted any other rights beyond what is expressly stated herein. 
Subject to the -foregoing, it is forbidden to copy, merge, publish, distribute, sublicense, -and/or sell the Software. - -THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR -IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, -FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE -AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER -LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, -OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE -SOFTWARE. - -For all third party components incorporated into the OpenReplay Software, those -components are licensed under the original license provided by the owner of the -applicable component. +To license the Enterprise Edition of OpenReplay, and take advantage of its additional features, functionality and support, you must agree to the terms of the OpenReplay Enterprise License Agreement. Please contact OpenReplay at [sales@openreplay.com](mailto:sales@openreplay.com). 
diff --git a/ee/api/.chalice/config.json b/ee/api/.chalice/config.json deleted file mode 100644 index db58c76ba..000000000 --- a/ee/api/.chalice/config.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "version": "2.0", - "app_name": "parrot", - "environment_variables": { - }, - "stages": { - "default-ee": { - "api_gateway_stage": "default-ee", - "manage_iam_role": false, - "iam_role_arn": "", - "autogen_policy": true, - "environment_variables": { - "isFOS": "false", - "isEE": "true", - "stage": "default-ee", - "jwt_issuer": "openreplay-default-ee", - "sentryURL": "", - "pg_host": "127.0.0.1", - "pg_port": "9202", - "pg_dbname": "app", - "pg_user": "", - "pg_password": "", - "ch_host": "", - "ch_port": "", - "alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s", - "email_signup": "http://127.0.0.1:8000/async/email_signup/%s", - "email_funnel": "http://127.0.0.1:8000/async/funnel/%s", - "email_plans": "http://127.0.0.1:8000/async/plans/%s", - "email_basic": "http://127.0.0.1:8000/async/basic/%s", - "assign_link": "http://127.0.0.1:8000/async/email_assignment", - "captcha_server": "", - "captcha_key": "", - "sessions_bucket": "mobs", - "sessions_region": "us-east-1", - "put_S3_TTL": "20", - "sourcemaps_reader": "http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps", - "sourcemaps_bucket": "sourcemaps", - "peers": "http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers", - "js_cache_bucket": "sessions-assets", - "async_Token": "", - "EMAIL_HOST": "", - "EMAIL_PORT": "587", - "EMAIL_USER": "", - "EMAIL_PASSWORD": "", - "EMAIL_USE_TLS": "true", - "EMAIL_USE_SSL": "false", - "EMAIL_SSL_KEY": "", - "EMAIL_SSL_CERT": "", - "EMAIL_FROM": "OpenReplay", - "SITE_URL": "", - "announcement_url": "", - "jwt_secret": "SET A RANDOM STRING HERE", - "jwt_algorithm": "HS512", - "jwt_exp_delta_seconds": "2592000", - "S3_HOST": "", - "S3_KEY": "", - "S3_SECRET": "", - "LICENSE_KEY": "", - "SAML2_MD_URL": "", - "idp_entityId": "", - "idp_sso_url": "", - 
"idp_x509cert": "", - "idp_sls_url": "", - "idp_name": "", - "sso_exp_delta_seconds": "172800", - "sso_landing": "/login?jwt=%s", - "invitation_link": "/api/users/invitation?token=%s", - "change_password_link": "/reset-password?invitation=%s&&pass=%s", - "iosBucket": "openreplay-ios-images", - "version_number": "1.3.6", - "assist_secret": "" - }, - "lambda_timeout": 150, - "lambda_memory_size": 400, - "subnet_ids": [ - ], - "security_group_ids": [ - ] - } - } -} diff --git a/ee/api/.env.default b/ee/api/.env.default index 6fff1793c..cec7e59a4 100644 --- a/ee/api/.env.default +++ b/ee/api/.env.default @@ -38,11 +38,13 @@ jwt_exp_delta_seconds=2592000 jwt_issuer=openreplay-default-ee jwt_secret="SET A RANDOM STRING HERE" peers=http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers -pg_dbname=app -pg_host=127.0.0.1 -pg_password= -pg_port=9202 -pg_user= +pg_dbname=postgres +pg_host=postgresql.db.svc.cluster.local +pg_password=asayerPostgres +pg_port=5432 +pg_user=postgres +pg_timeout=30 +pg_minconn=45 put_S3_TTL=20 sentryURL= sessions_bucket=mobs diff --git a/ee/api/.gitignore b/ee/api/.gitignore index fbf1958af..f1ff9550b 100644 --- a/ee/api/.gitignore +++ b/ee/api/.gitignore @@ -178,6 +178,7 @@ README/* Pipfile /chalicelib/core/alerts.py +/chalicelib/core/alerts_processor.py /chalicelib/core/announcements.py /chalicelib/blueprints/bp_app_api.py /chalicelib/blueprints/bp_core.py @@ -186,6 +187,7 @@ Pipfile /chalicelib/core/errors_favorite_viewed.py /chalicelib/core/events.py /chalicelib/core/events_ios.py +/chalicelib/core/funnels.py /chalicelib/core/integration_base.py /chalicelib/core/integration_base_issue.py /chalicelib/core/integration_github.py @@ -251,9 +253,14 @@ Pipfile /db_changes.sql /Dockerfile.bundle /entrypoint.bundle.sh -/entrypoint.sh +#/entrypoint.sh /chalicelib/core/heatmaps.py /routers/subs/insights.py /schemas.py /chalicelib/blueprints/app/v1_api.py /routers/app/v1_api.py +/chalicelib/core/custom_metrics.py 
+/chalicelib/core/performance_event.py +/chalicelib/core/saved_search.py +/app_alerts.py +/build_alerts.sh diff --git a/ee/api/Dockerfile.alerts b/ee/api/Dockerfile.alerts new file mode 100644 index 000000000..5809de5e6 --- /dev/null +++ b/ee/api/Dockerfile.alerts @@ -0,0 +1,19 @@ +FROM python:3.9.7-slim +LABEL Maintainer="Rajesh Rajendran" +LABEL Maintainer="KRAIEM Taha Yassine" +RUN apt-get update && apt-get install -y pkg-config libxmlsec1-dev gcc && rm -rf /var/lib/apt/lists/* +WORKDIR /work +COPY . . +RUN pip install -r requirements.txt +RUN mv .env.default .env && mv app_alerts.py app.py +ENV pg_minconn 2 + +# Add Tini +# Startup daemon +ENV TINI_VERSION v0.19.0 +ARG envarg +ENV ENTERPRISE_BUILD ${envarg} +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini +RUN chmod +x /tini +ENTRYPOINT ["/tini", "--"] +CMD ./entrypoint.sh \ No newline at end of file diff --git a/ee/api/app.py b/ee/api/app.py index 1c731c3f7..fdf7f60b8 100644 --- a/ee/api/app.py +++ b/ee/api/app.py @@ -1,6 +1,8 @@ +import logging import queue from apscheduler.schedulers.asyncio import AsyncIOScheduler +from decouple import config from fastapi import FastAPI, Request from fastapi.middleware.cors import CORSMiddleware from starlette import status @@ -75,7 +77,10 @@ for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs: app.schedule.add_job(id=job["func"].__name__, **job) from chalicelib.core import traces -app.schedule.add_job(id="trace_worker",**traces.cron_jobs[0]) +app.schedule.add_job(id="trace_worker", **traces.cron_jobs[0]) for job in app.schedule.get_jobs(): print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)}) + +logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO)) +logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO)) diff --git a/ee/api/auth/auth_project.py b/ee/api/auth/auth_project.py index 479681cb8..c1e1d38cd 100644 --- a/ee/api/auth/auth_project.py 
+++ b/ee/api/auth/auth_project.py @@ -16,10 +16,12 @@ class ProjectAuthorizer: return current_user: schemas.CurrentContext = await OR_context(request) project_identifier = request.path_params[self.project_identifier] + user_id = current_user.user_id if request.state.authorizer_identity == "jwt" else None if (self.project_identifier == "projectId" \ - and not projects.is_authorized(project_id=project_identifier, tenant_id=current_user.tenant_id)) \ + and not projects.is_authorized(project_id=project_identifier, tenant_id=current_user.tenant_id, + user_id=user_id)) \ or (self.project_identifier.lower() == "projectKey" \ and not projects.is_authorized(project_id=projects.get_internal_project_id(project_identifier), - tenant_id=current_user.tenant_id)): + tenant_id=current_user.tenant_id, user_id=user_id)): print("unauthorized project") raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="unauthorized project.") diff --git a/ee/api/chalicelib/core/alerts_listener.py b/ee/api/chalicelib/core/alerts_listener.py new file mode 100644 index 000000000..40241f51e --- /dev/null +++ b/ee/api/chalicelib/core/alerts_listener.py @@ -0,0 +1,27 @@ +from chalicelib.utils import pg_client, helper + + +def get_all_alerts(): + with pg_client.PostgresClient(long_query=True) as cur: + query = """SELECT tenant_id, + alert_id, + project_id, + detection_method, + query, + options, + (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at, + alerts.name, + alerts.series_id, + filter + FROM public.alerts + LEFT JOIN metric_series USING (series_id) + INNER JOIN projects USING (project_id) + WHERE alerts.deleted_at ISNULL + AND alerts.active + AND projects.active + AND projects.deleted_at ISNULL + AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL) + ORDER BY alerts.created_at;""" + cur.execute(query=query) + all_alerts = helper.list_to_camel_case(cur.fetchall()) + return all_alerts diff --git a/ee/api/chalicelib/core/funnels.py 
b/ee/api/chalicelib/core/funnels.py deleted file mode 100644 index 9c550244d..000000000 --- a/ee/api/chalicelib/core/funnels.py +++ /dev/null @@ -1,275 +0,0 @@ -import chalicelib.utils.helper -from chalicelib.core import events, significance, sessions -from chalicelib.utils.TimeUTC import TimeUTC - -from chalicelib.utils import helper, pg_client -from chalicelib.utils import dev -import json - -REMOVE_KEYS = ["key", "_key", "startDate", "endDate"] - -ALLOW_UPDATE_FOR = ["name", "filter"] - - -def filter_stages(stages): - ALLOW_TYPES = [events.event_type.CLICK.ui_type, events.event_type.INPUT.ui_type, - events.event_type.LOCATION.ui_type, events.event_type.CUSTOM.ui_type, - events.event_type.CLICK_IOS.ui_type, events.event_type.INPUT_IOS.ui_type, - events.event_type.VIEW_IOS.ui_type, events.event_type.CUSTOM_IOS.ui_type, ] - return [s for s in stages if s["type"] in ALLOW_TYPES and s.get("value") is not None] - - -def create(project_id, user_id, name, filter, is_public): - helper.delete_keys_from_dict(filter, REMOVE_KEYS) - filter["events"] = filter_stages(stages=filter.get("events", [])) - with pg_client.PostgresClient() as cur: - query = cur.mogrify("""\ - INSERT INTO public.funnels (project_id, user_id, name, filter,is_public) - VALUES (%(project_id)s, %(user_id)s, %(name)s, %(filter)s::jsonb,%(is_public)s) - RETURNING *;""", - {"user_id": user_id, "project_id": project_id, "name": name, "filter": json.dumps(filter), - "is_public": is_public}) - - cur.execute( - query - ) - r = cur.fetchone() - r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) - r = helper.dict_to_camel_case(r) - r["filter"]["startDate"], r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"]) - return {"data": r} - - -def update(funnel_id, user_id, name=None, filter=None, is_public=None): - s_query = [] - if filter is not None: - helper.delete_keys_from_dict(filter, REMOVE_KEYS) - s_query.append("filter = %(filter)s::jsonb") - if name is not None and 
len(name) > 0: - s_query.append("name = %(name)s") - if is_public is not None: - s_query.append("is_public = %(is_public)s") - if len(s_query) == 0: - return {"errors": ["Nothing to update"]} - with pg_client.PostgresClient() as cur: - query = cur.mogrify(f"""\ - UPDATE public.funnels - SET {" , ".join(s_query)} - WHERE funnel_id=%(funnel_id)s - RETURNING *;""", - {"user_id": user_id, "funnel_id": funnel_id, "name": name, - "filter": json.dumps(filter) if filter is not None else None, "is_public": is_public}) - # print("--------------------") - # print(query) - # print("--------------------") - cur.execute( - query - ) - r = cur.fetchone() - r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) - r = helper.dict_to_camel_case(r) - r["filter"]["startDate"], r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"]) - return {"data": r} - - -def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date=None, details=False): - with pg_client.PostgresClient() as cur: - team_query = """INNER JOIN - ( - SELECT collaborators.user_id - FROM public.users AS creator - INNER JOIN public.users AS collaborators USING (tenant_id) - WHERE creator.user_id=%(user_id)s - ) AS team USING (user_id)""" - cur.execute( - cur.mogrify( - f"""\ - SELECT DISTINCT ON (funnels.funnel_id) funnel_id,project_id, user_id, name, created_at, deleted_at, is_public - {",filter" if details else ""} - FROM public.funnels {team_query} - WHERE project_id = %(project_id)s - AND funnels.deleted_at IS NULL - AND (funnels.user_id = %(user_id)s OR funnels.is_public);""", - {"project_id": project_id, "user_id": user_id} - ) - ) - - rows = cur.fetchall() - rows = helper.list_to_camel_case(rows) - for row in rows: - row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"]) - if details: - row["filter"]["events"] = filter_stages(row["filter"]["events"]) - get_start_end_time(filter_d=row["filter"], range_value=range_value, start_date=start_date, - 
end_date=end_date) - counts = sessions.search2_pg(data=row["filter"], project_id=project_id, user_id=None, count_only=True) - row["sessionsCount"] = counts["countSessions"] - row["usersCount"] = counts["countUsers"] - overview = significance.get_overview(filter_d=row["filter"], project_id=project_id) - row["stages"] = overview["stages"] - row.pop("filter") - row["stagesCount"] = len(row["stages"]) - # TODO: ask david to count it alone - row["criticalIssuesCount"] = overview["criticalIssuesCount"] - row["missedConversions"] = 0 if len(row["stages"]) < 2 \ - else row["stages"][0]["sessionsCount"] - row["stages"][-1]["sessionsCount"] - return rows - - -def get_possible_issue_types(project_id): - return [{"type": t, "title": chalicelib.utils.helper.get_issue_title(t)} for t in - ['click_rage', 'dead_click', 'excessive_scrolling', - 'bad_request', 'missing_resource', 'memory', 'cpu', - 'slow_resource', 'slow_page_load', 'crash', 'custom_event_error', - 'js_error']] - - -def get_start_end_time(filter_d, range_value, start_date, end_date): - if start_date is not None and end_date is not None: - filter_d["startDate"], filter_d["endDate"] = start_date, end_date - elif range_value is not None and len(range_value) > 0: - filter_d["rangeValue"] = range_value - filter_d["startDate"], filter_d["endDate"] = TimeUTC.get_start_end_from_range(range_value) - else: - filter_d["startDate"], filter_d["endDate"] = TimeUTC.get_start_end_from_range(filter_d["rangeValue"]) - - -def delete(project_id, funnel_id, user_id): - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify("""\ - UPDATE public.funnels - SET deleted_at = timezone('utc'::text, now()) - WHERE project_id = %(project_id)s - AND funnel_id = %(funnel_id)s;""", - {"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id}) - ) - - return {"data": {"state": "success"}} - - -def get_sessions(project_id, funnel_id, user_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, 
project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) - return sessions.search2_pg(data=f["filter"], project_id=project_id, user_id=user_id) - - -def get_sessions_on_the_fly(funnel_id, project_id, user_id, data): - data["events"] = filter_stages(data.get("events", [])) - if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None), - start_date=data.get('startDate', None), - end_date=data.get('endDate', None)) - data = f["filter"] - return sessions.search2_pg(data=data, project_id=project_id, user_id=user_id) - - -def get_top_insights(project_id, funnel_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) - insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=f["filter"], project_id=project_id) - insights[-1]["dropDueToIssues"] = total_drop_due_to_issues - return {"data": {"stages": helper.list_to_camel_case(insights), - "totalDropDueToIssues": total_drop_due_to_issues}} - - -def get_top_insights_on_the_fly(funnel_id, project_id, data): - data["events"] = filter_stages(data.get("events", [])) - if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None), - start_date=data.get('startDate', None), - end_date=data.get('endDate', None)) - data = f["filter"] - insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id) - if 
len(insights) > 0: - insights[-1]["dropDueToIssues"] = total_drop_due_to_issues - return {"data": {"stages": helper.list_to_camel_case(insights), - "totalDropDueToIssues": total_drop_due_to_issues}} - - -def get_issues(project_id, funnel_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) - return {"data": { - "issues": helper.dict_to_camel_case(significance.get_issues_list(filter_d=f["filter"], project_id=project_id)) - }} - - -@dev.timed -def get_issues_on_the_fly(funnel_id, project_id, data): - first_stage = data.get("firstStage") - last_stage = data.get("lastStage") - data["events"] = filter_stages(data.get("events", [])) - if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None), - start_date=data.get('startDate', None), - end_date=data.get('endDate', None)) - data = f["filter"] - return { - "issues": helper.dict_to_camel_case( - significance.get_issues_list(filter_d=data, project_id=project_id, first_stage=first_stage, - last_stage=last_stage))} - - -def get(funnel_id, project_id): - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify( - """\ - SELECT - * - FROM public.funnels - WHERE project_id = %(project_id)s - AND deleted_at IS NULL - AND funnel_id = %(funnel_id)s;""", - {"funnel_id": funnel_id, "project_id": project_id} - ) - ) - - f = helper.dict_to_camel_case(cur.fetchone()) - if f is None: - return None - - f["createdAt"] = TimeUTC.datetime_to_timestamp(f["createdAt"]) - f["filter"]["events"] = filter_stages(stages=f["filter"]["events"]) - return f - - -@dev.timed -def search_by_issue(user_id, project_id, funnel_id, issue_id, data, range_value=None, 
start_date=None, end_date=None): - if len(data.get("events", [])) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=data.get('startDate', start_date), - end_date=data.get('endDate', end_date)) - data = f["filter"] - - # insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id) - issues = get_issues_on_the_fly(funnel_id=funnel_id, project_id=project_id, data=data).get("issues", {}) - issues = issues.get("significant", []) + issues.get("insignificant", []) - issue = None - for i in issues: - if i.get("issueId", "") == issue_id: - issue = i - break - return {"sessions": sessions.search2_pg(user_id=user_id, project_id=project_id, issue=issue, - data=data) if issue is not None else {"total": 0, "sessions": []}, - # "stages": helper.list_to_camel_case(insights), - # "totalDropDueToIssues": total_drop_due_to_issues, - "issue": issue} diff --git a/ee/api/chalicelib/core/projects.py b/ee/api/chalicelib/core/projects.py index a9d7767d0..2728e5077 100644 --- a/ee/api/chalicelib/core/projects.py +++ b/ee/api/chalicelib/core/projects.py @@ -1,5 +1,6 @@ import json +import schemas from chalicelib.core import users from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC @@ -41,7 +42,7 @@ def __create(tenant_id, name): def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, stack_integrations=False, version=False, - last_tracker_version=None): + last_tracker_version=None, user_id=None): with pg_client.PostgresClient() as cur: tracker_query = "" if last_tracker_version is not None and len(last_tracker_version) > 0: @@ -53,6 +54,15 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st elif version: tracker_query = ",(SELECT tracker_version FROM public.sessions WHERE sessions.project_id = s.project_id ORDER 
BY start_ts DESC LIMIT 1) AS tracker_version" + role_query = """INNER JOIN LATERAL (SELECT 1 + FROM users + INNER JOIN roles USING (role_id) + LEFT JOIN roles_projects USING (role_id) + WHERE users.user_id = %(user_id)s + AND users.deleted_at ISNULL + AND users.tenant_id = %(tenant_id)s + AND (roles.all_projects OR roles_projects.project_id = s.project_id) + ) AS role_project ON (TRUE)""" cur.execute( cur.mogrify(f"""\ SELECT @@ -63,10 +73,11 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st {tracker_query} FROM public.projects AS s {'LEFT JOIN LATERAL (SELECT COUNT(*) AS count FROM public.integrations WHERE s.project_id = integrations.project_id LIMIT 1) AS stack_integrations ON TRUE' if stack_integrations else ''} + {role_query if user_id is not None else ""} WHERE s.tenant_id =%(tenant_id)s AND s.deleted_at IS NULL ORDER BY s.project_id;""", - {"tenant_id": tenant_id}) + {"tenant_id": tenant_id, "user_id": user_id}) ) rows = cur.fetchall() if recording_state: @@ -104,8 +115,8 @@ def get_project(tenant_id, project_id, include_last_session=False, include_gdpr= query = cur.mogrify(f"""\ SELECT s.project_id, - s.name, - s.project_key + s.project_key, + s.name {",(SELECT max(ss.start_ts) FROM public.sessions AS ss WHERE ss.project_id = %(project_id)s) AS last_recorded_session_at" if include_last_session else ""} {',s.gdpr' if include_gdpr else ''} {tracker_query} @@ -123,26 +134,52 @@ def get_project(tenant_id, project_id, include_last_session=False, include_gdpr= return helper.dict_to_camel_case(row) -def is_authorized(project_id, tenant_id): +def is_authorized(project_id, tenant_id, user_id=None): if project_id is None or not str(project_id).isdigit(): return False - return get_project(tenant_id=tenant_id, project_id=project_id) is not None + with pg_client.PostgresClient() as cur: + role_query = """INNER JOIN LATERAL (SELECT 1 + FROM users + INNER JOIN roles USING (role_id) + LEFT JOIN roles_projects USING (role_id) + WHERE 
users.user_id = %(user_id)s + AND users.deleted_at ISNULL + AND users.tenant_id = %(tenant_id)s + AND (roles.all_projects OR roles_projects.project_id = %(project_id)s) + ) AS role_project ON (TRUE)""" + + query = cur.mogrify(f"""\ + SELECT project_id + FROM public.projects AS s + {role_query if user_id is not None else ""} + where s.tenant_id =%(tenant_id)s + AND s.project_id =%(project_id)s + AND s.deleted_at IS NULL + LIMIT 1;""", + {"tenant_id": tenant_id, "project_id": project_id, "user_id": user_id}) + cur.execute( + query=query + ) + row = cur.fetchone() + return row is not None -def create(tenant_id, user_id, data, skip_authorization=False): +def create(tenant_id, user_id, data: schemas.CreateProjectSchema, skip_authorization=False): if not skip_authorization: admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} - return {"data": __create(tenant_id=tenant_id, name=data.get("name", "my first project"))} + if admin["roleId"] is not None and not admin["allProjects"]: + return {"errors": ["unauthorized: you need allProjects permission to create a new project"]} + return {"data": __create(tenant_id=tenant_id, name=data.name)} -def edit(tenant_id, user_id, project_id, data): +def edit(tenant_id, user_id, project_id, data: schemas.CreateProjectSchema): admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} return {"data": __update(tenant_id=tenant_id, project_id=project_id, - changes={"name": data.get("name", "my first project")})} + changes={"name": data.name})} def delete(tenant_id, user_id, project_id): @@ -152,8 +189,7 @@ def delete(tenant_id, user_id, project_id): return {"errors": ["unauthorized"]} with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify("""\ - UPDATE public.projects + cur.mogrify("""UPDATE public.projects SET deleted_at = timezone('utc'::text, now()), 
active = FALSE @@ -274,3 +310,22 @@ def get_project_by_key(tenant_id, project_key, include_last_session=False, inclu ) row = cur.fetchone() return helper.dict_to_camel_case(row) + + +def is_authorized_batch(project_ids, tenant_id): + if project_ids is None or not len(project_ids): + return False + with pg_client.PostgresClient() as cur: + query = cur.mogrify("""\ + SELECT project_id + FROM public.projects + where tenant_id =%(tenant_id)s + AND project_id IN %(project_ids)s + AND deleted_at IS NULL;""", + {"tenant_id": tenant_id, "project_ids": tuple(project_ids)}) + + cur.execute( + query=query + ) + rows = cur.fetchall() + return [r["project_id"] for r in rows] diff --git a/ee/api/chalicelib/core/roles.py b/ee/api/chalicelib/core/roles.py index 8ba62091a..5bd80dc06 100644 --- a/ee/api/chalicelib/core/roles.py +++ b/ee/api/chalicelib/core/roles.py @@ -1,64 +1,111 @@ -from chalicelib.core import users +import schemas_ee +from chalicelib.core import users, projects from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC -def update(tenant_id, user_id, role_id, changes): +def update(tenant_id, user_id, role_id, data: schemas_ee.RolePayloadSchema): admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} - if len(changes.keys()) == 0: - return None - ALLOW_EDIT = ["name", "description", "permissions"] - sub_query = [] - for key in changes.keys(): - if key in ALLOW_EDIT: - sub_query.append(f"{helper.key_to_snake_case(key)} = %({key})s") + if not data.all_projects and (data.projects is None or len(data.projects) == 0): + return {"errors": ["must specify a project or all projects"]} + if data.projects is not None and len(data.projects) > 0 and not data.all_projects: + data.projects = projects.is_authorized_batch(project_ids=data.projects, tenant_id=tenant_id) with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify(f"""\ + 
cur.mogrify("""SELECT 1 + FROM public.roles + WHERE role_id = %(role_id)s + AND tenant_id = %(tenant_id)s + AND protected = TRUE + LIMIT 1;""", + {"tenant_id": tenant_id, "role_id": role_id}) + ) + if cur.fetchone() is not None: + return {"errors": ["this role is protected"]} + cur.execute( + cur.mogrify("""\ UPDATE public.roles - SET {" ,".join(sub_query)} + SET name= %(name)s, + description= %(description)s, + permissions= %(permissions)s, + all_projects= %(all_projects)s WHERE role_id = %(role_id)s AND tenant_id = %(tenant_id)s AND deleted_at ISNULL AND protected = FALSE - RETURNING *;""", - {"tenant_id": tenant_id, "role_id": role_id, **changes}) + RETURNING *, COALESCE((SELECT ARRAY_AGG(project_id) + FROM roles_projects WHERE roles_projects.role_id=%(role_id)s),'{}') AS projects;""", + {"tenant_id": tenant_id, "role_id": role_id, **data.dict()}) ) row = cur.fetchone() row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"]) + if not data.all_projects: + d_projects = [i for i in row["projects"] if i not in data.projects] + if len(d_projects) > 0: + cur.execute( + cur.mogrify( + "DELETE FROM roles_projects WHERE role_id=%(role_id)s AND project_id IN %(project_ids)s", + {"role_id": role_id, "project_ids": tuple(d_projects)}) + ) + n_projects = [i for i in data.projects if i not in row["projects"]] + if len(n_projects) > 0: + cur.execute( + cur.mogrify( + f"""INSERT INTO roles_projects(role_id, project_id) + VALUES {",".join([f"(%(role_id)s,%(project_id_{i})s)" for i in range(len(n_projects))])}""", + {"role_id": role_id, **{f"project_id_{i}": p for i, p in enumerate(n_projects)}}) + ) + row["projects"] = data.projects + return helper.dict_to_camel_case(row) -def create(tenant_id, user_id, name, description, permissions): +def create(tenant_id, user_id, data: schemas_ee.RolePayloadSchema): admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} - + if not 
data.all_projects and (data.projects is None or len(data.projects) == 0): + return {"errors": ["must specify a project or all projects"]} + if data.projects is not None and len(data.projects) > 0 and not data.all_projects: + data.projects = projects.is_authorized_batch(project_ids=data.projects, tenant_id=tenant_id) with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify("""INSERT INTO roles(tenant_id, name, description, permissions) - VALUES (%(tenant_id)s, %(name)s, %(description)s, %(permissions)s::text[]) + cur.mogrify("""INSERT INTO roles(tenant_id, name, description, permissions, all_projects) + VALUES (%(tenant_id)s, %(name)s, %(description)s, %(permissions)s::text[], %(all_projects)s) RETURNING *;""", - {"tenant_id": tenant_id, "name": name, "description": description, "permissions": permissions}) + {"tenant_id": tenant_id, "name": data.name, "description": data.description, + "permissions": data.permissions, "all_projects": data.all_projects}) ) row = cur.fetchone() row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"]) + if not data.all_projects: + role_id = row["role_id"] + cur.execute( + cur.mogrify(f"""INSERT INTO roles_projects(role_id, project_id) + VALUES {",".join(f"(%(role_id)s,%(project_id_{i})s)" for i in range(len(data.projects)))};""", + {"role_id": role_id, **{f"project_id_{i}": p for i, p in enumerate(data.projects)}}) + ) return helper.dict_to_camel_case(row) def get_roles(tenant_id): with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify("""SELECT * - FROM public.roles - where tenant_id =%(tenant_id)s - AND deleted_at IS NULL - ORDER BY role_id;""", + cur.mogrify("""SELECT roles.*, COALESCE(projects, '{}') AS projects + FROM public.roles + LEFT JOIN LATERAL (SELECT array_agg(project_id) AS projects + FROM roles_projects + INNER JOIN projects USING (project_id) + WHERE roles_projects.role_id = roles.role_id + AND projects.deleted_at ISNULL ) AS role_projects ON (TRUE) + WHERE tenant_id =%(tenant_id)s + 
AND deleted_at IS NULL + ORDER BY role_id;""", {"tenant_id": tenant_id}) ) rows = cur.fetchall() @@ -71,11 +118,10 @@ def get_role_by_name(tenant_id, name): with pg_client.PostgresClient() as cur: cur.execute( cur.mogrify("""SELECT * - FROM public.roles - where tenant_id =%(tenant_id)s - AND deleted_at IS NULL - AND name ILIKE %(name)s - ;""", + FROM public.roles + where tenant_id =%(tenant_id)s + AND deleted_at IS NULL + AND name ILIKE %(name)s;""", {"tenant_id": tenant_id, "name": name}) ) row = cur.fetchone() @@ -92,11 +138,11 @@ def delete(tenant_id, user_id, role_id): with pg_client.PostgresClient() as cur: cur.execute( cur.mogrify("""SELECT 1 - FROM public.roles - WHERE role_id = %(role_id)s - AND tenant_id = %(tenant_id)s - AND protected = TRUE - LIMIT 1;""", + FROM public.roles + WHERE role_id = %(role_id)s + AND tenant_id = %(tenant_id)s + AND protected = TRUE + LIMIT 1;""", {"tenant_id": tenant_id, "role_id": role_id}) ) if cur.fetchone() is not None: diff --git a/ee/api/chalicelib/core/users.py b/ee/api/chalicelib/core/users.py index 7838a68ac..ce5bcca5d 100644 --- a/ee/api/chalicelib/core/users.py +++ b/ee/api/chalicelib/core/users.py @@ -274,6 +274,7 @@ def get(user_id, tenant_id): role_id, roles.name AS role_name, roles.permissions, + roles.all_projects, basic_authentication.password IS NOT NULL AS has_password FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id LEFT JOIN public.roles USING (role_id) @@ -482,7 +483,7 @@ def change_password(tenant_id, user_id, email, old_password, new_password): c = tenants.get_by_tenant_id(tenant_id) c.pop("createdAt") c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, - stack_integrations=True) + stack_integrations=True, user_id=user_id) c["smtp"] = helper.has_smtp() c["iceServers"] = assist.get_ice_servers() return { @@ -510,7 +511,7 @@ def set_password_invitation(tenant_id, user_id, new_password): c = 
tenants.get_by_tenant_id(tenant_id) c.pop("createdAt") c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, - stack_integrations=True) + stack_integrations=True, user_id=user_id) c["smtp"] = helper.has_smtp() c["iceServers"] = assist.get_ice_servers() return { @@ -735,3 +736,57 @@ def create_sso_user(tenant_id, email, admin, name, origin, role_id, internal_id= query ) return helper.dict_to_camel_case(cur.fetchone()) + + +def restore_sso_user(user_id, tenant_id, email, admin, name, origin, role_id, internal_id=None): + with pg_client.PostgresClient() as cur: + query = cur.mogrify(f"""\ + WITH u AS ( + UPDATE public.users + SET tenant_id= %(tenantId)s, + role= %(role)s, + name= %(name)s, + data= %(data)s, + origin= %(origin)s, + internal_id= %(internal_id)s, + role_id= %(role_id)s, + deleted_at= NULL, + created_at= default, + api_key= default, + jwt_iat= NULL, + appearance= default, + weekly_report= default + WHERE user_id = %(user_id)s + RETURNING * + ), + au AS ( + UPDATE public.basic_authentication + SET password= default, + generated_password= default, + invitation_token= default, + invited_at= default, + change_pwd_token= default, + change_pwd_expire_at= default, + changed_at= NULL + WHERE user_id = %(user_id)s + RETURNING user_id + ) + SELECT u.user_id AS id, + u.email, + u.role, + u.name, + TRUE AS change_password, + (CASE WHEN u.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin, + (CASE WHEN u.role = 'admin' THEN TRUE ELSE FALSE END) AS admin, + (CASE WHEN u.role = 'member' THEN TRUE ELSE FALSE END) AS member, + u.appearance, + origin + FROM u;""", + {"tenantId": tenant_id, "email": email, "internal_id": internal_id, + "role": "admin" if admin else "member", "name": name, "origin": origin, + "role_id": role_id, "data": json.dumps({"lastAnnouncementView": TimeUTC.now()}), + "user_id": user_id}) + cur.execute( + query + ) + return helper.dict_to_camel_case(cur.fetchone()) diff --git 
a/ee/api/chalicelib/utils/SAML2_helper.py b/ee/api/chalicelib/utils/SAML2_helper.py index a2a4e1e6e..c00081d2c 100644 --- a/ee/api/chalicelib/utils/SAML2_helper.py +++ b/ee/api/chalicelib/utils/SAML2_helper.py @@ -12,11 +12,11 @@ SAML2 = { "sp": { "entityId": config("SITE_URL") + "/api/sso/saml2/metadata/", "assertionConsumerService": { - "url": config("SITE_URL") + "/api/sso/saml2/acs", + "url": config("SITE_URL") + "/api/sso/saml2/acs/", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" }, "singleLogoutService": { - "url": config("SITE_URL") + "/api/sso/saml2/sls", + "url": config("SITE_URL") + "/api/sso/saml2/sls/", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress", @@ -25,6 +25,12 @@ SAML2 = { }, "idp": None } + +# in case tenantKey is included in the URL +sp_acs = config("idp_tenantKey", default="") +if sp_acs is not None and len(sp_acs) > 0: + SAML2["sp"]["assertionConsumerService"]["url"] += sp_acs + "/" + idp = None # SAML2 config handler if config("SAML2_MD_URL", default=None) is not None and len(config("SAML2_MD_URL")) > 0: @@ -60,12 +66,9 @@ else: def init_saml_auth(req): # auth = OneLogin_Saml2_Auth(req, custom_base_path=environ['SAML_PATH']) - if idp is None: raise Exception("No SAML2 config provided") - auth = OneLogin_Saml2_Auth(req, old_settings=SAML2) - - return auth + return OneLogin_Saml2_Auth(req, old_settings=SAML2) async def prepare_request(request: Request): @@ -86,12 +89,20 @@ async def prepare_request(request: Request): session = {} # If server is behind proxys or balancers use the HTTP_X_FORWARDED fields headers = request.headers - url_data = urlparse('%s://%s' % (headers.get('x-forwarded-proto', 'http'), headers['host'])) + proto = headers.get('x-forwarded-proto', 'http') + if headers.get('x-forwarded-proto') is not None: + print(f"x-forwarded-proto: {proto}") + url_data = urlparse('%s://%s' % (proto, headers['host'])) + path = 
request.url.path + # add / to /acs + if not path.endswith("/"): + path = path + '/' + return { - 'https': 'on' if request.headers.get('x-forwarded-proto', 'http') == 'https' else 'off', + 'https': 'on' if proto == 'https' else 'off', 'http_host': request.headers['host'], 'server_port': url_data.port, - 'script_name': "/api" + request.url.path, + 'script_name': "/api" + path, 'get_data': request.args.copy(), # Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144 # 'lowercase_urlencoding': True, diff --git a/ee/api/entrypoint.sh b/ee/api/entrypoint.sh new file mode 100755 index 000000000..a092737be --- /dev/null +++ b/ee/api/entrypoint.sh @@ -0,0 +1,2 @@ +#!/bin/bash +uvicorn app:app --host 0.0.0.0 --reload diff --git a/ee/api/routers/core_dynamic.py b/ee/api/routers/core_dynamic.py index a887237b3..cf5e7378f 100644 --- a/ee/api/routers/core_dynamic.py +++ b/ee/api/routers/core_dynamic.py @@ -8,7 +8,7 @@ import schemas import schemas_ee from chalicelib.core import integrations_manager from chalicelib.core import sessions -from chalicelib.core import tenants, users, metadata, projects, license, alerts, assist +from chalicelib.core import tenants, users, metadata, projects, license, assist from chalicelib.core import webhook from chalicelib.core.collaboration_slack import Slack from chalicelib.utils import captcha, SAML2_helper @@ -52,7 +52,7 @@ def login(data: schemas.UserLoginSchema = Body(...)): c = tenants.get_by_tenant_id(tenant_id) c.pop("createdAt") c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, - stack_integrations=True, version=True) + stack_integrations=True, version=True, user_id=r["id"]) c["smtp"] = helper.has_smtp() c["iceServers"] = assist.get_ice_servers() r["smtp"] = c["smtp"] @@ -195,7 +195,8 @@ def search_sessions_by_metadata(key: str, value: str, projectId: Optional[int] = if key is None or value is None or len(value) == 0 and len(key) == 0: return {"errors": ["please 
provide a key&value for search"]} - if projectId is not None and not projects.is_authorized(project_id=projectId, tenant_id=context.tenant_id): + if projectId is not None and not projects.is_authorized(project_id=projectId, tenant_id=context.tenant_id, + user_id=context.user_id): return {"errors": ["unauthorized project"]} if len(value) == 0: return {"errors": ["please provide a value for search"]} @@ -213,13 +214,25 @@ def get_current_plan(context: schemas.CurrentContext = Depends(OR_context)): } -@public_app.post('/alerts/notifications', tags=["alerts"]) -@public_app.put('/alerts/notifications', tags=["alerts"]) -def send_alerts_notifications(background_tasks: BackgroundTasks, data: schemas.AlertNotificationSchema = Body(...)): - # TODO: validate token - return {"data": alerts.process_notifications(data.notifications, background_tasks=background_tasks)} - - @public_app.get('/general_stats', tags=["private"], include_in_schema=False) def get_general_stats(): return {"data": {"sessions:": sessions.count_all()}} + + +@app.get('/client', tags=['projects']) +def get_client(context: schemas.CurrentContext = Depends(OR_context)): + r = tenants.get_by_tenant_id(context.tenant_id) + if r is not None: + r.pop("createdAt") + r["projects"] = projects.get_projects(tenant_id=context.tenant_id, recording_state=True, recorded=True, + stack_integrations=True, version=True, user_id=context.user_id) + return { + 'data': r + } + + +@app.get('/projects', tags=['projects']) +def get_projects(last_tracker_version: Optional[str] = None, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True, + stack_integrations=True, version=True, + last_tracker_version=last_tracker_version, user_id=context.user_id)} diff --git a/ee/api/routers/ee.py b/ee/api/routers/ee.py index 52ffaad8f..1a9589eaa 100644 --- a/ee/api/routers/ee.py +++ b/ee/api/routers/ee.py @@ -23,7 +23,7 @@ def 
get_roles(context: schemas.CurrentContext = Depends(OR_context)): @app.post('/client/roles', tags=["client", "roles"]) @app.put('/client/roles', tags=["client", "roles"]) def add_role(data: schemas_ee.RolePayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): - data = roles.create(tenant_id=context.tenant_id, user_id=context.user_id, **data.dict()) + data = roles.create(tenant_id=context.tenant_id, user_id=context.user_id, data=data) if "errors" in data: return data @@ -36,7 +36,7 @@ def add_role(data: schemas_ee.RolePayloadSchema = Body(...), context: schemas.Cu @app.put('/client/roles/{roleId}', tags=["client", "roles"]) def edit_role(roleId: int, data: schemas_ee.RolePayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): - data = roles.update(tenant_id=context.tenant_id, user_id=context.user_id, role_id=roleId, changes=data.dict()) + data = roles.update(tenant_id=context.tenant_id, user_id=context.user_id, role_id=roleId, data=data) if "errors" in data: return data diff --git a/ee/api/routers/saml.py b/ee/api/routers/saml.py index c9f074324..ee0f0333b 100644 --- a/ee/api/routers/saml.py +++ b/ee/api/routers/saml.py @@ -16,6 +16,7 @@ from starlette import status @public_app.get("/sso/saml2", tags=["saml2"]) +@public_app.get("/sso/saml2/", tags=["saml2"]) async def start_sso(request: Request): request.path = '' req = await prepare_request(request=request) @@ -24,8 +25,8 @@ async def start_sso(request: Request): return RedirectResponse(url=sso_built_url) -# @public_app.post('/sso/saml2/acs', tags=["saml2"], content_types=['application/x-www-form-urlencoded']) @public_app.post('/sso/saml2/acs', tags=["saml2"]) +@public_app.post('/sso/saml2/acs/', tags=["saml2"]) async def process_sso_assertion(request: Request): req = await prepare_request(request=request) session = req["cookie"]["session"] @@ -44,6 +45,8 @@ async def process_sso_assertion(request: Request): user_data = auth.get_attributes() elif 
auth.get_settings().is_debug_active(): error_reason = auth.get_last_error_reason() + print("SAML2 error:") + print(error_reason) return {"errors": [error_reason]} email = auth.get_nameid() @@ -77,11 +80,102 @@ async def process_sso_assertion(request: Request): or admin_privileges[0].lower() == "false") if existing is None: - print("== new user ==") - users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=admin_privileges, - origin=SAML2_helper.get_saml2_provider(), - name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])), - internal_id=internal_id, role_id=role["roleId"]) + deleted = users.get_deleted_user_by_email(auth.get_nameid()) + if deleted is not None: + print("== restore deleted user ==") + users.restore_sso_user(user_id=deleted["userId"], tenant_id=t['tenantId'], email=email, + admin=admin_privileges, origin=SAML2_helper.get_saml2_provider(), + name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])), + internal_id=internal_id, role_id=role["roleId"]) + else: + print("== new user ==") + users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=admin_privileges, + origin=SAML2_helper.get_saml2_provider(), + name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])), + internal_id=internal_id, role_id=role["roleId"]) + else: + if t['tenantId'] != existing["tenantId"]: + print("user exists for a different tenant") + return {"errors": ["user exists for a different tenant"]} + if existing.get("origin") is None: + print(f"== migrating user to {SAML2_helper.get_saml2_provider()} ==") + users.update(tenant_id=t['tenantId'], user_id=existing["id"], + changes={"origin": SAML2_helper.get_saml2_provider(), "internal_id": internal_id}) + expiration = auth.get_session_expiration() + expiration = expiration if expiration is not None and expiration > 10 * 60 \ + else int(config("sso_exp_delta_seconds", cast=int, default=24 * 60 * 60)) + jwt = users.authenticate_sso(email=email, 
internal_id=internal_id, exp=expiration) + if jwt is None: + return {"errors": ["null JWT"]} + return Response( + status_code=status.HTTP_302_FOUND, + headers={'Location': SAML2_helper.get_landing_URL(jwt)}) + + +@public_app.post('/sso/saml2/acs/{tenantKey}', tags=["saml2"]) +@public_app.post('/sso/saml2/acs/{tenantKey}/', tags=["saml2"]) +async def process_sso_assertion_tk(tenantKey: str, request: Request): + req = await prepare_request(request=request) + session = req["cookie"]["session"] + auth = init_saml_auth(req) + + request_id = None + if 'AuthNRequestID' in session: + request_id = session['AuthNRequestID'] + + auth.process_response(request_id=request_id) + errors = auth.get_errors() + user_data = {} + if len(errors) == 0: + if 'AuthNRequestID' in session: + del session['AuthNRequestID'] + user_data = auth.get_attributes() + elif auth.get_settings().is_debug_active(): + error_reason = auth.get_last_error_reason() + print("SAML2 error:") + print(error_reason) + return {"errors": [error_reason]} + + email = auth.get_nameid() + print("received nameId:") + print(email) + existing = users.get_by_email_only(auth.get_nameid()) + + internal_id = next(iter(user_data.get("internalId", [])), None) + + t = tenants.get_by_tenant_key(tenantKey) + if t is None: + print("invalid tenantKey, please copy the correct value from Preferences > Account") + return {"errors": ["invalid tenantKey, please copy the correct value from Preferences > Account"]} + print(user_data) + role_name = user_data.get("role", []) + if len(role_name) == 0: + print("No role specified, setting role to member") + role_name = ["member"] + role_name = role_name[0] + role = roles.get_role_by_name(tenant_id=t['tenantId'], name=role_name) + if role is None: + return {"errors": [f"role {role_name} not found, please create it in openreplay first"]} + + admin_privileges = user_data.get("adminPrivileges", []) + admin_privileges = not (len(admin_privileges) == 0 + or admin_privileges[0] is None + or 
admin_privileges[0].lower() == "false") + + if existing is None: + deleted = users.get_deleted_user_by_email(auth.get_nameid()) + if deleted is not None: + print("== restore deleted user ==") + users.restore_sso_user(user_id=deleted["userId"], tenant_id=t['tenantId'], email=email, + admin=admin_privileges, origin=SAML2_helper.get_saml2_provider(), + name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])), + internal_id=internal_id, role_id=role["roleId"]) + else: + print("== new user ==") + users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=admin_privileges, + origin=SAML2_helper.get_saml2_provider(), + name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])), + internal_id=internal_id, role_id=role["roleId"]) else: if t['tenantId'] != existing["tenantId"]: print("user exists for a different tenant") @@ -102,6 +196,7 @@ async def process_sso_assertion(request: Request): @public_app.get('/sso/saml2/sls', tags=["saml2"]) +@public_app.get('/sso/saml2/sls/', tags=["saml2"]) async def process_sls_assertion(request: Request): req = await prepare_request(request=request) session = req["cookie"]["session"] @@ -137,6 +232,7 @@ async def process_sls_assertion(request: Request): @public_app.get('/sso/saml2/metadata', tags=["saml2"]) +@public_app.get('/sso/saml2/metadata/', tags=["saml2"]) async def saml2_metadata(request: Request): req = await prepare_request(request=request) auth = init_saml_auth(req) diff --git a/ee/api/schemas_ee.py b/ee/api/schemas_ee.py index e278f3077..59a58f94b 100644 --- a/ee/api/schemas_ee.py +++ b/ee/api/schemas_ee.py @@ -9,6 +9,11 @@ class RolePayloadSchema(BaseModel): name: str = Field(...) description: Optional[str] = Field(None) permissions: List[str] = Field(...) 
+ all_projects: bool = Field(True) + projects: List[int] = Field([]) + + class Config: + alias_generator = schemas.attribute_to_camel_case class CreateMemberSchema(schemas.CreateMemberSchema): diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/1.4.0/1.4.0.sql b/ee/scripts/helm/db/init_dbs/clickhouse/1.4.0/1.4.0.sql new file mode 100644 index 000000000..e259bdf69 --- /dev/null +++ b/ee/scripts/helm/db/init_dbs/clickhouse/1.4.0/1.4.0.sql @@ -0,0 +1,4 @@ +ALTER TABLE sessions + ADD COLUMN IF NOT EXISTS utm_source Nullable(String), + ADD COLUMN IF NOT EXISTS utm_medium Nullable(String), + ADD COLUMN IF NOT EXISTS utm_campaign Nullable(String); diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/clicks.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/clicks.sql index 1fd5b0dbd..7781d2328 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/clicks.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/clicks.sql @@ -1,21 +1,21 @@ -CREATE TABLE clicks +CREATE TABLE IF NOT EXISTS clicks ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), - user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 
'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), - datetime DateTime, - label String, - hesitation_time Nullable(UInt32) + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + 
user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 
'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + label String, + hesitation_time Nullable(UInt32) ) ENGINE = MergeTree -PARTITION BY toDate(datetime) -ORDER BY (project_id, datetime) -TTL datetime + INTERVAL 1 MONTH; + PARTITION BY toDate(datetime) + ORDER BY (project_id, datetime) + TTL datetime + INTERVAL 1 MONTH; diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/customs.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/customs.sql index 6d466a7a0..eed67c990 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/customs.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/customs.sql @@ -1,4 +1,4 @@ -CREATE TABLE customs +CREATE TABLE IF NOT EXISTS customs ( session_id UInt64, project_id UInt32, diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/errors.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/errors.sql index 51e2d806a..4560f6500 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/errors.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/errors.sql @@ -1,22 +1,23 @@ -CREATE TABLE errors ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), - 
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 
'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), - datetime DateTime, - source Enum8('js_exception'=0, 'bugsnag'=1, 'cloudwatch'=2, 'datadog'=3, 'elasticsearch'=4, 'newrelic'=5, 'rollbar'=6, 'sentry'=7, 'stackdriver'=8, 'sumologic'=9), - name Nullable(String), - message String, - error_id String +CREATE TABLE IF NOT EXISTS errors +( + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 
'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + source Enum8('js_exception'=0, 'bugsnag'=1, 'cloudwatch'=2, 'datadog'=3, 'elasticsearch'=4, 'newrelic'=5, 'rollbar'=6, 'sentry'=7, 'stackdriver'=8, 'sumologic'=9), + name Nullable(String), + message String, + error_id String ) ENGINE = MergeTree -PARTITION BY toDate(datetime) -ORDER BY (project_id, datetime) -TTL datetime + INTERVAL 1 MONTH; + PARTITION BY toDate(datetime) + ORDER BY (project_id, datetime) + TTL datetime + INTERVAL 1 MONTH; diff --git 
a/ee/scripts/helm/db/init_dbs/clickhouse/create/inputs.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/inputs.sql index 4c369c7d5..523d2d468 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/inputs.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/inputs.sql @@ -1,20 +1,20 @@ -CREATE TABLE inputs +CREATE TABLE IF NOT EXISTS inputs ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), - user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 
'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), - datetime DateTime, - label String + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 
'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + label String ) ENGINE = MergeTree -PARTITION 
BY toDate(datetime) -ORDER BY (project_id, datetime) -TTL datetime + INTERVAL 1 MONTH; + PARTITION BY toDate(datetime) + ORDER BY (project_id, datetime) + TTL datetime + INTERVAL 1 MONTH; diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/longtasks.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/longtasks.sql index a5622407b..9770fb380 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/longtasks.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/longtasks.sql @@ -1,26 +1,26 @@ -CREATE TABLE longtasks +CREATE TABLE IF NOT EXISTS longtasks ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), - user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 
'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), - datetime DateTime, - duration UInt16, - context Enum8('unknown'=0, 'self'=1, 'same-origin-ancestor'=2, 'same-origin-descendant'=3, 'same-origin'=4, 'cross-origin-ancestor'=5, 'cross-origin-descendant'=6, 'cross-origin-unreachable'=7, 'multiple-contexts'=8), - container_type Enum8('window'=0, 'iframe'=1, 'embed'=2, 'object'=3), - container_id String, - container_name String, - container_src String + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device 
Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 
'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + duration UInt16, + context Enum8('unknown'=0, 'self'=1, 'same-origin-ancestor'=2, 'same-origin-descendant'=3, 'same-origin'=4, 'cross-origin-ancestor'=5, 'cross-origin-descendant'=6, 'cross-origin-unreachable'=7, 'multiple-contexts'=8), + container_type Enum8('window'=0, 'iframe'=1, 'embed'=2, 'object'=3), + container_id String, + container_name String, + container_src String ) ENGINE = MergeTree -PARTITION BY toDate(datetime) -ORDER BY (project_id, datetime) -TTL datetime + INTERVAL 1 MONTH; + PARTITION BY toDate(datetime) + ORDER BY (project_id, datetime) + TTL datetime + INTERVAL 1 MONTH; diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/negatives_buffer.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/negatives_buffer.sql index 354e9f87f..ac67028b3 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/negatives_buffer.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/negatives_buffer.sql @@ -1,213 +1,215 @@ -CREATE TABLE negatives_buffer ( -sessionid UInt64, -clickevent_hesitationtime Nullable(UInt64), -clickevent_label Nullable(String), -clickevent_messageid Nullable(UInt64), -clickevent_timestamp Nullable(Datetime), -connectioninformation_downlink Nullable(UInt64), -connectioninformation_type Nullable(String), -consolelog_level Nullable(String), -consolelog_value Nullable(String), -cpuissue_duration Nullable(UInt64), -cpuissue_rate Nullable(UInt64), -cpuissue_timestamp Nullable(Datetime), 
-createdocument Nullable(UInt8), -createelementnode_id Nullable(UInt64), -createelementnode_parentid Nullable(UInt64), -cssdeleterule_index Nullable(UInt64), -cssdeleterule_stylesheetid Nullable(UInt64), -cssinsertrule_index Nullable(UInt64), -cssinsertrule_rule Nullable(String), -cssinsertrule_stylesheetid Nullable(UInt64), -customevent_messageid Nullable(UInt64), -customevent_name Nullable(String), -customevent_payload Nullable(String), -customevent_timestamp Nullable(Datetime), -domdrop_timestamp Nullable(Datetime), -errorevent_message Nullable(String), -errorevent_messageid Nullable(UInt64), -errorevent_name Nullable(String), -errorevent_payload Nullable(String), -errorevent_source Nullable(String), -errorevent_timestamp Nullable(Datetime), -fetch_duration Nullable(UInt64), -fetch_method Nullable(String), -fetch_request Nullable(String), -fetch_status Nullable(UInt64), -fetch_timestamp Nullable(Datetime), -fetch_url Nullable(String), -graphql_operationkind Nullable(String), -graphql_operationname Nullable(String), -graphql_response Nullable(String), -graphql_variables Nullable(String), -graphqlevent_messageid Nullable(UInt64), -graphqlevent_name Nullable(String), -graphqlevent_timestamp Nullable(Datetime), -inputevent_label Nullable(String), -inputevent_messageid Nullable(UInt64), -inputevent_timestamp Nullable(Datetime), -inputevent_value Nullable(String), -inputevent_valuemasked Nullable(UInt8), -jsexception_message Nullable(String), -jsexception_name Nullable(String), -jsexception_payload Nullable(String), -longtasks_timestamp Nullable(Datetime), -longtasks_duration Nullable(UInt64), -longtasks_containerid Nullable(String), -longtasks_containersrc Nullable(String), -memoryissue_duration Nullable(UInt64), -memoryissue_rate Nullable(UInt64), -memoryissue_timestamp Nullable(Datetime), -metadata_key Nullable(String), -metadata_value Nullable(String), -mobx_payload Nullable(String), -mobx_type Nullable(String), -mouseclick_id Nullable(UInt64), 
-mouseclick_hesitationtime Nullable(UInt64), -mouseclick_label Nullable(String), -mousemove_x Nullable(UInt64), -mousemove_y Nullable(UInt64), -movenode_id Nullable(UInt64), -movenode_index Nullable(UInt64), -movenode_parentid Nullable(UInt64), -ngrx_action Nullable(String), -ngrx_duration Nullable(UInt64), -ngrx_state Nullable(String), -pageevent_domcontentloadedeventend Nullable(UInt64), -pageevent_domcontentloadedeventstart Nullable(UInt64), -pageevent_firstcontentfulpaint Nullable(UInt64), -pageevent_firstpaint Nullable(UInt64), -pageevent_loaded Nullable(UInt8), -pageevent_loadeventend Nullable(UInt64), -pageevent_loadeventstart Nullable(UInt64), -pageevent_messageid Nullable(UInt64), -pageevent_referrer Nullable(String), -pageevent_requeststart Nullable(UInt64), -pageevent_responseend Nullable(UInt64), -pageevent_responsestart Nullable(UInt64), -pageevent_speedindex Nullable(UInt64), -pageevent_timestamp Nullable(Datetime), -pageevent_url Nullable(String), -pageloadtiming_domcontentloadedeventend Nullable(UInt64), -pageloadtiming_domcontentloadedeventstart Nullable(UInt64), -pageloadtiming_firstcontentfulpaint Nullable(UInt64), -pageloadtiming_firstpaint Nullable(UInt64), -pageloadtiming_loadeventend Nullable(UInt64), -pageloadtiming_loadeventstart Nullable(UInt64), -pageloadtiming_requeststart Nullable(UInt64), -pageloadtiming_responseend Nullable(UInt64), -pageloadtiming_responsestart Nullable(UInt64), -pagerendertiming_speedindex Nullable(UInt64), -pagerendertiming_timetointeractive Nullable(UInt64), -pagerendertiming_visuallycomplete Nullable(UInt64), -performancetrack_frames Nullable(Int64), -performancetrack_ticks Nullable(Int64), -performancetrack_totaljsheapsize Nullable(UInt64), -performancetrack_usedjsheapsize Nullable(UInt64), -performancetrackaggr_avgcpu Nullable(UInt64), -performancetrackaggr_avgfps Nullable(UInt64), -performancetrackaggr_avgtotaljsheapsize Nullable(UInt64), -performancetrackaggr_avgusedjsheapsize Nullable(UInt64), 
-performancetrackaggr_maxcpu Nullable(UInt64), -performancetrackaggr_maxfps Nullable(UInt64), -performancetrackaggr_maxtotaljsheapsize Nullable(UInt64), -performancetrackaggr_maxusedjsheapsize Nullable(UInt64), -performancetrackaggr_mincpu Nullable(UInt64), -performancetrackaggr_minfps Nullable(UInt64), -performancetrackaggr_mintotaljsheapsize Nullable(UInt64), -performancetrackaggr_minusedjsheapsize Nullable(UInt64), -performancetrackaggr_timestampend Nullable(Datetime), -performancetrackaggr_timestampstart Nullable(Datetime), -profiler_args Nullable(String), -profiler_duration Nullable(UInt64), -profiler_name Nullable(String), -profiler_result Nullable(String), -rawcustomevent_name Nullable(String), -rawcustomevent_payload Nullable(String), -rawerrorevent_message Nullable(String), -rawerrorevent_name Nullable(String), -rawerrorevent_payload Nullable(String), -rawerrorevent_source Nullable(String), -rawerrorevent_timestamp Nullable(Datetime), -redux_action Nullable(String), -redux_duration Nullable(UInt64), -redux_state Nullable(String), -removenode_id Nullable(UInt64), -removenodeattribute_id Nullable(UInt64), -removenodeattribute_name Nullable(String), -resourceevent_decodedbodysize Nullable(UInt64), -resourceevent_duration Nullable(UInt64), -resourceevent_encodedbodysize Nullable(UInt64), -resourceevent_headersize Nullable(UInt64), -resourceevent_messageid Nullable(UInt64), -resourceevent_method Nullable(String), -resourceevent_status Nullable(UInt64), -resourceevent_success Nullable(UInt8), -resourceevent_timestamp Nullable(Datetime), -resourceevent_ttfb Nullable(UInt64), -resourceevent_type Nullable(String), -resourceevent_url Nullable(String), -resourcetiming_decodedbodysize Nullable(UInt64), -resourcetiming_duration Nullable(UInt64), -resourcetiming_encodedbodysize Nullable(UInt64), -resourcetiming_headersize Nullable(UInt64), -resourcetiming_initiator Nullable(String), -resourcetiming_timestamp Nullable(Datetime), -resourcetiming_ttfb Nullable(UInt64), 
-resourcetiming_url Nullable(String), -sessiondisconnect Nullable(UInt8), -sessiondisconnect_timestamp Nullable(Datetime), -sessionend Nullable(UInt8), -sessionend_timestamp Nullable(Datetime), -sessionstart_projectid Nullable(UInt64), -sessionstart_revid Nullable(String), -sessionstart_timestamp Nullable(Datetime), -sessionstart_trackerversion Nullable(String), -sessionstart_useragent Nullable(String), -sessionstart_userbrowser Nullable(String), -sessionstart_userbrowserversion Nullable(String), -sessionstart_usercountry Nullable(String), -sessionstart_userdevice Nullable(String), -sessionstart_userdeviceheapsize Nullable(UInt64), -sessionstart_userdevicememorysize Nullable(UInt64), -sessionstart_userdevicetype Nullable(String), -sessionstart_useros Nullable(String), -sessionstart_userosversion Nullable(String), -sessionstart_useruuid Nullable(String), -setcssdata_data Nullable(UInt64), -setcssdata_id Nullable(UInt64), -setinputchecked_checked Nullable(UInt64), -setinputchecked_id Nullable(UInt64), -setinputtarget_id Nullable(UInt64), -setinputtarget_label Nullable(UInt64), -setinputvalue_id Nullable(UInt64), -setinputvalue_mask Nullable(UInt64), -setinputvalue_value Nullable(UInt64), -setnodeattribute_id Nullable(UInt64), -setnodeattribute_name Nullable(UInt64), -setnodeattribute_value Nullable(UInt64), -setnodedata_data Nullable(UInt64), -setnodedata_id Nullable(UInt64), -setnodescroll_id Nullable(UInt64), -setnodescroll_x Nullable(Int64), -setnodescroll_y Nullable(Int64), -setpagelocation_navigationstart Nullable(UInt64), -setpagelocation_referrer Nullable(String), -setpagelocation_url Nullable(String), -setpagevisibility_hidden Nullable(UInt8), -setviewportscroll_x Nullable(Int64), -setviewportscroll_y Nullable(Int64), -setviewportsize_height Nullable(UInt64), -setviewportsize_width Nullable(UInt64), -stateaction_type Nullable(String), -stateactionevent_messageid Nullable(UInt64), -stateactionevent_timestamp Nullable(Datetime), -stateactionevent_type 
Nullable(String), -timestamp_timestamp Nullable(Datetime), -useranonymousid_id Nullable(String), -userid_id Nullable(String), -vuex_mutation Nullable(String), -vuex_state Nullable(String), -received_at Datetime, -batch_order_number Int64) -ENGINE = Buffer(default, negatives, 16, 10, 120, 10000, 1000000, 10000, 100000000); +CREATE TABLE IF NOT EXISTS negatives_buffer +( + sessionid UInt64, + clickevent_hesitationtime Nullable(UInt64), + clickevent_label Nullable(String), + clickevent_messageid Nullable(UInt64), + clickevent_timestamp Nullable(Datetime), + connectioninformation_downlink Nullable(UInt64), + connectioninformation_type Nullable(String), + consolelog_level Nullable(String), + consolelog_value Nullable(String), + cpuissue_duration Nullable(UInt64), + cpuissue_rate Nullable(UInt64), + cpuissue_timestamp Nullable(Datetime), + createdocument Nullable(UInt8), + createelementnode_id Nullable(UInt64), + createelementnode_parentid Nullable(UInt64), + cssdeleterule_index Nullable(UInt64), + cssdeleterule_stylesheetid Nullable(UInt64), + cssinsertrule_index Nullable(UInt64), + cssinsertrule_rule Nullable(String), + cssinsertrule_stylesheetid Nullable(UInt64), + customevent_messageid Nullable(UInt64), + customevent_name Nullable(String), + customevent_payload Nullable(String), + customevent_timestamp Nullable(Datetime), + domdrop_timestamp Nullable(Datetime), + errorevent_message Nullable(String), + errorevent_messageid Nullable(UInt64), + errorevent_name Nullable(String), + errorevent_payload Nullable(String), + errorevent_source Nullable(String), + errorevent_timestamp Nullable(Datetime), + fetch_duration Nullable(UInt64), + fetch_method Nullable(String), + fetch_request Nullable(String), + fetch_status Nullable(UInt64), + fetch_timestamp Nullable(Datetime), + fetch_url Nullable(String), + graphql_operationkind Nullable(String), + graphql_operationname Nullable(String), + graphql_response Nullable(String), + graphql_variables Nullable(String), + 
graphqlevent_messageid Nullable(UInt64), + graphqlevent_name Nullable(String), + graphqlevent_timestamp Nullable(Datetime), + inputevent_label Nullable(String), + inputevent_messageid Nullable(UInt64), + inputevent_timestamp Nullable(Datetime), + inputevent_value Nullable(String), + inputevent_valuemasked Nullable(UInt8), + jsexception_message Nullable(String), + jsexception_name Nullable(String), + jsexception_payload Nullable(String), + longtasks_timestamp Nullable(Datetime), + longtasks_duration Nullable(UInt64), + longtasks_containerid Nullable(String), + longtasks_containersrc Nullable(String), + memoryissue_duration Nullable(UInt64), + memoryissue_rate Nullable(UInt64), + memoryissue_timestamp Nullable(Datetime), + metadata_key Nullable(String), + metadata_value Nullable(String), + mobx_payload Nullable(String), + mobx_type Nullable(String), + mouseclick_id Nullable(UInt64), + mouseclick_hesitationtime Nullable(UInt64), + mouseclick_label Nullable(String), + mousemove_x Nullable(UInt64), + mousemove_y Nullable(UInt64), + movenode_id Nullable(UInt64), + movenode_index Nullable(UInt64), + movenode_parentid Nullable(UInt64), + ngrx_action Nullable(String), + ngrx_duration Nullable(UInt64), + ngrx_state Nullable(String), + pageevent_domcontentloadedeventend Nullable(UInt64), + pageevent_domcontentloadedeventstart Nullable(UInt64), + pageevent_firstcontentfulpaint Nullable(UInt64), + pageevent_firstpaint Nullable(UInt64), + pageevent_loaded Nullable(UInt8), + pageevent_loadeventend Nullable(UInt64), + pageevent_loadeventstart Nullable(UInt64), + pageevent_messageid Nullable(UInt64), + pageevent_referrer Nullable(String), + pageevent_requeststart Nullable(UInt64), + pageevent_responseend Nullable(UInt64), + pageevent_responsestart Nullable(UInt64), + pageevent_speedindex Nullable(UInt64), + pageevent_timestamp Nullable(Datetime), + pageevent_url Nullable(String), + pageloadtiming_domcontentloadedeventend Nullable(UInt64), + pageloadtiming_domcontentloadedeventstart 
Nullable(UInt64), + pageloadtiming_firstcontentfulpaint Nullable(UInt64), + pageloadtiming_firstpaint Nullable(UInt64), + pageloadtiming_loadeventend Nullable(UInt64), + pageloadtiming_loadeventstart Nullable(UInt64), + pageloadtiming_requeststart Nullable(UInt64), + pageloadtiming_responseend Nullable(UInt64), + pageloadtiming_responsestart Nullable(UInt64), + pagerendertiming_speedindex Nullable(UInt64), + pagerendertiming_timetointeractive Nullable(UInt64), + pagerendertiming_visuallycomplete Nullable(UInt64), + performancetrack_frames Nullable(Int64), + performancetrack_ticks Nullable(Int64), + performancetrack_totaljsheapsize Nullable(UInt64), + performancetrack_usedjsheapsize Nullable(UInt64), + performancetrackaggr_avgcpu Nullable(UInt64), + performancetrackaggr_avgfps Nullable(UInt64), + performancetrackaggr_avgtotaljsheapsize Nullable(UInt64), + performancetrackaggr_avgusedjsheapsize Nullable(UInt64), + performancetrackaggr_maxcpu Nullable(UInt64), + performancetrackaggr_maxfps Nullable(UInt64), + performancetrackaggr_maxtotaljsheapsize Nullable(UInt64), + performancetrackaggr_maxusedjsheapsize Nullable(UInt64), + performancetrackaggr_mincpu Nullable(UInt64), + performancetrackaggr_minfps Nullable(UInt64), + performancetrackaggr_mintotaljsheapsize Nullable(UInt64), + performancetrackaggr_minusedjsheapsize Nullable(UInt64), + performancetrackaggr_timestampend Nullable(Datetime), + performancetrackaggr_timestampstart Nullable(Datetime), + profiler_args Nullable(String), + profiler_duration Nullable(UInt64), + profiler_name Nullable(String), + profiler_result Nullable(String), + rawcustomevent_name Nullable(String), + rawcustomevent_payload Nullable(String), + rawerrorevent_message Nullable(String), + rawerrorevent_name Nullable(String), + rawerrorevent_payload Nullable(String), + rawerrorevent_source Nullable(String), + rawerrorevent_timestamp Nullable(Datetime), + redux_action Nullable(String), + redux_duration Nullable(UInt64), + redux_state 
Nullable(String), + removenode_id Nullable(UInt64), + removenodeattribute_id Nullable(UInt64), + removenodeattribute_name Nullable(String), + resourceevent_decodedbodysize Nullable(UInt64), + resourceevent_duration Nullable(UInt64), + resourceevent_encodedbodysize Nullable(UInt64), + resourceevent_headersize Nullable(UInt64), + resourceevent_messageid Nullable(UInt64), + resourceevent_method Nullable(String), + resourceevent_status Nullable(UInt64), + resourceevent_success Nullable(UInt8), + resourceevent_timestamp Nullable(Datetime), + resourceevent_ttfb Nullable(UInt64), + resourceevent_type Nullable(String), + resourceevent_url Nullable(String), + resourcetiming_decodedbodysize Nullable(UInt64), + resourcetiming_duration Nullable(UInt64), + resourcetiming_encodedbodysize Nullable(UInt64), + resourcetiming_headersize Nullable(UInt64), + resourcetiming_initiator Nullable(String), + resourcetiming_timestamp Nullable(Datetime), + resourcetiming_ttfb Nullable(UInt64), + resourcetiming_url Nullable(String), + sessiondisconnect Nullable(UInt8), + sessiondisconnect_timestamp Nullable(Datetime), + sessionend Nullable(UInt8), + sessionend_timestamp Nullable(Datetime), + sessionstart_projectid Nullable(UInt64), + sessionstart_revid Nullable(String), + sessionstart_timestamp Nullable(Datetime), + sessionstart_trackerversion Nullable(String), + sessionstart_useragent Nullable(String), + sessionstart_userbrowser Nullable(String), + sessionstart_userbrowserversion Nullable(String), + sessionstart_usercountry Nullable(String), + sessionstart_userdevice Nullable(String), + sessionstart_userdeviceheapsize Nullable(UInt64), + sessionstart_userdevicememorysize Nullable(UInt64), + sessionstart_userdevicetype Nullable(String), + sessionstart_useros Nullable(String), + sessionstart_userosversion Nullable(String), + sessionstart_useruuid Nullable(String), + setcssdata_data Nullable(UInt64), + setcssdata_id Nullable(UInt64), + setinputchecked_checked Nullable(UInt64), + 
setinputchecked_id Nullable(UInt64), + setinputtarget_id Nullable(UInt64), + setinputtarget_label Nullable(UInt64), + setinputvalue_id Nullable(UInt64), + setinputvalue_mask Nullable(UInt64), + setinputvalue_value Nullable(UInt64), + setnodeattribute_id Nullable(UInt64), + setnodeattribute_name Nullable(UInt64), + setnodeattribute_value Nullable(UInt64), + setnodedata_data Nullable(UInt64), + setnodedata_id Nullable(UInt64), + setnodescroll_id Nullable(UInt64), + setnodescroll_x Nullable(Int64), + setnodescroll_y Nullable(Int64), + setpagelocation_navigationstart Nullable(UInt64), + setpagelocation_referrer Nullable(String), + setpagelocation_url Nullable(String), + setpagevisibility_hidden Nullable(UInt8), + setviewportscroll_x Nullable(Int64), + setviewportscroll_y Nullable(Int64), + setviewportsize_height Nullable(UInt64), + setviewportsize_width Nullable(UInt64), + stateaction_type Nullable(String), + stateactionevent_messageid Nullable(UInt64), + stateactionevent_timestamp Nullable(Datetime), + stateactionevent_type Nullable(String), + timestamp_timestamp Nullable(Datetime), + useranonymousid_id Nullable(String), + userid_id Nullable(String), + vuex_mutation Nullable(String), + vuex_state Nullable(String), + received_at Datetime, + batch_order_number Int64 +) + ENGINE = Buffer(default, negatives, 16, 10, 120, 10000, 1000000, 10000, 100000000); diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/negatives_creation_clickhouse.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/negatives_creation_clickhouse.sql index 7b69a63cb..361082d7c 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/negatives_creation_clickhouse.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/negatives_creation_clickhouse.sql @@ -1,216 +1,218 @@ -create table negatives ( -sessionid UInt64, -clickevent_hesitationtime Nullable(UInt64), -clickevent_label Nullable(String), -clickevent_messageid Nullable(UInt64), -clickevent_timestamp Nullable(Datetime), 
-connectioninformation_downlink Nullable(UInt64), -connectioninformation_type Nullable(String), -consolelog_level Nullable(String), -consolelog_value Nullable(String), -cpuissue_duration Nullable(UInt64), -cpuissue_rate Nullable(UInt64), -cpuissue_timestamp Nullable(Datetime), -createdocument Nullable(UInt8), -createelementnode_id Nullable(UInt64), -createelementnode_parentid Nullable(UInt64), -cssdeleterule_index Nullable(UInt64), -cssdeleterule_stylesheetid Nullable(UInt64), -cssinsertrule_index Nullable(UInt64), -cssinsertrule_rule Nullable(String), -cssinsertrule_stylesheetid Nullable(UInt64), -customevent_messageid Nullable(UInt64), -customevent_name Nullable(String), -customevent_payload Nullable(String), -customevent_timestamp Nullable(Datetime), -domdrop_timestamp Nullable(Datetime), -errorevent_message Nullable(String), -errorevent_messageid Nullable(UInt64), -errorevent_name Nullable(String), -errorevent_payload Nullable(String), -errorevent_source Nullable(String), -errorevent_timestamp Nullable(Datetime), -fetch_duration Nullable(UInt64), -fetch_method Nullable(String), -fetch_request Nullable(String), -fetch_status Nullable(UInt64), -fetch_timestamp Nullable(Datetime), -fetch_url Nullable(String), -graphql_operationkind Nullable(String), -graphql_operationname Nullable(String), -graphql_response Nullable(String), -graphql_variables Nullable(String), -graphqlevent_messageid Nullable(UInt64), -graphqlevent_name Nullable(String), -graphqlevent_timestamp Nullable(Datetime), -inputevent_label Nullable(String), -inputevent_messageid Nullable(UInt64), -inputevent_timestamp Nullable(Datetime), -inputevent_value Nullable(String), -inputevent_valuemasked Nullable(UInt8), -jsexception_message Nullable(String), -jsexception_name Nullable(String), -jsexception_payload Nullable(String), -longtasks_timestamp Nullable(Datetime), -longtasks_duration Nullable(UInt64), -longtasks_containerid Nullable(String), -longtasks_containersrc Nullable(String), 
-memoryissue_duration Nullable(UInt64), -memoryissue_rate Nullable(UInt64), -memoryissue_timestamp Nullable(Datetime), -metadata_key Nullable(String), -metadata_value Nullable(String), -mobx_payload Nullable(String), -mobx_type Nullable(String), -mouseclick_id Nullable(UInt64), -mouseclick_hesitationtime Nullable(UInt64), -mouseclick_label Nullable(String), -mousemove_x Nullable(UInt64), -mousemove_y Nullable(UInt64), -movenode_id Nullable(UInt64), -movenode_index Nullable(UInt64), -movenode_parentid Nullable(UInt64), -ngrx_action Nullable(String), -ngrx_duration Nullable(UInt64), -ngrx_state Nullable(String), -pageevent_domcontentloadedeventend Nullable(UInt64), -pageevent_domcontentloadedeventstart Nullable(UInt64), -pageevent_firstcontentfulpaint Nullable(UInt64), -pageevent_firstpaint Nullable(UInt64), -pageevent_loaded Nullable(UInt8), -pageevent_loadeventend Nullable(UInt64), -pageevent_loadeventstart Nullable(UInt64), -pageevent_messageid Nullable(UInt64), -pageevent_referrer Nullable(String), -pageevent_requeststart Nullable(UInt64), -pageevent_responseend Nullable(UInt64), -pageevent_responsestart Nullable(UInt64), -pageevent_speedindex Nullable(UInt64), -pageevent_timestamp Nullable(Datetime), -pageevent_url Nullable(String), -pageloadtiming_domcontentloadedeventend Nullable(UInt64), -pageloadtiming_domcontentloadedeventstart Nullable(UInt64), -pageloadtiming_firstcontentfulpaint Nullable(UInt64), -pageloadtiming_firstpaint Nullable(UInt64), -pageloadtiming_loadeventend Nullable(UInt64), -pageloadtiming_loadeventstart Nullable(UInt64), -pageloadtiming_requeststart Nullable(UInt64), -pageloadtiming_responseend Nullable(UInt64), -pageloadtiming_responsestart Nullable(UInt64), -pagerendertiming_speedindex Nullable(UInt64), -pagerendertiming_timetointeractive Nullable(UInt64), -pagerendertiming_visuallycomplete Nullable(UInt64), -performancetrack_frames Nullable(Int64), -performancetrack_ticks Nullable(Int64), -performancetrack_totaljsheapsize 
Nullable(UInt64), -performancetrack_usedjsheapsize Nullable(UInt64), -performancetrackaggr_avgcpu Nullable(UInt64), -performancetrackaggr_avgfps Nullable(UInt64), -performancetrackaggr_avgtotaljsheapsize Nullable(UInt64), -performancetrackaggr_avgusedjsheapsize Nullable(UInt64), -performancetrackaggr_maxcpu Nullable(UInt64), -performancetrackaggr_maxfps Nullable(UInt64), -performancetrackaggr_maxtotaljsheapsize Nullable(UInt64), -performancetrackaggr_maxusedjsheapsize Nullable(UInt64), -performancetrackaggr_mincpu Nullable(UInt64), -performancetrackaggr_minfps Nullable(UInt64), -performancetrackaggr_mintotaljsheapsize Nullable(UInt64), -performancetrackaggr_minusedjsheapsize Nullable(UInt64), -performancetrackaggr_timestampend Nullable(Datetime), -performancetrackaggr_timestampstart Nullable(Datetime), -profiler_args Nullable(String), -profiler_duration Nullable(UInt64), -profiler_name Nullable(String), -profiler_result Nullable(String), -rawcustomevent_name Nullable(String), -rawcustomevent_payload Nullable(String), -rawerrorevent_message Nullable(String), -rawerrorevent_name Nullable(String), -rawerrorevent_payload Nullable(String), -rawerrorevent_source Nullable(String), -rawerrorevent_timestamp Nullable(Datetime), -redux_action Nullable(String), -redux_duration Nullable(UInt64), -redux_state Nullable(String), -removenode_id Nullable(UInt64), -removenodeattribute_id Nullable(UInt64), -removenodeattribute_name Nullable(String), -resourceevent_decodedbodysize Nullable(UInt64), -resourceevent_duration Nullable(UInt64), -resourceevent_encodedbodysize Nullable(UInt64), -resourceevent_headersize Nullable(UInt64), -resourceevent_messageid Nullable(UInt64), -resourceevent_method Nullable(String), -resourceevent_status Nullable(UInt64), -resourceevent_success Nullable(UInt8), -resourceevent_timestamp Nullable(Datetime), -resourceevent_ttfb Nullable(UInt64), -resourceevent_type Nullable(String), -resourceevent_url Nullable(String), -resourcetiming_decodedbodysize 
Nullable(UInt64), -resourcetiming_duration Nullable(UInt64), -resourcetiming_encodedbodysize Nullable(UInt64), -resourcetiming_headersize Nullable(UInt64), -resourcetiming_initiator Nullable(String), -resourcetiming_timestamp Nullable(Datetime), -resourcetiming_ttfb Nullable(UInt64), -resourcetiming_url Nullable(String), -sessiondisconnect Nullable(UInt8), -sessiondisconnect_timestamp Nullable(Datetime), -sessionend Nullable(UInt8), -sessionend_timestamp Nullable(Datetime), -sessionstart_projectid Nullable(UInt64), -sessionstart_revid Nullable(String), -sessionstart_timestamp Nullable(Datetime), -sessionstart_trackerversion Nullable(String), -sessionstart_useragent Nullable(String), -sessionstart_userbrowser Nullable(String), -sessionstart_userbrowserversion Nullable(String), -sessionstart_usercountry Nullable(String), -sessionstart_userdevice Nullable(String), -sessionstart_userdeviceheapsize Nullable(UInt64), -sessionstart_userdevicememorysize Nullable(UInt64), -sessionstart_userdevicetype Nullable(String), -sessionstart_useros Nullable(String), -sessionstart_userosversion Nullable(String), -sessionstart_useruuid Nullable(String), -setcssdata_data Nullable(UInt64), -setcssdata_id Nullable(UInt64), -setinputchecked_checked Nullable(UInt64), -setinputchecked_id Nullable(UInt64), -setinputtarget_id Nullable(UInt64), -setinputtarget_label Nullable(UInt64), -setinputvalue_id Nullable(UInt64), -setinputvalue_mask Nullable(UInt64), -setinputvalue_value Nullable(UInt64), -setnodeattribute_id Nullable(UInt64), -setnodeattribute_name Nullable(UInt64), -setnodeattribute_value Nullable(UInt64), -setnodedata_data Nullable(UInt64), -setnodedata_id Nullable(UInt64), -setnodescroll_id Nullable(UInt64), -setnodescroll_x Nullable(Int64), -setnodescroll_y Nullable(Int64), -setpagelocation_navigationstart Nullable(UInt64), -setpagelocation_referrer Nullable(String), -setpagelocation_url Nullable(String), -setpagevisibility_hidden Nullable(UInt8), -setviewportscroll_x 
Nullable(Int64), -setviewportscroll_y Nullable(Int64), -setviewportsize_height Nullable(UInt64), -setviewportsize_width Nullable(UInt64), -stateaction_type Nullable(String), -stateactionevent_messageid Nullable(UInt64), -stateactionevent_timestamp Nullable(Datetime), -stateactionevent_type Nullable(String), -timestamp_timestamp Nullable(Datetime), -useranonymousid_id Nullable(String), -userid_id Nullable(String), -vuex_mutation Nullable(String), -vuex_state Nullable(String), -received_at Datetime, -batch_order_number Int64) -ENGINE = MergeTree() -PARTITION BY toYYYYMM(received_at) -ORDER BY (received_at, batch_order_number) -SETTINGS min_bytes_for_wide_part=1, use_minimalistic_part_header_in_zookeeper=1; \ No newline at end of file +CREATE TABLE IF NOT EXISTS negatives +( + sessionid UInt64, + clickevent_hesitationtime Nullable(UInt64), + clickevent_label Nullable(String), + clickevent_messageid Nullable(UInt64), + clickevent_timestamp Nullable(Datetime), + connectioninformation_downlink Nullable(UInt64), + connectioninformation_type Nullable(String), + consolelog_level Nullable(String), + consolelog_value Nullable(String), + cpuissue_duration Nullable(UInt64), + cpuissue_rate Nullable(UInt64), + cpuissue_timestamp Nullable(Datetime), + createdocument Nullable(UInt8), + createelementnode_id Nullable(UInt64), + createelementnode_parentid Nullable(UInt64), + cssdeleterule_index Nullable(UInt64), + cssdeleterule_stylesheetid Nullable(UInt64), + cssinsertrule_index Nullable(UInt64), + cssinsertrule_rule Nullable(String), + cssinsertrule_stylesheetid Nullable(UInt64), + customevent_messageid Nullable(UInt64), + customevent_name Nullable(String), + customevent_payload Nullable(String), + customevent_timestamp Nullable(Datetime), + domdrop_timestamp Nullable(Datetime), + errorevent_message Nullable(String), + errorevent_messageid Nullable(UInt64), + errorevent_name Nullable(String), + errorevent_payload Nullable(String), + errorevent_source Nullable(String), + 
errorevent_timestamp Nullable(Datetime), + fetch_duration Nullable(UInt64), + fetch_method Nullable(String), + fetch_request Nullable(String), + fetch_status Nullable(UInt64), + fetch_timestamp Nullable(Datetime), + fetch_url Nullable(String), + graphql_operationkind Nullable(String), + graphql_operationname Nullable(String), + graphql_response Nullable(String), + graphql_variables Nullable(String), + graphqlevent_messageid Nullable(UInt64), + graphqlevent_name Nullable(String), + graphqlevent_timestamp Nullable(Datetime), + inputevent_label Nullable(String), + inputevent_messageid Nullable(UInt64), + inputevent_timestamp Nullable(Datetime), + inputevent_value Nullable(String), + inputevent_valuemasked Nullable(UInt8), + jsexception_message Nullable(String), + jsexception_name Nullable(String), + jsexception_payload Nullable(String), + longtasks_timestamp Nullable(Datetime), + longtasks_duration Nullable(UInt64), + longtasks_containerid Nullable(String), + longtasks_containersrc Nullable(String), + memoryissue_duration Nullable(UInt64), + memoryissue_rate Nullable(UInt64), + memoryissue_timestamp Nullable(Datetime), + metadata_key Nullable(String), + metadata_value Nullable(String), + mobx_payload Nullable(String), + mobx_type Nullable(String), + mouseclick_id Nullable(UInt64), + mouseclick_hesitationtime Nullable(UInt64), + mouseclick_label Nullable(String), + mousemove_x Nullable(UInt64), + mousemove_y Nullable(UInt64), + movenode_id Nullable(UInt64), + movenode_index Nullable(UInt64), + movenode_parentid Nullable(UInt64), + ngrx_action Nullable(String), + ngrx_duration Nullable(UInt64), + ngrx_state Nullable(String), + pageevent_domcontentloadedeventend Nullable(UInt64), + pageevent_domcontentloadedeventstart Nullable(UInt64), + pageevent_firstcontentfulpaint Nullable(UInt64), + pageevent_firstpaint Nullable(UInt64), + pageevent_loaded Nullable(UInt8), + pageevent_loadeventend Nullable(UInt64), + pageevent_loadeventstart Nullable(UInt64), + pageevent_messageid 
Nullable(UInt64), + pageevent_referrer Nullable(String), + pageevent_requeststart Nullable(UInt64), + pageevent_responseend Nullable(UInt64), + pageevent_responsestart Nullable(UInt64), + pageevent_speedindex Nullable(UInt64), + pageevent_timestamp Nullable(Datetime), + pageevent_url Nullable(String), + pageloadtiming_domcontentloadedeventend Nullable(UInt64), + pageloadtiming_domcontentloadedeventstart Nullable(UInt64), + pageloadtiming_firstcontentfulpaint Nullable(UInt64), + pageloadtiming_firstpaint Nullable(UInt64), + pageloadtiming_loadeventend Nullable(UInt64), + pageloadtiming_loadeventstart Nullable(UInt64), + pageloadtiming_requeststart Nullable(UInt64), + pageloadtiming_responseend Nullable(UInt64), + pageloadtiming_responsestart Nullable(UInt64), + pagerendertiming_speedindex Nullable(UInt64), + pagerendertiming_timetointeractive Nullable(UInt64), + pagerendertiming_visuallycomplete Nullable(UInt64), + performancetrack_frames Nullable(Int64), + performancetrack_ticks Nullable(Int64), + performancetrack_totaljsheapsize Nullable(UInt64), + performancetrack_usedjsheapsize Nullable(UInt64), + performancetrackaggr_avgcpu Nullable(UInt64), + performancetrackaggr_avgfps Nullable(UInt64), + performancetrackaggr_avgtotaljsheapsize Nullable(UInt64), + performancetrackaggr_avgusedjsheapsize Nullable(UInt64), + performancetrackaggr_maxcpu Nullable(UInt64), + performancetrackaggr_maxfps Nullable(UInt64), + performancetrackaggr_maxtotaljsheapsize Nullable(UInt64), + performancetrackaggr_maxusedjsheapsize Nullable(UInt64), + performancetrackaggr_mincpu Nullable(UInt64), + performancetrackaggr_minfps Nullable(UInt64), + performancetrackaggr_mintotaljsheapsize Nullable(UInt64), + performancetrackaggr_minusedjsheapsize Nullable(UInt64), + performancetrackaggr_timestampend Nullable(Datetime), + performancetrackaggr_timestampstart Nullable(Datetime), + profiler_args Nullable(String), + profiler_duration Nullable(UInt64), + profiler_name Nullable(String), + profiler_result 
Nullable(String), + rawcustomevent_name Nullable(String), + rawcustomevent_payload Nullable(String), + rawerrorevent_message Nullable(String), + rawerrorevent_name Nullable(String), + rawerrorevent_payload Nullable(String), + rawerrorevent_source Nullable(String), + rawerrorevent_timestamp Nullable(Datetime), + redux_action Nullable(String), + redux_duration Nullable(UInt64), + redux_state Nullable(String), + removenode_id Nullable(UInt64), + removenodeattribute_id Nullable(UInt64), + removenodeattribute_name Nullable(String), + resourceevent_decodedbodysize Nullable(UInt64), + resourceevent_duration Nullable(UInt64), + resourceevent_encodedbodysize Nullable(UInt64), + resourceevent_headersize Nullable(UInt64), + resourceevent_messageid Nullable(UInt64), + resourceevent_method Nullable(String), + resourceevent_status Nullable(UInt64), + resourceevent_success Nullable(UInt8), + resourceevent_timestamp Nullable(Datetime), + resourceevent_ttfb Nullable(UInt64), + resourceevent_type Nullable(String), + resourceevent_url Nullable(String), + resourcetiming_decodedbodysize Nullable(UInt64), + resourcetiming_duration Nullable(UInt64), + resourcetiming_encodedbodysize Nullable(UInt64), + resourcetiming_headersize Nullable(UInt64), + resourcetiming_initiator Nullable(String), + resourcetiming_timestamp Nullable(Datetime), + resourcetiming_ttfb Nullable(UInt64), + resourcetiming_url Nullable(String), + sessiondisconnect Nullable(UInt8), + sessiondisconnect_timestamp Nullable(Datetime), + sessionend Nullable(UInt8), + sessionend_timestamp Nullable(Datetime), + sessionstart_projectid Nullable(UInt64), + sessionstart_revid Nullable(String), + sessionstart_timestamp Nullable(Datetime), + sessionstart_trackerversion Nullable(String), + sessionstart_useragent Nullable(String), + sessionstart_userbrowser Nullable(String), + sessionstart_userbrowserversion Nullable(String), + sessionstart_usercountry Nullable(String), + sessionstart_userdevice Nullable(String), + 
sessionstart_userdeviceheapsize Nullable(UInt64), + sessionstart_userdevicememorysize Nullable(UInt64), + sessionstart_userdevicetype Nullable(String), + sessionstart_useros Nullable(String), + sessionstart_userosversion Nullable(String), + sessionstart_useruuid Nullable(String), + setcssdata_data Nullable(UInt64), + setcssdata_id Nullable(UInt64), + setinputchecked_checked Nullable(UInt64), + setinputchecked_id Nullable(UInt64), + setinputtarget_id Nullable(UInt64), + setinputtarget_label Nullable(UInt64), + setinputvalue_id Nullable(UInt64), + setinputvalue_mask Nullable(UInt64), + setinputvalue_value Nullable(UInt64), + setnodeattribute_id Nullable(UInt64), + setnodeattribute_name Nullable(UInt64), + setnodeattribute_value Nullable(UInt64), + setnodedata_data Nullable(UInt64), + setnodedata_id Nullable(UInt64), + setnodescroll_id Nullable(UInt64), + setnodescroll_x Nullable(Int64), + setnodescroll_y Nullable(Int64), + setpagelocation_navigationstart Nullable(UInt64), + setpagelocation_referrer Nullable(String), + setpagelocation_url Nullable(String), + setpagevisibility_hidden Nullable(UInt8), + setviewportscroll_x Nullable(Int64), + setviewportscroll_y Nullable(Int64), + setviewportsize_height Nullable(UInt64), + setviewportsize_width Nullable(UInt64), + stateaction_type Nullable(String), + stateactionevent_messageid Nullable(UInt64), + stateactionevent_timestamp Nullable(Datetime), + stateactionevent_type Nullable(String), + timestamp_timestamp Nullable(Datetime), + useranonymousid_id Nullable(String), + userid_id Nullable(String), + vuex_mutation Nullable(String), + vuex_state Nullable(String), + received_at Datetime, + batch_order_number Int64 +) + ENGINE = MergeTree() + PARTITION BY toYYYYMM(received_at) + ORDER BY (received_at, batch_order_number) + SETTINGS min_bytes_for_wide_part = 1, use_minimalistic_part_header_in_zookeeper = 1; \ No newline at end of file diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/pages.sql 
b/ee/scripts/helm/db/init_dbs/clickhouse/create/pages.sql index 832f3423d..71d9503cf 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/pages.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/pages.sql @@ -1,38 +1,39 @@ -CREATE TABLE pages ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), - user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 
'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), - datetime DateTime, - url String, - url_host String MATERIALIZED lower(domain(url)), - url_path String MATERIALIZED lower(pathFull(url)), - request_start Nullable(UInt16), - response_start Nullable(UInt16), - response_end Nullable(UInt16), - dom_content_loaded_event_start Nullable(UInt16), - dom_content_loaded_event_end Nullable(UInt16), - load_event_start Nullable(UInt16), - load_event_end Nullable(UInt16), - first_paint Nullable(UInt16), - first_contentful_paint Nullable(UInt16), - speed_index Nullable(UInt16), - visually_complete Nullable(UInt16), - time_to_interactive Nullable(UInt16), - ttfb Nullable(UInt16) MATERIALIZED if(greaterOrEquals(response_start, request_start), minus(response_start, request_start), Null), - ttlb Nullable(UInt16) MATERIALIZED if(greaterOrEquals(response_end, request_start), minus(response_end, request_start), Null), - response_time Nullable(UInt16) MATERIALIZED if(greaterOrEquals(response_end, response_start), minus(response_end, 
response_start), Null), - dom_building_time Nullable(UInt16) MATERIALIZED if(greaterOrEquals(dom_content_loaded_event_start, response_end), minus(dom_content_loaded_event_start, response_end), Null), - dom_content_loaded_event_time Nullable(UInt16) MATERIALIZED if(greaterOrEquals(dom_content_loaded_event_end, dom_content_loaded_event_start), minus(dom_content_loaded_event_end, dom_content_loaded_event_start), Null), - load_event_time Nullable(UInt16) MATERIALIZED if(greaterOrEquals(load_event_end, load_event_start), minus(load_event_end, load_event_start), Null) +CREATE TABLE IF NOT EXISTS pages +( + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 
'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + url String, + url_host String MATERIALIZED lower(domain (url)), + url_path String MATERIALIZED lower(pathFull(url)), + request_start Nullable(UInt16), + response_start Nullable(UInt16), + response_end Nullable(UInt16), + dom_content_loaded_event_start Nullable(UInt16), + dom_content_loaded_event_end Nullable(UInt16), + load_event_start Nullable(UInt16), + load_event_end Nullable(UInt16), + first_paint Nullable(UInt16), + first_contentful_paint Nullable(UInt16), + speed_index Nullable(UInt16), + visually_complete Nullable(UInt16), + time_to_interactive Nullable(UInt16), + ttfb 
Nullable(UInt16) MATERIALIZED if (greaterOrEquals(response_start, request_start), minus(response_start, request_start), Null), + ttlb Nullable(UInt16) MATERIALIZED if (greaterOrEquals(response_end, request_start), minus(response_end, request_start), Null), + response_time Nullable(UInt16) MATERIALIZED if (greaterOrEquals(response_end, response_start), minus(response_end, response_start), Null), + dom_building_time Nullable(UInt16) MATERIALIZED if (greaterOrEquals(dom_content_loaded_event_start, response_end), minus(dom_content_loaded_event_start, response_end), Null), + dom_content_loaded_event_time Nullable(UInt16) MATERIALIZED if (greaterOrEquals(dom_content_loaded_event_end, dom_content_loaded_event_start), minus(dom_content_loaded_event_end, dom_content_loaded_event_start), Null), + load_event_time Nullable(UInt16) MATERIALIZED if (greaterOrEquals(load_event_end, load_event_start), minus(load_event_end, load_event_start), Null) ) ENGINE = MergeTree PARTITION BY toDate(datetime) ORDER BY (project_id, datetime) diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/performance.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/performance.sql index f05b79ff2..fa64967f4 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/performance.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/performance.sql @@ -1,30 +1,31 @@ -CREATE TABLE performance ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), - user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 
'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 
'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), - datetime DateTime, - min_fps UInt8, - avg_fps UInt8, - max_fps UInt8, - min_cpu UInt8, - avg_cpu UInt8, - max_cpu UInt8, - min_total_js_heap_size UInt64, - avg_total_js_heap_size UInt64, - max_total_js_heap_size UInt64, - min_used_js_heap_size UInt64, - avg_used_js_heap_size UInt64, - max_used_js_heap_size UInt64 +CREATE TABLE IF NOT EXISTS performance +( + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 
'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + min_fps UInt8, + avg_fps UInt8, + max_fps UInt8, + min_cpu UInt8, + avg_cpu UInt8, + max_cpu UInt8, + min_total_js_heap_size UInt64, + avg_total_js_heap_size UInt64, + max_total_js_heap_size UInt64, + min_used_js_heap_size UInt64, + avg_used_js_heap_size UInt64, + max_used_js_heap_size UInt64 ) ENGINE = MergeTree -PARTITION BY toDate(datetime) -ORDER BY (project_id, datetime) -TTL datetime + INTERVAL 1 MONTH; + PARTITION BY toDate(datetime) + ORDER BY (project_id, datetime) + TTL datetime + INTERVAL 1 MONTH; diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/resources.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/resources.sql index 7b50443a3..cc2c7cd6d 100644 
--- a/ee/scripts/helm/db/init_dbs/clickhouse/create/resources.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/resources.sql @@ -1,31 +1,32 @@ -CREATE TABLE resources ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), - user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 
'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), - datetime DateTime, - url String, - url_host String MATERIALIZED lower(domain(url)), - url_hostpath String MATERIALIZED concat(url_host, lower(path(url))), - type Enum8('other'=-1, 'script'=0, 'stylesheet'=1, 'fetch'=2, 'img'=3, 'media'=4), - duration Nullable(UInt16), - ttfb Nullable(UInt16), - header_size Nullable(UInt16), - encoded_body_size Nullable(UInt32), - decoded_body_size Nullable(UInt32), - compression_ratio Nullable(Float32) MATERIALIZED divide(decoded_body_size, encoded_body_size), - success UInt8, - method Nullable(Enum8('GET' = 0, 'HEAD' = 1, 'POST' = 2, 'PUT' = 3, 'DELETE' = 4, 'CONNECT' = 5, 'OPTIONS' = 6, 'TRACE' = 7, 'PATCH' = 8)), - status Nullable(UInt16) +CREATE TABLE IF NOT EXISTS resources +( + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 
'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 
'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + url String, + url_host String MATERIALIZED lower(domain(url)), + url_hostpath String MATERIALIZED concat(url_host, lower(path(url))), + type Enum8('other'=-1, 'script'=0, 'stylesheet'=1, 'fetch'=2, 'img'=3, 'media'=4), + duration Nullable(UInt16), + ttfb Nullable(UInt16), + header_size Nullable(UInt16), + encoded_body_size Nullable(UInt32), + decoded_body_size Nullable(UInt32), + compression_ratio Nullable(Float32) MATERIALIZED divide(decoded_body_size, encoded_body_size), + success UInt8, + method Nullable(Enum8('GET' = 0, 'HEAD' = 1, 'POST' = 2, 'PUT' = 3, 'DELETE' = 4, 'CONNECT' = 5, 'OPTIONS' = 6, 'TRACE' = 7, 'PATCH' = 8)), + status Nullable(UInt16) ) ENGINE = MergeTree -PARTITION BY toDate(datetime) -ORDER BY (project_id, datetime) -TTL datetime + INTERVAL 1 MONTH; + PARTITION BY toDate(datetime) + ORDER BY (project_id, datetime) + TTL datetime + INTERVAL 1 MONTH; diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions.sql index 77d430d85..22cc6b876 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions.sql @@ -1,22 +1,26 @@ -CREATE TABLE sessions ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type 
Enum8('other'=0, 'desktop'=1, 'mobile'=2), - user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 
'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), - datetime DateTime, - duration UInt32, - pages_count UInt16, - events_count UInt16, - errors_count UInt16 -) ENGINE = ReplacingMergeTree( duration ) -PARTITION BY toDate(datetime) -ORDER BY (project_id, datetime, session_id) -TTL datetime + INTERVAL 1 MONTH; +CREATE TABLE IF NOT EXISTS sessions +( + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 
'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + duration UInt32, + pages_count UInt16, + events_count UInt16, + errors_count UInt16, + utm_source Nullable(String), + utm_medium Nullable(String), + utm_campaign Nullable(String) +) ENGINE = ReplacingMergeTree(duration) + PARTITION BY toDate(datetime) + ORDER BY (project_id, datetime, session_id) + TTL datetime + INTERVAL 1 MONTH; diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions_metadata.sql 
b/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions_metadata.sql index a6d0382e6..ddf8aed01 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions_metadata.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions_metadata.sql @@ -1,4 +1,4 @@ -CREATE TABLE sessions_metadata +CREATE TABLE IF NOT EXISTS sessions_metadata ( session_id UInt64, project_id UInt32, diff --git a/ee/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql b/ee/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql new file mode 100644 index 000000000..94d5fced6 --- /dev/null +++ b/ee/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql @@ -0,0 +1,167 @@ +BEGIN; +CREATE OR REPLACE FUNCTION openreplay_version() + RETURNS text AS +$$ +SELECT 'v1.4.0-ee' +$$ LANGUAGE sql IMMUTABLE; + +CREATE TABLE IF NOT EXISTS traces +( + user_id integer NULL REFERENCES users (user_id) ON DELETE CASCADE, + tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, + created_at bigint NOT NULL DEFAULT (EXTRACT(EPOCH FROM now() at time zone 'utc') * 1000)::bigint, + auth text NULL, + action text NOT NULL, + method text NOT NULL, + path_format text NOT NULL, + endpoint text NOT NULL, + payload jsonb NULL, + parameters jsonb NULL, + status int NULL +); +CREATE INDEX IF NOT EXISTS traces_user_id_idx ON traces (user_id); +CREATE INDEX IF NOT EXISTS traces_tenant_id_idx ON traces (tenant_id); + +CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); + +CREATE INDEX IF NOT EXISTS pages_first_contentful_paint_time_idx ON events.pages (first_contentful_paint_time) WHERE first_contentful_paint_time > 0; +CREATE INDEX IF NOT EXISTS pages_dom_content_loaded_time_idx ON events.pages (dom_content_loaded_time) WHERE dom_content_loaded_time > 0; +CREATE INDEX IF NOT EXISTS pages_first_paint_time_idx ON events.pages (first_paint_time) WHERE first_paint_time > 0; +CREATE INDEX IF NOT EXISTS pages_ttfb_idx ON events.pages (ttfb) 
WHERE ttfb > 0; +CREATE INDEX IF NOT EXISTS pages_time_to_interactive_idx ON events.pages (time_to_interactive) WHERE time_to_interactive > 0; +CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_loadgt0NN_idx ON events.pages (session_id, timestamp) WHERE load_time > 0 AND load_time IS NOT NULL; +CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_visualgt0nn_idx ON events.pages (session_id, timestamp) WHERE visually_complete > 0 AND visually_complete IS NOT NULL; +CREATE INDEX IF NOT EXISTS pages_timestamp_metgt0_idx ON events.pages (timestamp) WHERE response_time > 0 OR + first_paint_time > 0 OR + dom_content_loaded_time > 0 OR + ttfb > 0 OR + time_to_interactive > 0; +CREATE INDEX IF NOT EXISTS pages_session_id_speed_indexgt0nn_idx ON events.pages (session_id, speed_index) WHERE speed_index > 0 AND speed_index IS NOT NULL; +CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_dom_building_timegt0nn_idx ON events.pages (session_id, timestamp, dom_building_time) WHERE dom_building_time > 0 AND dom_building_time IS NOT NULL; +CREATE INDEX IF NOT EXISTS issues_project_id_idx ON issues (project_id); + +CREATE INDEX IF NOT EXISTS errors_project_id_error_id_js_exception_idx ON public.errors (project_id, error_id) WHERE source = 'js_exception'; +CREATE INDEX IF NOT EXISTS errors_project_id_error_id_idx ON public.errors (project_id, error_id); +CREATE INDEX IF NOT EXISTS errors_project_id_error_id_integration_idx ON public.errors (project_id, error_id) WHERE source != 'js_exception'; + +CREATE INDEX IF NOT EXISTS sessions_start_ts_idx ON public.sessions (start_ts) WHERE duration > 0; +CREATE INDEX IF NOT EXISTS sessions_project_id_idx ON public.sessions (project_id) WHERE duration > 0; +CREATE INDEX IF NOT EXISTS sessions_session_id_project_id_start_ts_idx ON sessions (session_id, project_id, start_ts) WHERE duration > 0; + +CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); +CREATE INDEX IF NOT 
EXISTS jobs_project_id_idx ON jobs (project_id); +CREATE INDEX IF NOT EXISTS errors_session_id_timestamp_error_id_idx ON events.errors (session_id, timestamp, error_id); +CREATE INDEX IF NOT EXISTS errors_error_id_timestamp_idx ON events.errors (error_id, timestamp); +CREATE INDEX IF NOT EXISTS errors_timestamp_error_id_session_id_idx ON events.errors (timestamp, error_id, session_id); +CREATE INDEX IF NOT EXISTS errors_error_id_timestamp_session_id_idx ON events.errors (error_id, timestamp, session_id); +CREATE INDEX IF NOT EXISTS resources_timestamp_idx ON events.resources (timestamp); +CREATE INDEX IF NOT EXISTS resources_success_idx ON events.resources (success); +CREATE INDEX IF NOT EXISTS projects_project_key_idx ON public.projects (project_key); +CREATE INDEX IF NOT EXISTS resources_timestamp_type_durationgt0NN_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL; +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_idx ON events.resources (session_id, timestamp); +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_type_idx ON events.resources (session_id, timestamp, type); +CREATE INDEX IF NOT EXISTS resources_timestamp_type_durationgt0NN_noFetch_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL AND type != 'fetch'; +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_url_host_fail_idx ON events.resources (session_id, timestamp, url_host) WHERE success = FALSE; +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_url_host_firstparty_idx ON events.resources (session_id, timestamp, url_host) WHERE type IN ('fetch', 'script'); +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_duration_durationgt0NN_img_idx ON events.resources (session_id, timestamp, duration) WHERE duration > 0 AND duration IS NOT NULL AND type = 'img'; +CREATE INDEX IF NOT EXISTS resources_timestamp_session_id_idx ON events.resources (timestamp, session_id); + +DROP TRIGGER IF EXISTS 
on_insert_or_update ON projects; +CREATE TRIGGER on_insert_or_update + AFTER INSERT OR UPDATE + ON projects + FOR EACH ROW +EXECUTE PROCEDURE notify_project(); + +UPDATE tenants +SET name='' +WHERE name ISNULL; +ALTER TABLE tenants + ALTER COLUMN name SET NOT NULL; + +ALTER TABLE sessions + ADD COLUMN IF NOT EXISTS utm_source text NULL DEFAULT NULL, + ADD COLUMN IF NOT EXISTS utm_medium text NULL DEFAULT NULL, + ADD COLUMN IF NOT EXISTS utm_campaign text NULL DEFAULT NULL; + +CREATE INDEX IF NOT EXISTS sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops); +CREATE INDEX IF NOT EXISTS sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops); +CREATE INDEX IF NOT EXISTS sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops); +CREATE INDEX IF NOT EXISTS requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE; + +DROP INDEX IF EXISTS sessions_project_id_user_browser_idx1; +DROP INDEX IF EXISTS sessions_project_id_user_country_idx1; +ALTER INDEX IF EXISTS platform_idx RENAME TO sessions_platform_idx; +ALTER INDEX IF EXISTS events.resources_duration_idx RENAME TO resources_duration_durationgt0_idx; +DROP INDEX IF EXISTS projects_project_key_idx1; +CREATE INDEX IF NOT EXISTS errors_parent_error_id_idx ON errors (parent_error_id); + +CREATE INDEX IF NOT EXISTS performance_session_id_idx ON events.performance (session_id); +CREATE INDEX IF NOT EXISTS performance_timestamp_idx ON events.performance (timestamp); +CREATE INDEX IF NOT EXISTS performance_session_id_timestamp_idx ON events.performance (session_id, timestamp); +CREATE INDEX IF NOT EXISTS performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0; +CREATE INDEX IF NOT EXISTS performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0; + +CREATE TABLE IF NOT EXISTS metrics +( + metric_id integer 
generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer REFERENCES users (user_id) ON DELETE SET NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp +); +CREATE INDEX IF NOT EXISTS metrics_user_id_is_public_idx ON public.metrics (user_id, is_public); +CREATE TABLE IF NOT EXISTS metric_series +( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp +); +CREATE INDEX IF NOT EXISTS metric_series_metric_id_idx ON public.metric_series (metric_id); +CREATE INDEX IF NOT EXISTS funnels_project_id_idx ON public.funnels (project_id); + + +CREATE TABLE IF NOT EXISTS searches +( + search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False +); + +CREATE INDEX IF NOT EXISTS searches_user_id_is_public_idx ON public.searches (user_id, is_public); +CREATE INDEX IF NOT EXISTS searches_project_id_idx ON public.searches (project_id); +CREATE INDEX IF NOT EXISTS alerts_project_id_idx ON alerts (project_id); + +ALTER TABLE alerts + ADD COLUMN IF NOT EXISTS series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE; + +CREATE INDEX IF NOT EXISTS alerts_series_id_idx ON alerts (series_id); +UPDATE alerts +SET options=jsonb_set(options, '{change}', '"change"') +WHERE detection_method = 'change' 
+ AND options -> 'change' ISNULL; + +ALTER TABLE roles + ADD COLUMN IF NOT EXISTS all_projects bool NOT NULL DEFAULT TRUE; + +CREATE TABLE IF NOT EXISTS roles_projects +( + role_id integer NOT NULL REFERENCES roles (role_id) ON DELETE CASCADE, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + CONSTRAINT roles_projects_pkey PRIMARY KEY (role_id, project_id) +); +CREATE INDEX IF NOT EXISTS roles_projects_role_id_idx ON roles_projects (role_id); +CREATE INDEX IF NOT EXISTS roles_projects_project_id_idx ON roles_projects (project_id); + +COMMIT; \ No newline at end of file diff --git a/ee/scripts/helm/db/init_dbs/postgresql/1.5.0/1.5.0.sql b/ee/scripts/helm/db/init_dbs/postgresql/1.5.0/1.5.0.sql deleted file mode 100644 index 5f0f4f054..000000000 --- a/ee/scripts/helm/db/init_dbs/postgresql/1.5.0/1.5.0.sql +++ /dev/null @@ -1,18 +0,0 @@ -BEGIN; -CREATE TABLE traces -( - user_id integer NULL REFERENCES users (user_id) ON DELETE CASCADE, - tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, - created_at bigint NOT NULL DEFAULT (EXTRACT(EPOCH FROM now() at time zone 'utc') * 1000)::bigint, - auth text NULL, - action text NOT NULL, - method text NOT NULL, - path_format text NOT NULL, - endpoint text NOT NULL, - payload jsonb NULL, - parameters jsonb NULL, - status int NULL -); -CREATE INDEX traces_user_id_idx ON traces (user_id); -CREATE INDEX traces_tenant_id_idx ON traces (tenant_id); -COMMIT; \ No newline at end of file diff --git a/ee/scripts/helm/db/init_dbs/postgresql/init_schema.sql b/ee/scripts/helm/db/init_dbs/postgresql/init_schema.sql index 3cf1d6751..256589c91 100644 --- a/ee/scripts/helm/db/init_dbs/postgresql/init_schema.sql +++ b/ee/scripts/helm/db/init_dbs/postgresql/init_schema.sql @@ -1,9 +1,15 @@ BEGIN; --- Schemas and functions definitions: CREATE SCHEMA IF NOT EXISTS events_common; CREATE SCHEMA IF NOT EXISTS events; +CREATE EXTENSION IF NOT EXISTS pg_trgm; +CREATE EXTENSION IF NOT EXISTS 
pgcrypto; + +CREATE OR REPLACE FUNCTION openreplay_version() + RETURNS text AS +$$ +SELECT 'v1.4.0-ee' +$$ LANGUAGE sql IMMUTABLE; --- --- accounts.sql --- CREATE OR REPLACE FUNCTION generate_api_key(length integer) RETURNS text AS $$ @@ -23,7 +29,7 @@ begin end; $$ LANGUAGE plpgsql; --- --- events.sql --- + CREATE OR REPLACE FUNCTION events.funnel(steps integer[], m integer) RETURNS boolean AS $$ @@ -48,13 +54,14 @@ BEGIN END; $$ LANGUAGE plpgsql IMMUTABLE; --- --- integrations.sql --- + CREATE OR REPLACE FUNCTION notify_integration() RETURNS trigger AS $$ BEGIN IF NEW IS NULL THEN - PERFORM pg_notify('integration', (row_to_json(OLD)::text || '{"options": null, "request_data": null}'::text)); + PERFORM pg_notify('integration', + (row_to_json(OLD)::text || '{"options": null, "request_data": null}'::text)); ELSIF (OLD IS NULL) OR (OLD.options <> NEW.options) THEN PERFORM pg_notify('integration', row_to_json(NEW)::text); END IF; @@ -62,7 +69,7 @@ BEGIN END; $$ LANGUAGE plpgsql; --- --- alerts.sql --- + CREATE OR REPLACE FUNCTION notify_alert() RETURNS trigger AS $$ @@ -79,7 +86,6 @@ BEGIN END ; $$ LANGUAGE plpgsql; --- --- projects.sql --- CREATE OR REPLACE FUNCTION notify_project() RETURNS trigger AS $$ @@ -89,32 +95,55 @@ BEGIN END; $$ LANGUAGE plpgsql; --- All tables and types: + DO $$ BEGIN - IF EXISTS(SELECT - FROM information_schema.tables - WHERE table_schema = 'public' - AND table_name = 'tenants') THEN - raise notice 'DB exists, skipping creation query'; + IF (with to_check (name) as ( + values ('alerts'), + ('announcements'), + ('assigned_sessions'), + ('autocomplete'), + ('basic_authentication'), + ('errors'), + ('funnels'), + ('integrations'), + ('issues'), + ('jira_cloud'), + ('jobs'), + ('metric_series'), + ('metrics'), + ('notifications'), + ('oauth_authentication'), + ('projects'), + ('roles'), + ('roles_projects'), + ('searches'), + ('sessions'), + ('tenants'), + ('traces'), + ('user_favorite_errors'), + ('user_favorite_sessions'), + 
('user_viewed_errors'), + ('user_viewed_sessions'), + ('users'), + ('webhooks') + ) + select bool_and(exists(select * + from information_schema.tables t + where table_schema = 'public' + AND table_name = to_check.name)) as all_present + from to_check) THEN + raise notice 'All public schema tables exists'; ELSE - raise notice 'Creating DB'; + raise notice 'Some or all public schema tables are missing, creating missing tables'; --- --- public.sql --- - - CREATE EXTENSION IF NOT EXISTS pg_trgm; - CREATE EXTENSION IF NOT EXISTS pgcrypto; - - --- --- accounts.sql --- - - CREATE TABLE tenants + CREATE TABLE IF NOT EXISTS tenants ( tenant_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, user_id text NOT NULL DEFAULT generate_api_key(20), - name text, + name text NOT NULL, api_key text UNIQUE default generate_api_key(20) not null, created_at timestamp without time zone NOT NULL DEFAULT (now() at time zone 'utc'), deleted_at timestamp without time zone NULL DEFAULT NULL, @@ -129,21 +158,27 @@ $$ ); - CREATE TABLE roles + CREATE TABLE IF NOT EXISTS roles ( - role_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, - tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, - name text NOT NULL, - description text DEFAULT NULL, - permissions text[] NOT NULL DEFAULT '{}', - protected bool NOT NULL DEFAULT FALSE, - created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), - deleted_at timestamp NULL DEFAULT NULL + role_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, + name text NOT NULL, + description text DEFAULT NULL, + permissions text[] NOT NULL DEFAULT '{}', + protected bool NOT NULL DEFAULT FALSE, + all_projects bool NOT NULL DEFAULT TRUE, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + deleted_at timestamp NULL DEFAULT NULL ); - CREATE TYPE user_role AS ENUM ('owner', 'admin', 'member'); + IF NOT EXISTS(SELECT * + FROM 
pg_type typ + WHERE typ.typname = 'user_role') THEN + CREATE TYPE user_role AS ENUM ('owner','admin','member'); + END IF; - CREATE TABLE users + + CREATE TABLE IF NOT EXISTS users ( user_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, @@ -215,16 +250,16 @@ $$ }'::jsonb, api_key text UNIQUE default generate_api_key(20) not null, jwt_iat timestamp without time zone NULL DEFAULT NULL, - data jsonb NOT NULL DEFAULT '{}'::jsonb, + data jsonb NOT NULL DEFAULT'{}'::jsonb, weekly_report boolean NOT NULL DEFAULT TRUE, origin text NULL DEFAULT NULL, role_id integer REFERENCES roles (role_id) ON DELETE SET NULL, internal_id text NULL DEFAULT NULL ); - CREATE INDEX users_tenant_id_deleted_at_N_idx ON users (tenant_id) WHERE deleted_at ISNULL; + CREATE INDEX IF NOT EXISTS users_tenant_id_deleted_at_N_idx ON users (tenant_id) WHERE deleted_at ISNULL; - CREATE TABLE basic_authentication + CREATE TABLE IF NOT EXISTS basic_authentication ( user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, password text DEFAULT NULL, @@ -237,9 +272,13 @@ $$ UNIQUE (user_id) ); + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'oauth_provider') THEN + CREATE TYPE oauth_provider AS ENUM ('jira','github'); + END IF; - CREATE TYPE oauth_provider AS ENUM ('jira', 'github'); - CREATE TABLE oauth_authentication + CREATE TABLE IF NOT EXISTS oauth_authentication ( user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, provider oauth_provider NOT NULL, @@ -249,9 +288,7 @@ $$ ); --- --- projects.sql --- - - CREATE TABLE projects + CREATE TABLE IF NOT EXISTS projects ( project_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, project_key varchar(20) NOT NULL UNIQUE DEFAULT generate_api_key(20), @@ -272,49 +309,41 @@ $$ metadata_8 text DEFAULT NULL, metadata_9 text DEFAULT NULL, metadata_10 text DEFAULT NULL, - gdpr jsonb NOT NULL DEFAULT '{ + gdpr jsonb NOT NULL 
DEFAULT'{ "maskEmails": true, "sampleRate": 33, "maskNumbers": false, "defaultInputMode": "plain" - }'::jsonb -- ?????? - ); - - CREATE INDEX ON public.projects (project_key); - --- --- alerts.sql --- - - CREATE TYPE alert_detection_method AS ENUM ('threshold', 'change'); - - CREATE TABLE alerts - ( - alert_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, - project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, - name text NOT NULL, - description text NULL DEFAULT NULL, - active boolean NOT NULL DEFAULT TRUE, - detection_method alert_detection_method NOT NULL, - query jsonb NOT NULL, - deleted_at timestamp NULL DEFAULT NULL, - created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), - options jsonb NOT NULL DEFAULT '{ - "renotifyInterval": 1440 }'::jsonb ); - CREATE TRIGGER on_insert_or_update_or_delete - AFTER INSERT OR UPDATE OR DELETE - ON alerts + CREATE INDEX IF NOT EXISTS projects_project_key_idx ON public.projects (project_key); + DROP TRIGGER IF EXISTS on_insert_or_update ON projects; + CREATE TRIGGER on_insert_or_update + AFTER INSERT OR UPDATE + ON projects FOR EACH ROW - EXECUTE PROCEDURE notify_alert(); + EXECUTE PROCEDURE notify_project(); + + CREATE TABLE IF NOT EXISTS roles_projects + ( + role_id integer NOT NULL REFERENCES roles (role_id) ON DELETE CASCADE, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + CONSTRAINT roles_projects_pkey PRIMARY KEY (role_id, project_id) + ); + CREATE INDEX IF NOT EXISTS roles_projects_role_id_idx ON roles_projects (role_id); + CREATE INDEX IF NOT EXISTS roles_projects_project_id_idx ON roles_projects (project_id); --- --- webhooks.sql --- + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'webhook_type') THEN + create type webhook_type as enum ('webhook','slack','email'); + END IF; - create type webhook_type as enum ('webhook', 'slack', 'email'); - create table webhooks + create table IF NOT EXISTS webhooks ( webhook_id 
integer generated by default as identity constraint webhooks_pkey @@ -332,10 +361,8 @@ $$ name varchar(100) ); --- --- notifications.sql --- - - CREATE TABLE notifications + CREATE TABLE IF NOT EXISTS notifications ( notification_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, tenant_id integer REFERENCES tenants (tenant_id) ON DELETE CASCADE, @@ -346,25 +373,24 @@ $$ button_url text NULL, image_url text NULL, created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), - options jsonb NOT NULL DEFAULT '{}'::jsonb, + options jsonb NOT NULL DEFAULT'{}'::jsonb, CONSTRAINT notification_tenant_xor_user CHECK ( tenant_id NOTNULL AND user_id ISNULL OR tenant_id ISNULL AND user_id NOTNULL ) ); - CREATE INDEX notifications_user_id_index ON public.notifications (user_id); - CREATE INDEX notifications_tenant_id_index ON public.notifications (tenant_id); - CREATE INDEX notifications_created_at_index ON public.notifications (created_at DESC); - CREATE INDEX notifications_created_at_epoch_idx ON public.notifications (CAST(EXTRACT(EPOCH FROM created_at) * 1000 AS BIGINT) DESC); + CREATE INDEX IF NOT EXISTS notifications_user_id_index ON notifications (user_id); + CREATE INDEX IF NOT EXISTS notifications_tenant_id_index ON notifications (tenant_id); + CREATE INDEX IF NOT EXISTS notifications_created_at_index ON notifications (created_at DESC); + CREATE INDEX IF NOT EXISTS notifications_created_at_epoch_idx ON notifications (CAST(EXTRACT(EPOCH FROM created_at) * 1000 AS BIGINT) DESC); - CREATE TABLE user_viewed_notifications + CREATE TABLE IF NOT EXISTS user_viewed_notifications ( user_id integer NOT NULL REFERENCES users (user_id) on delete cascade, notification_id integer NOT NULL REFERENCES notifications (notification_id) on delete cascade, constraint user_viewed_notifications_pkey primary key (user_id, notification_id) ); --- --- funnels.sql --- - CREATE TABLE funnels + CREATE TABLE IF NOT EXISTS funnels ( funnel_id integer generated BY DEFAULT AS IDENTITY 
PRIMARY KEY, project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, @@ -376,13 +402,16 @@ $$ is_public boolean NOT NULL DEFAULT False ); - CREATE INDEX ON public.funnels (user_id, is_public); + CREATE INDEX IF NOT EXISTS funnels_user_id_is_public_idx ON public.funnels (user_id, is_public); + CREATE INDEX IF NOT EXISTS funnels_project_id_idx ON public.funnels (project_id); --- --- announcements.sql --- + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'announcement_type') THEN + create type announcement_type as enum ('notification','alert'); + END IF; - create type announcement_type as enum ('notification', 'alert'); - - create table announcements + create table IF NOT EXISTS announcements ( announcement_id serial not null constraint announcements_pk @@ -396,18 +425,23 @@ $$ type announcement_type default 'notification'::announcement_type not null ); --- --- integrations.sql --- + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'integration_provider') THEN + CREATE TYPE integration_provider AS ENUM ('bugsnag','cloudwatch','datadog','newrelic','rollbar','sentry','stackdriver','sumologic','elasticsearch'); --,'jira','github'); + END IF; - CREATE TYPE integration_provider AS ENUM ('bugsnag', 'cloudwatch', 'datadog', 'newrelic', 'rollbar', 'sentry', 'stackdriver', 'sumologic', 'elasticsearch'); --, 'jira', 'github'); - CREATE TABLE integrations + CREATE TABLE IF NOT EXISTS integrations ( project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, provider integration_provider NOT NULL, options jsonb NOT NULL, - request_data jsonb NOT NULL DEFAULT '{}'::jsonb, + request_data jsonb NOT NULL DEFAULT'{}'::jsonb, PRIMARY KEY (project_id, provider) ); + DROP TRIGGER IF EXISTS on_insert_or_update_or_delete ON integrations; + CREATE TRIGGER on_insert_or_update_or_delete AFTER INSERT OR UPDATE OR DELETE ON integrations @@ -415,7 +449,7 @@ $$ EXECUTE PROCEDURE notify_integration(); - create table 
jira_cloud + CREATE TABLE IF NOT EXISTS jira_cloud ( user_id integer not null constraint jira_cloud_pk @@ -428,32 +462,33 @@ $$ url text ); + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'issue_type') THEN + CREATE TYPE issue_type AS ENUM ( + 'click_rage', + 'dead_click', + 'excessive_scrolling', + 'bad_request', + 'missing_resource', + 'memory', + 'cpu', + 'slow_resource', + 'slow_page_load', + 'crash', + 'ml_cpu', + 'ml_memory', + 'ml_dead_click', + 'ml_click_rage', + 'ml_mouse_thrashing', + 'ml_excessive_scrolling', + 'ml_slow_resources', + 'custom', + 'js_exception' + ); + END IF; --- --- issues.sql --- - - CREATE TYPE issue_type AS ENUM ( - 'click_rage', - 'dead_click', - 'excessive_scrolling', - 'bad_request', - 'missing_resource', - 'memory', - 'cpu', - 'slow_resource', - 'slow_page_load', - 'crash', - 'ml_cpu', - 'ml_memory', - 'ml_dead_click', - 'ml_click_rage', - 'ml_mouse_thrashing', - 'ml_excessive_scrolling', - 'ml_slow_resources', - 'custom', - 'js_exception' - ); - - CREATE TABLE issues + CREATE TABLE IF NOT EXISTS issues ( issue_id text NOT NULL PRIMARY KEY, project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, @@ -461,15 +496,24 @@ $$ context_string text NOT NULL, context jsonb DEFAULT NULL ); - CREATE INDEX ON issues (issue_id, type); - CREATE INDEX issues_context_string_gin_idx ON public.issues USING GIN (context_string gin_trgm_ops); - CREATE INDEX issues_project_id_issue_id_idx ON public.issues (project_id, issue_id); + CREATE INDEX IF NOT EXISTS issues_issue_id_type_idx ON issues (issue_id, type); + CREATE INDEX IF NOT EXISTS issues_context_string_gin_idx ON public.issues USING GIN (context_string gin_trgm_ops); + CREATE INDEX IF NOT EXISTS issues_project_id_issue_id_idx ON public.issues (project_id, issue_id); + CREATE INDEX IF NOT EXISTS issues_project_id_idx ON issues (project_id); --- --- errors.sql --- + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'error_source') THEN + 
CREATE TYPE error_source AS ENUM ('js_exception','bugsnag','cloudwatch','datadog','newrelic','rollbar','sentry','stackdriver','sumologic'); + END IF; - CREATE TYPE error_source AS ENUM ('js_exception', 'bugsnag', 'cloudwatch', 'datadog', 'newrelic', 'rollbar', 'sentry', 'stackdriver', 'sumologic'); - CREATE TYPE error_status AS ENUM ('unresolved', 'resolved', 'ignored'); - CREATE TABLE errors + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'error_status') THEN + CREATE TYPE error_status AS ENUM ('unresolved','resolved','ignored'); + END IF; + + CREATE TABLE IF NOT EXISTS errors ( error_id text NOT NULL PRIMARY KEY, project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, @@ -482,36 +526,53 @@ $$ stacktrace jsonb, --to save the stacktrace and not query S3 another time stacktrace_parsed_at timestamp ); - CREATE INDEX errors_error_id_idx ON errors (error_id); - CREATE INDEX ON errors (project_id, source); - CREATE INDEX errors_message_gin_idx ON public.errors USING GIN (message gin_trgm_ops); - CREATE INDEX errors_name_gin_idx ON public.errors USING GIN (name gin_trgm_ops); - CREATE INDEX errors_project_id_idx ON public.errors (project_id); - CREATE INDEX errors_project_id_status_idx ON public.errors (project_id, status); + CREATE INDEX IF NOT EXISTS errors_project_id_source_idx ON errors (project_id, source); + CREATE INDEX IF NOT EXISTS errors_message_gin_idx ON public.errors USING GIN (message gin_trgm_ops); + CREATE INDEX IF NOT EXISTS errors_name_gin_idx ON public.errors USING GIN (name gin_trgm_ops); + CREATE INDEX IF NOT EXISTS errors_project_id_idx ON public.errors (project_id); + CREATE INDEX IF NOT EXISTS errors_project_id_status_idx ON public.errors (project_id, status); + CREATE INDEX IF NOT EXISTS errors_project_id_error_id_js_exception_idx ON public.errors (project_id, error_id) WHERE source = 'js_exception'; + CREATE INDEX IF NOT EXISTS errors_project_id_error_id_idx ON public.errors (project_id, error_id); + 
CREATE INDEX IF NOT EXISTS errors_project_id_error_id_integration_idx ON public.errors (project_id, error_id) WHERE source != 'js_exception'; + CREATE INDEX IF NOT EXISTS errors_error_id_idx ON errors (error_id); + CREATE INDEX IF NOT EXISTS errors_parent_error_id_idx ON errors (parent_error_id); - CREATE TABLE user_favorite_errors + CREATE TABLE IF NOT EXISTS user_favorite_errors ( user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, error_id text NOT NULL REFERENCES errors (error_id) ON DELETE CASCADE, PRIMARY KEY (user_id, error_id) ); - CREATE TABLE user_viewed_errors + CREATE TABLE IF NOT EXISTS user_viewed_errors ( user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, error_id text NOT NULL REFERENCES errors (error_id) ON DELETE CASCADE, PRIMARY KEY (user_id, error_id) ); - CREATE INDEX user_viewed_errors_user_id_idx ON public.user_viewed_errors (user_id); - CREATE INDEX user_viewed_errors_error_id_idx ON public.user_viewed_errors (error_id); + CREATE INDEX IF NOT EXISTS user_viewed_errors_user_id_idx ON public.user_viewed_errors (user_id); + CREATE INDEX IF NOT EXISTS user_viewed_errors_error_id_idx ON public.user_viewed_errors (error_id); + + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'platform') THEN + CREATE TYPE platform AS ENUM ('web','ios','android'); + END IF; + + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'device_type') THEN + CREATE TYPE device_type AS ENUM ('desktop','tablet','mobile','other'); + END IF; --- --- sessions.sql --- - CREATE TYPE device_type AS ENUM ('desktop', 'tablet', 'mobile', 'other'); - CREATE TYPE country AS ENUM ('UN', 'RW', 'SO', 'YE', 'IQ', 'SA', 'IR', 'CY', 'TZ', 'SY', 'AM', 'KE', 'CD', 'DJ', 'UG', 'CF', 'SC', 'JO', 'LB', 'KW', 'OM', 'QA', 'BH', 'AE', 'IL', 'TR', 'ET', 'ER', 'EG', 'SD', 'GR', 'BI', 'EE', 'LV', 'AZ', 'LT', 'SJ', 'GE', 'MD', 'BY', 'FI', 'AX', 'UA', 'MK', 'HU', 'BG', 'AL', 'PL', 'RO', 'XK', 'ZW', 'ZM', 'KM', 'MW', 'LS', 'BW', 'MU', 
'SZ', 'RE', 'ZA', 'YT', 'MZ', 'MG', 'AF', 'PK', 'BD', 'TM', 'TJ', 'LK', 'BT', 'IN', 'MV', 'IO', 'NP', 'MM', 'UZ', 'KZ', 'KG', 'TF', 'HM', 'CC', 'PW', 'VN', 'TH', 'ID', 'LA', 'TW', 'PH', 'MY', 'CN', 'HK', 'BN', 'MO', 'KH', 'KR', 'JP', 'KP', 'SG', 'CK', 'TL', 'RU', 'MN', 'AU', 'CX', 'MH', 'FM', 'PG', 'SB', 'TV', 'NR', 'VU', 'NC', 'NF', 'NZ', 'FJ', 'LY', 'CM', 'SN', 'CG', 'PT', 'LR', 'CI', 'GH', 'GQ', 'NG', 'BF', 'TG', 'GW', 'MR', 'BJ', 'GA', 'SL', 'ST', 'GI', 'GM', 'GN', 'TD', 'NE', 'ML', 'EH', 'TN', 'ES', 'MA', 'MT', 'DZ', 'FO', 'DK', 'IS', 'GB', 'CH', 'SE', 'NL', 'AT', 'BE', 'DE', 'LU', 'IE', 'MC', 'FR', 'AD', 'LI', 'JE', 'IM', 'GG', 'SK', 'CZ', 'NO', 'VA', 'SM', 'IT', 'SI', 'ME', 'HR', 'BA', 'AO', 'NA', 'SH', 'BV', 'BB', 'CV', 'GY', 'GF', 'SR', 'PM', 'GL', 'PY', 'UY', 'BR', 'FK', 'GS', 'JM', 'DO', 'CU', 'MQ', 'BS', 'BM', 'AI', 'TT', 'KN', 'DM', 'AG', 'LC', 'TC', 'AW', 'VG', 'VC', 'MS', 'MF', 'BL', 'GP', 'GD', 'KY', 'BZ', 'SV', 'GT', 'HN', 'NI', 'CR', 'VE', 'EC', 'CO', 'PA', 'HT', 'AR', 'CL', 'BO', 'PE', 'MX', 'PF', 'PN', 'KI', 'TK', 'TO', 'WF', 'WS', 'NU', 'MP', 'GU', 'PR', 'VI', 'UM', 'AS', 'CA', 'US', 'PS', 'RS', 'AQ', 'SX', 'CW', 'BQ', 'SS'); - CREATE TYPE platform AS ENUM ('web','ios','android'); + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'country') THEN + CREATE TYPE country AS ENUM 
('UN','RW','SO','YE','IQ','SA','IR','CY','TZ','SY','AM','KE','CD','DJ','UG','CF','SC','JO','LB','KW','OM','QA','BH','AE','IL','TR','ET','ER','EG','SD','GR','BI','EE','LV','AZ','LT','SJ','GE','MD','BY','FI','AX','UA','MK','HU','BG','AL','PL','RO','XK','ZW','ZM','KM','MW','LS','BW','MU','SZ','RE','ZA','YT','MZ','MG','AF','PK','BD','TM','TJ','LK','BT','IN','MV','IO','NP','MM','UZ','KZ','KG','TF','HM','CC','PW','VN','TH','ID','LA','TW','PH','MY','CN','HK','BN','MO','KH','KR','JP','KP','SG','CK','TL','RU','MN','AU','CX','MH','FM','PG','SB','TV','NR','VU','NC','NF','NZ','FJ','LY','CM','SN','CG','PT','LR','CI','GH','GQ','NG','BF','TG','GW','MR','BJ','GA','SL','ST','GI','GM','GN','TD','NE','ML','EH','TN','ES','MA','MT','DZ','FO','DK','IS','GB','CH','SE','NL','AT','BE','DE','LU','IE','MC','FR','AD','LI','JE','IM','GG','SK','CZ','NO','VA','SM','IT','SI','ME','HR','BA','AO','NA','SH','BV','BB','CV','GY','GF','SR','PM','GL','PY','UY','BR','FK','GS','JM','DO','CU','MQ','BS','BM','AI','TT','KN','DM','AG','LC','TC','AW','VG','VC','MS','MF','BL','GP','GD','KY','BZ','SV','GT','HN','NI','CR','VE','EC','CO','PA','HT','AR','CL','BO','PE','MX','PF','PN','KI','TK','TO','WF','WS','NU','MP','GU','PR','VI','UM','AS','CA','US','PS','RS','AQ','SX','CW','BQ','SS'); + END IF; - CREATE TABLE sessions + CREATE TABLE IF NOT EXISTS sessions ( session_id bigint PRIMARY KEY, project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, @@ -540,6 +601,9 @@ $$ watchdogs_score bigint NOT NULL DEFAULT 0, issue_score bigint NOT NULL DEFAULT 0, issue_types issue_type[] NOT NULL DEFAULT '{}'::issue_type[], + utm_source text NULL DEFAULT NULL, + utm_medium text NULL DEFAULT NULL, + utm_campaign text NULL DEFAULT NULL, metadata_1 text DEFAULT NULL, metadata_2 text DEFAULT NULL, metadata_3 text DEFAULT NULL, @@ -550,154 +614,261 @@ $$ metadata_8 text DEFAULT NULL, metadata_9 text DEFAULT NULL, metadata_10 text DEFAULT NULL --- , --- rehydration_id integer REFERENCES 
rehydrations(rehydration_id) ON DELETE SET NULL ); - CREATE INDEX ON sessions (project_id, start_ts); - CREATE INDEX ON sessions (project_id, user_id); - CREATE INDEX ON sessions (project_id, user_anonymous_id); - CREATE INDEX ON sessions (project_id, user_device); - CREATE INDEX ON sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_browser); - CREATE INDEX ON sessions (project_id, metadata_1); - CREATE INDEX ON sessions (project_id, metadata_2); - CREATE INDEX ON sessions (project_id, metadata_3); - CREATE INDEX ON sessions (project_id, metadata_4); - CREATE INDEX ON sessions (project_id, metadata_5); - CREATE INDEX ON sessions (project_id, metadata_6); - CREATE INDEX ON sessions (project_id, metadata_7); - CREATE INDEX ON sessions (project_id, metadata_8); - CREATE INDEX ON sessions (project_id, metadata_9); - CREATE INDEX ON sessions (project_id, metadata_10); --- CREATE INDEX ON sessions (rehydration_id); - CREATE INDEX ON sessions (project_id, watchdogs_score DESC); - CREATE INDEX platform_idx ON public.sessions (platform); + CREATE INDEX IF NOT EXISTS sessions_project_id_start_ts_idx ON sessions (project_id, start_ts); + CREATE INDEX IF NOT EXISTS sessions_project_id_user_id_idx ON sessions (project_id, user_id); + CREATE INDEX IF NOT EXISTS sessions_project_id_user_anonymous_id_idx ON sessions (project_id, user_anonymous_id); + CREATE INDEX IF NOT EXISTS sessions_project_id_user_device_idx ON sessions (project_id, user_device); + CREATE INDEX IF NOT EXISTS sessions_project_id_user_country_idx ON sessions (project_id, user_country); + CREATE INDEX IF NOT EXISTS sessions_project_id_user_browser_idx ON sessions (project_id, user_browser); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_1_idx ON sessions (project_id, metadata_1); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_2_idx ON sessions (project_id, metadata_2); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_3_idx ON sessions (project_id, 
metadata_3); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_4_idx ON sessions (project_id, metadata_4); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_5_idx ON sessions (project_id, metadata_5); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_6_idx ON sessions (project_id, metadata_6); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_7_idx ON sessions (project_id, metadata_7); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_8_idx ON sessions (project_id, metadata_8); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_9_idx ON sessions (project_id, metadata_9); + CREATE INDEX IF NOT EXISTS sessions_project_id_metadata_10_idx ON sessions (project_id, metadata_10); + CREATE INDEX IF NOT EXISTS sessions_project_id_watchdogs_score_idx ON sessions (project_id, watchdogs_score DESC); + CREATE INDEX IF NOT EXISTS sessions_platform_idx ON public.sessions (platform); - CREATE INDEX sessions_metadata1_gin_idx ON public.sessions USING GIN (metadata_1 gin_trgm_ops); - CREATE INDEX sessions_metadata2_gin_idx ON public.sessions USING GIN (metadata_2 gin_trgm_ops); - CREATE INDEX sessions_metadata3_gin_idx ON public.sessions USING GIN (metadata_3 gin_trgm_ops); - CREATE INDEX sessions_metadata4_gin_idx ON public.sessions USING GIN (metadata_4 gin_trgm_ops); - CREATE INDEX sessions_metadata5_gin_idx ON public.sessions USING GIN (metadata_5 gin_trgm_ops); - CREATE INDEX sessions_metadata6_gin_idx ON public.sessions USING GIN (metadata_6 gin_trgm_ops); - CREATE INDEX sessions_metadata7_gin_idx ON public.sessions USING GIN (metadata_7 gin_trgm_ops); - CREATE INDEX sessions_metadata8_gin_idx ON public.sessions USING GIN (metadata_8 gin_trgm_ops); - CREATE INDEX sessions_metadata9_gin_idx ON public.sessions USING GIN (metadata_9 gin_trgm_ops); - CREATE INDEX sessions_metadata10_gin_idx ON public.sessions USING GIN (metadata_10 gin_trgm_ops); - CREATE INDEX sessions_user_os_gin_idx ON public.sessions USING GIN (user_os 
gin_trgm_ops); - CREATE INDEX sessions_user_browser_gin_idx ON public.sessions USING GIN (user_browser gin_trgm_ops); - CREATE INDEX sessions_user_device_gin_idx ON public.sessions USING GIN (user_device gin_trgm_ops); - CREATE INDEX sessions_user_id_gin_idx ON public.sessions USING GIN (user_id gin_trgm_ops); - CREATE INDEX sessions_user_anonymous_id_gin_idx ON public.sessions USING GIN (user_anonymous_id gin_trgm_ops); - CREATE INDEX sessions_user_country_gin_idx ON public.sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_browser); - CREATE INDEX sessions_session_id_project_id_start_ts_durationNN_idx ON sessions (session_id, project_id, start_ts) WHERE duration IS NOT NULL; - CREATE INDEX sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL; - CREATE INDEX sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0; - - - ALTER TABLE public.sessions - ADD CONSTRAINT web_browser_constraint CHECK ( - (sessions.platform = 'web' AND sessions.user_browser NOTNULL) OR - (sessions.platform != 'web' AND sessions.user_browser ISNULL)); - - ALTER TABLE public.sessions - ADD CONSTRAINT web_user_browser_version_constraint CHECK ( sessions.platform = 'web' OR sessions.user_browser_version ISNULL); - - ALTER TABLE public.sessions - ADD CONSTRAINT web_user_agent_constraint CHECK ( - (sessions.platform = 'web' AND sessions.user_agent NOTNULL) OR - (sessions.platform != 'web' AND sessions.user_agent ISNULL)); - - - CREATE TABLE user_viewed_sessions + CREATE INDEX IF NOT EXISTS sessions_metadata1_gin_idx ON public.sessions USING GIN (metadata_1 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata2_gin_idx ON public.sessions USING GIN (metadata_2 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata3_gin_idx ON public.sessions USING GIN (metadata_3 
gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata4_gin_idx ON public.sessions USING GIN (metadata_4 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata5_gin_idx ON public.sessions USING GIN (metadata_5 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata6_gin_idx ON public.sessions USING GIN (metadata_6 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata7_gin_idx ON public.sessions USING GIN (metadata_7 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata8_gin_idx ON public.sessions USING GIN (metadata_8 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata9_gin_idx ON public.sessions USING GIN (metadata_9 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_metadata10_gin_idx ON public.sessions USING GIN (metadata_10 gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_user_os_gin_idx ON public.sessions USING GIN (user_os gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_user_browser_gin_idx ON public.sessions USING GIN (user_browser gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_user_device_gin_idx ON public.sessions USING GIN (user_device gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_user_id_gin_idx ON public.sessions USING GIN (user_id gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_user_anonymous_id_gin_idx ON public.sessions USING GIN (user_anonymous_id gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_user_country_gin_idx ON public.sessions (project_id, user_country); + CREATE INDEX IF NOT EXISTS sessions_start_ts_idx ON public.sessions (start_ts) WHERE duration > 0; + CREATE INDEX IF NOT EXISTS sessions_project_id_idx ON public.sessions (project_id) WHERE duration > 0; + CREATE INDEX IF NOT EXISTS sessions_session_id_project_id_start_ts_idx ON sessions (session_id, project_id, start_ts) WHERE duration > 0; + CREATE INDEX IF NOT EXISTS sessions_session_id_project_id_start_ts_durationNN_idx ON sessions (session_id, project_id, start_ts) WHERE duration IS NOT NULL; + CREATE INDEX 
IF NOT EXISTS sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL; + CREATE INDEX IF NOT EXISTS sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0; + CREATE INDEX IF NOT EXISTS sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops); + CREATE INDEX IF NOT EXISTS sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops); + BEGIN + ALTER TABLE public.sessions + ADD CONSTRAINT web_browser_constraint CHECK ( + (sessions.platform = 'web' AND sessions.user_browser NOTNULL) OR + (sessions.platform != 'web' AND sessions.user_browser ISNULL)); + EXCEPTION + WHEN duplicate_object THEN RAISE NOTICE 'Table constraint exists'; + END; + BEGIN + ALTER TABLE public.sessions + ADD CONSTRAINT web_user_browser_version_constraint CHECK ( + sessions.platform = 'web' OR sessions.user_browser_version ISNULL); + EXCEPTION + WHEN duplicate_object THEN RAISE NOTICE 'Table constraint exists'; + END; + BEGIN + ALTER TABLE public.sessions + ADD CONSTRAINT web_user_agent_constraint CHECK ( + (sessions.platform = 'web' AND sessions.user_agent NOTNULL) OR + (sessions.platform != 'web' AND sessions.user_agent ISNULL)); + EXCEPTION + WHEN duplicate_object THEN RAISE NOTICE 'Table constraint already exists'; + END; + CREATE TABLE IF NOT EXISTS user_viewed_sessions ( user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, PRIMARY KEY (user_id, session_id) ); - CREATE TABLE user_favorite_sessions + CREATE TABLE IF NOT EXISTS user_favorite_sessions ( user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, PRIMARY KEY 
(user_id, session_id) ); + CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); --- --- assignments.sql --- - - create table assigned_sessions + CREATE TABLE IF NOT EXISTS assigned_sessions ( session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, issue_id text NOT NULL, provider oauth_provider NOT NULL, created_by integer NOT NULL, created_at timestamp default timezone('utc'::text, now()) NOT NULL, - provider_data jsonb default '{}'::jsonb NOT NULL + provider_data jsonb default'{}'::jsonb NOT NULL ); - CREATE INDEX ON assigned_sessions (session_id); + CREATE INDEX IF NOT EXISTS assigned_sessions_session_id_idx ON assigned_sessions (session_id); --- --- events_common.sql --- - CREATE SCHEMA IF NOT EXISTS events_common; - - CREATE TYPE events_common.custom_level AS ENUM ('info','error'); - - CREATE TABLE events_common.customs + CREATE TABLE IF NOT EXISTS autocomplete ( - session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, - timestamp bigint NOT NULL, - seq_index integer NOT NULL, - name text NOT NULL, - payload jsonb NOT NULL, - level events_common.custom_level NOT NULL DEFAULT 'info', - PRIMARY KEY (session_id, timestamp, seq_index) + value text NOT NULL, + type text NOT NULL, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE ); - CREATE INDEX ON events_common.customs (name); - CREATE INDEX customs_name_gin_idx ON events_common.customs USING GIN (name gin_trgm_ops); - CREATE INDEX ON events_common.customs (timestamp); + CREATE unique index IF NOT EXISTS autocomplete_unique ON autocomplete (project_id, value, type); + CREATE index IF NOT EXISTS autocomplete_project_id_idx ON autocomplete (project_id); + CREATE INDEX IF NOT EXISTS autocomplete_type_idx ON public.autocomplete (type); + CREATE INDEX IF NOT EXISTS autocomplete_value_gin_idx ON public.autocomplete USING GIN (value gin_trgm_ops); - CREATE TABLE 
events_common.issues + BEGIN + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'job_status') THEN + CREATE TYPE job_status AS ENUM ('scheduled','running','cancelled','failed','completed'); + END IF; + END; + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'job_action') THEN + CREATE TYPE job_action AS ENUM ('delete_user_data'); + END IF; + + CREATE TABLE IF NOT EXISTS jobs ( - session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, - timestamp bigint NOT NULL, - seq_index integer NOT NULL, - issue_id text NOT NULL REFERENCES issues (issue_id) ON DELETE CASCADE, - payload jsonb DEFAULT NULL, - PRIMARY KEY (session_id, timestamp, seq_index) + job_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + description text NOT NULL, + status job_status NOT NULL, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + action job_action NOT NULL, + reference_id text NOT NULL, + created_at timestamp default timezone('utc'::text, now()) NOT NULL, + updated_at timestamp default timezone('utc'::text, now()) NULL, + start_at timestamp NOT NULL, + errors text NULL ); - CREATE INDEX issues_issue_id_timestamp_idx ON events_common.issues (issue_id, timestamp); - CREATE INDEX issues_timestamp_idx ON events_common.issues (timestamp); + CREATE INDEX IF NOT EXISTS jobs_status_idx ON jobs (status); + CREATE INDEX IF NOT EXISTS jobs_start_at_idx ON jobs (start_at); + CREATE INDEX IF NOT EXISTS jobs_project_id_idx ON jobs (project_id); - CREATE TABLE events_common.requests + CREATE TABLE IF NOT EXISTS traces ( - session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, - timestamp bigint NOT NULL, - seq_index integer NOT NULL, - url text NOT NULL, - duration integer NOT NULL, - success boolean NOT NULL, - PRIMARY KEY (session_id, timestamp, seq_index) + user_id integer NULL REFERENCES users (user_id) ON DELETE CASCADE, + tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON 
DELETE CASCADE, + created_at bigint NOT NULL DEFAULT (EXTRACT(EPOCH FROM now() at time zone 'utc') * 1000)::bigint, + auth text NULL, + action text NOT NULL, + method text NOT NULL, + path_format text NOT NULL, + endpoint text NOT NULL, + payload jsonb NULL, + parameters jsonb NULL, + status int NULL ); - CREATE INDEX ON events_common.requests (url); - CREATE INDEX ON events_common.requests (duration); - CREATE INDEX requests_url_gin_idx ON events_common.requests USING GIN (url gin_trgm_ops); - CREATE INDEX ON events_common.requests (timestamp); - CREATE INDEX requests_url_gin_idx2 ON events_common.requests USING GIN (RIGHT(url, length(url) - (CASE - WHEN url LIKE 'http://%' - THEN 7 - WHEN url LIKE 'https://%' - THEN 8 - ELSE 0 END)) - gin_trgm_ops); + CREATE INDEX IF NOT EXISTS traces_user_id_idx ON traces (user_id); + CREATE INDEX IF NOT EXISTS traces_tenant_id_idx ON traces (tenant_id); --- --- events.sql --- - CREATE SCHEMA IF NOT EXISTS events; + CREATE TABLE IF NOT EXISTS metrics + ( + metric_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer REFERENCES users (user_id) ON DELETE SET NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp + ); + CREATE INDEX IF NOT EXISTS metrics_user_id_is_public_idx ON public.metrics (user_id, is_public); + CREATE TABLE IF NOT EXISTS metric_series + ( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp + ); + CREATE INDEX IF NOT EXISTS metric_series_metric_id_idx ON public.metric_series (metric_id); - CREATE TABLE events.pages + CREATE TABLE IF NOT EXISTS searches + ( + 
search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False + ); + + CREATE INDEX IF NOT EXISTS searches_user_id_is_public_idx ON public.searches (user_id, is_public); + CREATE INDEX IF NOT EXISTS searches_project_id_idx ON public.searches (project_id); + + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'alert_detection_method') THEN + CREATE TYPE alert_detection_method AS ENUM ('threshold', 'change'); + END IF; + CREATE TABLE IF NOT EXISTS alerts + ( + alert_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE, + name text NOT NULL, + description text NULL DEFAULT NULL, + active boolean NOT NULL DEFAULT TRUE, + detection_method alert_detection_method NOT NULL, + query jsonb NOT NULL, + deleted_at timestamp NULL DEFAULT NULL, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + options jsonb NOT NULL DEFAULT'{ + "renotifyInterval": 1440 + }'::jsonb + ); + CREATE INDEX IF NOT EXISTS alerts_project_id_idx ON alerts (project_id); + CREATE INDEX IF NOT EXISTS alerts_series_id_idx ON alerts (series_id); + + DROP TRIGGER IF EXISTS on_insert_or_update_or_delete ON alerts; + + CREATE TRIGGER on_insert_or_update_or_delete + AFTER INSERT OR UPDATE OR DELETE + ON alerts + FOR EACH ROW + EXECUTE PROCEDURE notify_alert(); + + RAISE NOTICE 'Created missing public schema tables'; + END IF; + END; + +$$ +LANGUAGE plpgsql; + + +DO +$$ + BEGIN + IF (with to_check (name) as ( + values ('clicks'), + ('errors'), + ('graphql'), + ('inputs'), + 
('pages'), + ('performance'), + ('resources'), + ('state_actions') + ) + select bool_and(exists(select * + from information_schema.tables t + where table_schema = 'events' + AND table_name = to_check.name)) as all_present + from to_check) THEN + raise notice 'All events schema tables exists'; + ELSE + CREATE TABLE IF NOT EXISTS events.pages ( session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, message_id bigint NOT NULL, @@ -720,34 +891,49 @@ $$ ttfb integer DEFAULT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.pages (session_id); - CREATE INDEX pages_base_path_gin_idx ON events.pages USING GIN (base_path gin_trgm_ops); - CREATE INDEX pages_base_referrer_gin_idx ON events.pages USING GIN (base_referrer gin_trgm_ops); - CREATE INDEX ON events.pages (timestamp); - CREATE INDEX pages_base_path_gin_idx2 ON events.pages USING GIN (RIGHT(base_path, length(base_path) - 1) gin_trgm_ops); - CREATE INDEX pages_base_path_idx ON events.pages (base_path); - CREATE INDEX pages_base_path_idx2 ON events.pages (RIGHT(base_path, length(base_path) - 1)); - CREATE INDEX pages_base_referrer_idx ON events.pages (base_referrer); - CREATE INDEX pages_base_referrer_gin_idx2 ON events.pages USING GIN (RIGHT(base_referrer, - length(base_referrer) - (CASE - WHEN base_referrer LIKE 'http://%' - THEN 7 - WHEN base_referrer LIKE 'https://%' - THEN 8 - ELSE 0 END)) - gin_trgm_ops); - CREATE INDEX ON events.pages (response_time); - CREATE INDEX ON events.pages (response_end); - CREATE INDEX pages_path_gin_idx ON events.pages USING GIN (path gin_trgm_ops); - CREATE INDEX pages_path_idx ON events.pages (path); - CREATE INDEX pages_visually_complete_idx ON events.pages (visually_complete) WHERE visually_complete > 0; - CREATE INDEX pages_dom_building_time_idx ON events.pages (dom_building_time) WHERE dom_building_time > 0; - CREATE INDEX pages_load_time_idx ON events.pages (load_time) WHERE load_time > 0; - CREATE INDEX 
pages_base_path_session_id_timestamp_idx ON events.pages (base_path, session_id, timestamp); - CREATE INDEX pages_session_id_timestamp_idx ON events.pages (session_id, timestamp); - CREATE INDEX pages_base_path_base_pathLNGT2_idx ON events.pages (base_path) WHERE length(base_path) > 2; + CREATE INDEX IF NOT EXISTS pages_session_id_idx ON events.pages (session_id); + CREATE INDEX IF NOT EXISTS pages_base_path_gin_idx ON events.pages USING GIN (base_path gin_trgm_ops); + CREATE INDEX IF NOT EXISTS pages_base_referrer_gin_idx ON events.pages USING GIN (base_referrer gin_trgm_ops); + CREATE INDEX IF NOT EXISTS pages_timestamp_idx ON events.pages (timestamp); + CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_idx ON events.pages (session_id, timestamp); + CREATE INDEX IF NOT EXISTS pages_base_path_gin_idx2 ON events.pages USING GIN (RIGHT(base_path, length(base_path) - 1) gin_trgm_ops); + CREATE INDEX IF NOT EXISTS pages_base_path_idx ON events.pages (base_path); + CREATE INDEX IF NOT EXISTS pages_base_path_idx2 ON events.pages (RIGHT(base_path, length(base_path) - 1)); + CREATE INDEX IF NOT EXISTS pages_base_referrer_idx ON events.pages (base_referrer); + CREATE INDEX IF NOT EXISTS pages_base_referrer_gin_idx2 ON events.pages USING GIN (RIGHT(base_referrer, + length(base_referrer) - + (CASE + WHEN base_referrer LIKE 'http://%' + THEN 7 + WHEN base_referrer LIKE 'https://%' + THEN 8 + ELSE 0 END)) + gin_trgm_ops); + CREATE INDEX IF NOT EXISTS pages_response_time_idx ON events.pages (response_time); + CREATE INDEX IF NOT EXISTS pages_response_end_idx ON events.pages (response_end); + CREATE INDEX IF NOT EXISTS pages_path_gin_idx ON events.pages USING GIN (path gin_trgm_ops); + CREATE INDEX IF NOT EXISTS pages_path_idx ON events.pages (path); + CREATE INDEX IF NOT EXISTS pages_visually_complete_idx ON events.pages (visually_complete) WHERE visually_complete > 0; + CREATE INDEX IF NOT EXISTS pages_dom_building_time_idx ON events.pages (dom_building_time) WHERE 
dom_building_time > 0; + CREATE INDEX IF NOT EXISTS pages_load_time_idx ON events.pages (load_time) WHERE load_time > 0; + CREATE INDEX IF NOT EXISTS pages_first_contentful_paint_time_idx ON events.pages (first_contentful_paint_time) WHERE first_contentful_paint_time > 0; + CREATE INDEX IF NOT EXISTS pages_dom_content_loaded_time_idx ON events.pages (dom_content_loaded_time) WHERE dom_content_loaded_time > 0; + CREATE INDEX IF NOT EXISTS pages_first_paint_time_idx ON events.pages (first_paint_time) WHERE first_paint_time > 0; + CREATE INDEX IF NOT EXISTS pages_ttfb_idx ON events.pages (ttfb) WHERE ttfb > 0; + CREATE INDEX IF NOT EXISTS pages_time_to_interactive_idx ON events.pages (time_to_interactive) WHERE time_to_interactive > 0; + CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_loadgt0NN_idx ON events.pages (session_id, timestamp) WHERE load_time > 0 AND load_time IS NOT NULL; + CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_visualgt0nn_idx ON events.pages (session_id, timestamp) WHERE visually_complete > 0 AND visually_complete IS NOT NULL; + CREATE INDEX IF NOT EXISTS pages_timestamp_metgt0_idx ON events.pages (timestamp) WHERE response_time > 0 OR + first_paint_time > 0 OR + dom_content_loaded_time > 0 OR + ttfb > 0 OR + time_to_interactive > 0; + CREATE INDEX IF NOT EXISTS pages_session_id_speed_indexgt0nn_idx ON events.pages (session_id, speed_index) WHERE speed_index > 0 AND speed_index IS NOT NULL; + CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_dom_building_timegt0nn_idx ON events.pages (session_id, timestamp, dom_building_time) WHERE dom_building_time > 0 AND dom_building_time IS NOT NULL; + CREATE INDEX IF NOT EXISTS pages_base_path_session_id_timestamp_idx ON events.pages (base_path, session_id, timestamp); + CREATE INDEX IF NOT EXISTS pages_base_path_base_pathLNGT2_idx ON events.pages (base_path) WHERE length(base_path) > 2; - CREATE TABLE events.clicks + CREATE TABLE IF NOT EXISTS events.clicks ( session_id bigint NOT NULL 
REFERENCES sessions (session_id) ON DELETE CASCADE, message_id bigint NOT NULL, @@ -757,18 +943,18 @@ $$ selector text DEFAULT '' NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.clicks (session_id); - CREATE INDEX ON events.clicks (label); - CREATE INDEX clicks_label_gin_idx ON events.clicks USING GIN (label gin_trgm_ops); - CREATE INDEX ON events.clicks (timestamp); - CREATE INDEX clicks_label_session_id_timestamp_idx ON events.clicks (label, session_id, timestamp); - CREATE INDEX clicks_url_idx ON events.clicks (url); - CREATE INDEX clicks_url_gin_idx ON events.clicks USING GIN (url gin_trgm_ops); - CREATE INDEX clicks_url_session_id_timestamp_selector_idx ON events.clicks (url, session_id, timestamp, selector); - CREATE INDEX clicks_session_id_timestamp_idx ON events.clicks (session_id, timestamp); + CREATE INDEX IF NOT EXISTS clicks_session_id_idx ON events.clicks (session_id); + CREATE INDEX IF NOT EXISTS clicks_label_idx ON events.clicks (label); + CREATE INDEX IF NOT EXISTS clicks_label_gin_idx ON events.clicks USING GIN (label gin_trgm_ops); + CREATE INDEX IF NOT EXISTS clicks_timestamp_idx ON events.clicks (timestamp); + CREATE INDEX IF NOT EXISTS clicks_label_session_id_timestamp_idx ON events.clicks (label, session_id, timestamp); + CREATE INDEX IF NOT EXISTS clicks_url_idx ON events.clicks (url); + CREATE INDEX IF NOT EXISTS clicks_url_gin_idx ON events.clicks USING GIN (url gin_trgm_ops); + CREATE INDEX IF NOT EXISTS clicks_url_session_id_timestamp_selector_idx ON events.clicks (url, session_id, timestamp, selector); + CREATE INDEX IF NOT EXISTS clicks_session_id_timestamp_idx ON events.clicks (session_id, timestamp); - CREATE TABLE events.inputs + CREATE TABLE IF NOT EXISTS events.inputs ( session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, message_id bigint NOT NULL, @@ -777,14 +963,14 @@ $$ value text DEFAULT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.inputs 
(session_id); - CREATE INDEX ON events.inputs (label, value); - CREATE INDEX inputs_label_gin_idx ON events.inputs USING GIN (label gin_trgm_ops); - CREATE INDEX inputs_label_idx ON events.inputs (label); - CREATE INDEX ON events.inputs (timestamp); - CREATE INDEX inputs_label_session_id_timestamp_idx ON events.inputs (label, session_id, timestamp); + CREATE INDEX IF NOT EXISTS inputs_session_id_idx ON events.inputs (session_id); + CREATE INDEX IF NOT EXISTS inputs_label_value_idx ON events.inputs (label, value); + CREATE INDEX IF NOT EXISTS inputs_label_gin_idx ON events.inputs USING GIN (label gin_trgm_ops); + CREATE INDEX IF NOT EXISTS inputs_label_idx ON events.inputs (label); + CREATE INDEX IF NOT EXISTS inputs_timestamp_idx ON events.inputs (timestamp); + CREATE INDEX IF NOT EXISTS inputs_label_session_id_timestamp_idx ON events.inputs (label, session_id, timestamp); - CREATE TABLE events.errors + CREATE TABLE IF NOT EXISTS events.errors ( session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, message_id bigint NOT NULL, @@ -792,12 +978,16 @@ $$ error_id text NOT NULL REFERENCES errors (error_id) ON DELETE CASCADE, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.errors (session_id); - CREATE INDEX ON events.errors (timestamp); - CREATE INDEX errors_error_id_idx ON events.errors (error_id); + CREATE INDEX IF NOT EXISTS errors_session_id_idx ON events.errors (session_id); + CREATE INDEX IF NOT EXISTS errors_timestamp_idx ON events.errors (timestamp); + CREATE INDEX IF NOT EXISTS errors_session_id_timestamp_error_id_idx ON events.errors (session_id, timestamp, error_id); + CREATE INDEX IF NOT EXISTS errors_error_id_timestamp_idx ON events.errors (error_id, timestamp); + CREATE INDEX IF NOT EXISTS errors_timestamp_error_id_session_id_idx ON events.errors (timestamp, error_id, session_id); + CREATE INDEX IF NOT EXISTS errors_error_id_timestamp_session_id_idx ON events.errors (error_id, timestamp, session_id); + CREATE INDEX 
IF NOT EXISTS errors_error_id_idx ON events.errors (error_id); - CREATE TABLE events.graphql + CREATE TABLE IF NOT EXISTS events.graphql ( session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, message_id bigint NOT NULL, @@ -805,11 +995,11 @@ $$ name text NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.graphql (name); - CREATE INDEX graphql_name_gin_idx ON events.graphql USING GIN (name gin_trgm_ops); - CREATE INDEX ON events.graphql (timestamp); + CREATE INDEX IF NOT EXISTS graphql_name_idx ON events.graphql (name); + CREATE INDEX IF NOT EXISTS graphql_name_gin_idx ON events.graphql USING GIN (name gin_trgm_ops); + CREATE INDEX IF NOT EXISTS graphql_timestamp_idx ON events.graphql (timestamp); - CREATE TABLE events.state_actions + CREATE TABLE IF NOT EXISTS events.state_actions ( session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, message_id bigint NOT NULL, @@ -817,13 +1007,21 @@ $$ name text NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.state_actions (name); - CREATE INDEX state_actions_name_gin_idx ON events.state_actions USING GIN (name gin_trgm_ops); - CREATE INDEX ON events.state_actions (timestamp); + CREATE INDEX IF NOT EXISTS state_actions_name_idx ON events.state_actions (name); + CREATE INDEX IF NOT EXISTS state_actions_name_gin_idx ON events.state_actions USING GIN (name gin_trgm_ops); + CREATE INDEX IF NOT EXISTS state_actions_timestamp_idx ON events.state_actions (timestamp); - CREATE TYPE events.resource_type AS ENUM ('other', 'script', 'stylesheet', 'fetch', 'img', 'media'); - CREATE TYPE events.resource_method AS ENUM ('GET' , 'HEAD' , 'POST' , 'PUT' , 'DELETE' , 'CONNECT' , 'OPTIONS' , 'TRACE' , 'PATCH' ); - CREATE TABLE events.resources + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'resource_type') THEN + CREATE TYPE events.resource_type AS ENUM ('other', 'script', 'stylesheet', 'fetch', 'img', 'media'); + END IF; + IF 
NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'resource_method') THEN + CREATE TYPE events.resource_method AS ENUM ('GET' , 'HEAD' , 'POST' , 'PUT' , 'DELETE' , 'CONNECT' , 'OPTIONS' , 'TRACE' , 'PATCH' ); + END IF; + CREATE TABLE IF NOT EXISTS events.resources ( session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, message_id bigint NOT NULL, @@ -842,21 +1040,28 @@ $$ decoded_body_size integer NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.resources (session_id); - CREATE INDEX ON events.resources (timestamp); - CREATE INDEX ON events.resources (success); - CREATE INDEX ON events.resources (status); - CREATE INDEX ON events.resources (type); - CREATE INDEX ON events.resources (duration) WHERE duration > 0; - CREATE INDEX ON events.resources (url_host); + CREATE INDEX IF NOT EXISTS resources_session_id_idx ON events.resources (session_id); + CREATE INDEX IF NOT EXISTS resources_status_idx ON events.resources (status); + CREATE INDEX IF NOT EXISTS resources_type_idx ON events.resources (type); + CREATE INDEX IF NOT EXISTS resources_duration_durationgt0_idx ON events.resources (duration) WHERE duration > 0; + CREATE INDEX IF NOT EXISTS resources_url_host_idx ON events.resources (url_host); + CREATE INDEX IF NOT EXISTS resources_timestamp_idx ON events.resources (timestamp); + CREATE INDEX IF NOT EXISTS resources_success_idx ON events.resources (success); - CREATE INDEX resources_url_gin_idx ON events.resources USING GIN (url gin_trgm_ops); - CREATE INDEX resources_url_idx ON events.resources (url); - CREATE INDEX resources_url_hostpath_gin_idx ON events.resources USING GIN (url_hostpath gin_trgm_ops); - CREATE INDEX resources_url_hostpath_idx ON events.resources (url_hostpath); + CREATE INDEX IF NOT EXISTS resources_url_gin_idx ON events.resources USING GIN (url gin_trgm_ops); + CREATE INDEX IF NOT EXISTS resources_url_idx ON events.resources (url); + CREATE INDEX IF NOT EXISTS 
resources_url_hostpath_gin_idx ON events.resources USING GIN (url_hostpath gin_trgm_ops); + CREATE INDEX IF NOT EXISTS resources_url_hostpath_idx ON events.resources (url_hostpath); + CREATE INDEX IF NOT EXISTS resources_timestamp_type_durationgt0NN_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL; + CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_idx ON events.resources (session_id, timestamp); + CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_type_idx ON events.resources (session_id, timestamp, type); + CREATE INDEX IF NOT EXISTS resources_timestamp_type_durationgt0NN_noFetch_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL AND type != 'fetch'; + CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_url_host_fail_idx ON events.resources (session_id, timestamp, url_host) WHERE success = FALSE; + CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_url_host_firstparty_idx ON events.resources (session_id, timestamp, url_host) WHERE type IN ('fetch', 'script'); + CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_duration_durationgt0NN_img_idx ON events.resources (session_id, timestamp, duration) WHERE duration > 0 AND duration IS NOT NULL AND type = 'img'; + CREATE INDEX IF NOT EXISTS resources_timestamp_session_id_idx ON events.resources (timestamp, session_id); - - CREATE TABLE events.performance + CREATE TABLE IF NOT EXISTS events.performance ( session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, timestamp bigint NOT NULL, @@ -875,62 +1080,93 @@ $$ max_used_js_heap_size bigint NOT NULL, PRIMARY KEY (session_id, message_id) ); - --- --- autocomplete.sql --- - - CREATE TABLE autocomplete - ( - value text NOT NULL, - type text NOT NULL, - project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE - ); - - CREATE unique index autocomplete_unique ON autocomplete (project_id, value, type); - CREATE index 
autocomplete_project_id_idx ON autocomplete (project_id); - CREATE INDEX autocomplete_type_idx ON public.autocomplete (type); - CREATE INDEX autocomplete_value_gin_idx ON public.autocomplete USING GIN (value gin_trgm_ops); - --- --- jobs.sql --- - CREATE TYPE job_status AS ENUM ('scheduled','running','cancelled','failed','completed'); - CREATE TYPE job_action AS ENUM ('delete_user_data'); - CREATE TABLE jobs - ( - job_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, - description text NOT NULL, - status job_status NOT NULL, - project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, - action job_action NOT NULL, - reference_id text NOT NULL, - created_at timestamp default timezone('utc'::text, now()) NOT NULL, - updated_at timestamp default timezone('utc'::text, now()) NULL, - start_at timestamp NOT NULL, - errors text NULL - ); - CREATE INDEX ON jobs (status); - CREATE INDEX ON jobs (start_at); - - - CREATE TABLE traces - ( - user_id integer NULL REFERENCES users (user_id) ON DELETE CASCADE, - tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, - created_at bigint NOT NULL DEFAULT (EXTRACT(EPOCH FROM now() at time zone 'utc') * 1000)::bigint, - auth text NULL, - action text NOT NULL, - method text NOT NULL, - path_format text NOT NULL, - endpoint text NOT NULL, - payload jsonb NULL, - parameters jsonb NULL, - status int NULL - ); - CREATE INDEX traces_user_id_idx ON traces (user_id); - CREATE INDEX traces_tenant_id_idx ON traces (tenant_id); - - raise notice 'DB created'; + CREATE INDEX IF NOT EXISTS performance_session_id_idx ON events.performance (session_id); + CREATE INDEX IF NOT EXISTS performance_timestamp_idx ON events.performance (timestamp); + CREATE INDEX IF NOT EXISTS performance_session_id_timestamp_idx ON events.performance (session_id, timestamp); + CREATE INDEX IF NOT EXISTS performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0; + CREATE INDEX IF NOT EXISTS 
performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0; END IF; END; - $$ LANGUAGE plpgsql; + + +DO +$$ + BEGIN + IF (with to_check (name) as ( + values ('customs'), + ('issues'), + ('requests') + ) + select bool_and(exists(select * + from information_schema.tables t + where table_schema = 'events_common' + AND table_name = to_check.name)) as all_present + from to_check) THEN + raise notice 'All events_common schema tables exists'; + ELSE + IF NOT EXISTS(SELECT * + FROM pg_type typ + WHERE typ.typname = 'custom_level') THEN + CREATE TYPE events_common.custom_level AS ENUM ('info','error'); + END IF; + CREATE TABLE IF NOT EXISTS events_common.customs + ( + session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, + timestamp bigint NOT NULL, + seq_index integer NOT NULL, + name text NOT NULL, + payload jsonb NOT NULL, + level events_common.custom_level NOT NULL DEFAULT 'info', + PRIMARY KEY (session_id, timestamp, seq_index) + ); + CREATE INDEX IF NOT EXISTS customs_name_idx ON events_common.customs (name); + CREATE INDEX IF NOT EXISTS customs_name_gin_idx ON events_common.customs USING GIN (name gin_trgm_ops); + CREATE INDEX IF NOT EXISTS customs_timestamp_idx ON events_common.customs (timestamp); + + + CREATE TABLE IF NOT EXISTS events_common.issues + ( + session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, + timestamp bigint NOT NULL, + seq_index integer NOT NULL, + issue_id text NOT NULL REFERENCES issues (issue_id) ON DELETE CASCADE, + payload jsonb DEFAULT NULL, + PRIMARY KEY (session_id, timestamp, seq_index) + ); + CREATE INDEX IF NOT EXISTS issues_issue_id_timestamp_idx ON events_common.issues (issue_id, timestamp); + CREATE INDEX IF NOT EXISTS issues_timestamp_idx ON events_common.issues (timestamp); + + + CREATE TABLE IF NOT EXISTS events_common.requests + ( + session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, + 
timestamp bigint NOT NULL, + seq_index integer NOT NULL, + url text NOT NULL, + duration integer NOT NULL, + success boolean NOT NULL, + PRIMARY KEY (session_id, timestamp, seq_index) + ); + CREATE INDEX IF NOT EXISTS requests_url_idx ON events_common.requests (url); + CREATE INDEX IF NOT EXISTS requests_duration_idx ON events_common.requests (duration); + CREATE INDEX IF NOT EXISTS requests_url_gin_idx ON events_common.requests USING GIN (url gin_trgm_ops); + CREATE INDEX IF NOT EXISTS requests_timestamp_idx ON events_common.requests (timestamp); + CREATE INDEX IF NOT EXISTS requests_url_gin_idx2 ON events_common.requests USING GIN (RIGHT(url, + length(url) - + (CASE + WHEN url LIKE 'http://%' + THEN 7 + WHEN url LIKE 'https://%' + THEN 8 + ELSE 0 END)) + gin_trgm_ops); + CREATE INDEX IF NOT EXISTS requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE; + END IF; + END; +$$ +LANGUAGE plpgsql; + + COMMIT; \ No newline at end of file diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/.helmignore b/ee/scripts/helm/helm/databases/charts/clickhouse/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/Chart.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/Chart.yaml new file mode 100644 index 000000000..c7a0eb3d6 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: clickhouse +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 1.16.0 diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/templates/_helpers.tpl b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/_helpers.tpl new file mode 100644 index 000000000..44cfadff0 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. 
+*/}} +{{- define "clickhouse.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "clickhouse.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "clickhouse.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "clickhouse.labels" -}} +helm.sh/chart: {{ include "clickhouse.chart" . }} +{{ include "clickhouse.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "clickhouse.selectorLabels" -}} +app.kubernetes.io/name: {{ include "clickhouse.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "clickhouse.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "clickhouse.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/templates/service.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/service.yaml new file mode 100644 index 000000000..4496f556c --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: clickhouse + labels: + {{- include "clickhouse.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.service.webPort }} + targetPort: web + protocol: TCP + name: web + - port: {{ .Values.service.dataPort }} + targetPort: data + protocol: TCP + name: data + selector: + {{- include "clickhouse.selectorLabels" . | nindent 4 }} diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/templates/serviceaccount.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/serviceaccount.yaml new file mode 100644 index 000000000..1f1183598 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "clickhouse.serviceAccountName" . }} + labels: + {{- include "clickhouse.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/templates/statefulset.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/statefulset.yaml new file mode 100644 index 000000000..392976eec --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/statefulset.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "clickhouse.fullname" . }} + labels: + {{- include "clickhouse.labels" . 
| nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + serviceName: {{ include "clickhouse.fullname" . }} + selector: + matchLabels: + {{- include "clickhouse.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "clickhouse.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "clickhouse.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + env: + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 9000 + name: web + - containerPort: 8123 + name: data + volumeMounts: + - name: ch-volume + mountPath: /var/lib/mydata + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: ch-volume + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storageSize }} diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/values.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/values.yaml new file mode 100644 index 000000000..4cba1c1f8 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/values.yaml @@ -0,0 +1,62 @@ +# Default values for clickhouse. 
+# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: yandex/clickhouse-server + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "20.9" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +env: {} + +service: + webPort: 9000 + dataPort: 8123 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} +storageSize: 8G diff --git a/ee/scripts/helm/helm/databases/charts/kafka/.helmignore b/ee/scripts/helm/helm/databases/charts/kafka/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ee/scripts/helm/helm/databases/charts/kafka/Chart.yaml b/ee/scripts/helm/helm/databases/charts/kafka/Chart.yaml new file mode 100755 index 000000000..165e70d55 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 2.6.0 +description: Apache Kafka is a distributed streaming platform. +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/kafka +icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-110x117.png +keywords: +- kafka +- zookeeper +- streaming +- producer +- consumer +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: kafka +sources: +- https://github.com/bitnami/bitnami-docker-kafka +- https://kafka.apache.org/ +version: 11.8.6 diff --git a/ee/scripts/helm/helm/databases/charts/kafka/README.md b/ee/scripts/helm/helm/databases/charts/kafka/README.md new file mode 100755 index 000000000..5584bd43d --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/README.md @@ -0,0 +1,737 @@ +# Kafka + +[Kafka](https://www.kafka.org/) is a distributed streaming platform used for building real-time data pipelines and streaming apps. It is horizontally scalable, fault-tolerant, wicked fast, and runs in production in thousands of companies. + +## TL;DR + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +## Introduction + +This chart bootstraps a [Kafka](https://github.com/bitnami/bitnami-docker-kafka) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. 
This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +The following tables lists the configurable parameters of the Kafka chart and their default values per section/component: + +### Global parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | + +### Common parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `nameOverride` | String to partially override kafka.fullname | `nil` | +| `fullnameOverride` | String to fully override kafka.fullname | `nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `nil` (evaluated as a template) | + +### Kafka parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `image.registry` | Kafka image registry | `docker.io` | +| `image.repository` | Kafka image name | `bitnami/kafka` | +| `image.tag` | Kafka image tag | 
`{TAG_NAME}` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | +| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `nil` | +| `existingConfigmap` | Name of existing ConfigMap with Kafka configuration | `nil` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers. | `nil` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file. | `nil` | +| `heapOpts` | Kafka's Java Heap size | `-Xmx1024m -Xms1024m` | +| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | +| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments | `false` | +| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `10000` | +| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | +| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | +| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | +| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | +| `logSegmentBytes` | The maximum size of a log segment file. 
When this size is reached a new log segment will be created | `_1073741824` | +| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` | +| `maxMessageBytes` | The largest record batch size allowed by Kafka | `1000012` | +| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | +| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | +| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | +| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | +| `numIoThreads` | The number of threads doing disk I/O | `8` | +| `numNetworkThreads` | The number of threads handling network requests | `3` | +| `numPartitions` | The default number of log partitions per topic | `1` | +| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | +| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | +| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | +| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | +| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to Zookeeper | `6000` | +| `extraEnvVars` | Extra environment variables to add to kafka pods | `[]` | +| `extraVolumes` | Extra volume(s) to add to Kafka statefulset | `[]` | +| `extraVolumeMounts` | Extra volumeMount(s) to add to Kafka containers | `[]` | +| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. 
Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.saslMechanisms` | SASL mechanisms when either `auth.interBrokerProtocol` or `auth.clientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | +| `auth.saslInterBrokerMechanism` | SASL mechanism to use as inter broker protocol, it must be included at `auth.saslMechanisms` | `plain` | +| `auth.jksSecret` | Name of the existing secret containing the truststore and one keystore per Kafka broker you have in the cluster | `nil` | +| `auth.jksPassword` | Password to access the JKS files when they are password-protected | `nil` | +| `auth.tlsEndpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `auth.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | +| `auth.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `nil` | +| `auth.jaas.zookeeperUser` | Kafka Zookeeper user for SASL authentication | `nil` | +| `auth.jaas.zookeeperPassword` | Kafka Zookeeper password for SASL authentication | `nil` | +| `auth.jaas.existingSecret` | Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser | `nil` | +| `auth.jaas.clientUsers` | List of Kafka client users to be created, separated by commas. This values will override `auth.jaas.clientUser` | `[]` | +| `auth.jaas.clientPasswords` | List of passwords for `auth.jaas.clientUsers`. It is mandatory to provide the passwords when using `auth.jaas.clientUsers` | `[]` | +| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` | +| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. 
Auto-calculated it's set to an empty array | `[]` | +| `listenerSecurityProtocolMap` | The protocol->listener mapping. Auto-calculated it's set to nil | `nil` | +| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | +| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | + +### Statefulset parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `replicaCount` | Number of Kafka nodes | `1` | +| `updateStrategy` | Update strategy for the stateful set | `RollingUpdate` | +| `rollingUpdatePartition` | Partition update strategy | `nil` | +| `podLabels` | Kafka pod labels | `{}` (evaluated as a template) | +| `podAnnotations` | Kafka Pod annotations | `{}` (evaluated as a template) | +| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) | +| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) | +| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) | +| `podSecurityContext` | Kafka pods' Security Context | `{}` | +| `containerSecurityContext` | Kafka containers' Security Context | `{}` | +| `resources.limits` | The resources limits for Kafka containers | `{}` | +| `resources.requests` | The requested resources for Kafka containers | `{}` | +| `livenessProbe` | Liveness probe configuration for Kafka | `Check values.yaml file` | +| `readinessProbe` | Readiness probe configuration for Kafka | `Check values.yaml file` | +| `customLivenessProbe` | Custom Liveness probe configuration for Kafka | `{}` | +| `customReadinessProbe` | Custom Readiness probe configuration for Kafka | `{}` | +| `pdb.create` | 
Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `nil` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `1` | +| `command` | Override kafka container command | `['/scripts/setup.sh']` (evaluated as a template) | +| `args` | Override kafka container arguments | `[]` (evaluated as a template) | +| `sidecars` | Attach additional sidecar containers to the Kafka pod | `{}` | + +### Exposure parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | Kafka port for client connections | `9092` | +| `service.internalPort` | Kafka port for inter-broker connections | `9093` | +| `service.externalPort` | Kafka port for external connections | `9094` | +| `service.nodePorts.client` | Nodeport for client connections | `""` | +| `service.nodePorts.external` | Nodeport for external connections | `""` | +| `service.loadBalancerIP` | loadBalancerIP for Kafka Service | `nil` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `service.annotations` | Service annotations | `{}`(evaluated as a template) | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry (kubectl) | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image name (kubectl) | 
`bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (kubectl) | `{TAG_NAME}` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy (kubectl) | `Always` | +| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort or LoadBalancer | `LoadBalancer` | +| `externalAccess.service.port` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for Kafka brokers | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort | `nil` | +| `externalAccess.service.nodePorts` | Array of node ports used to configure Kafka external listener when service type is NodePort | `[]` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}`(evaluated as a template) | + +### Persistence parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that Zookeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template | `nil` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `nil` | +| 
`persistence.accessMode` | PVC Access Mode for Kafka data volume | `ReadWriteOnce` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}`(evaluated as a template) | + +### RBAC parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | Generated using the `kafka.fullname` template | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + +### Volume Permissions parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| 
`volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + +### Metrics parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image name | `bitnami/kafka-exporter` | +| `metrics.kafka.image.tag` | Kafka exporter image tag | `{TAG_NAME}` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `nil` | +| `metrics.kafka.resources.limits` | Kafka Exporter container resource limits | `{}` | +| `metrics.kafka.resources.requests` | Kafka Exporter container resource requests | `{}` | +| `metrics.kafka.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Kafka Exporter | `ClusterIP` | +| `metrics.kafka.service.port` | Kafka Exporter Prometheus port | `9308` | +| `metrics.kafka.service.nodePort` | Kubernetes HTTP node port | `""` | +| `metrics.kafka.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | +| `metrics.kafka.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | 
`nil` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image name | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag | `{TAG_NAME}` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` | +| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` | +| `metrics.jmx.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter | `ClusterIP` | +| `metrics.jmx.service.port` | JMX Exporter Prometheus port | `5556` | +| `metrics.jmx.service.nodePort` | Kubernetes HTTP node port | `""` | +| `metrics.jmx.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | +| `metrics.jmx.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX Exporter | (see `values.yaml`) | +| `metrics.jmx.config` | Configuration file for JMX exporter | (see `values.yaml`) | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `nil` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `monitoring` | +| `metrics.serviceMonitor.interval` | Interval at which 
metrics should be scraped | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `nil` (Prometheus Operator default value) | + +### Zookeeper chart parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `zookeeper.enabled` | Switch to enable or disable the Zookeeper helm chart | `true` | +| `zookeeper.persistence.enabled` | Enable Zookeeper persistence using PVC | `true` | +| `externalZookeeper.servers` | Server or list of external Zookeeper servers to use | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set replicaCount=3 \ + bitnami/kafka +``` + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml bitnami/kafka +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. 
+ +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Number of Kafka nodes: + +```diff +- replicaCount: 1 ++ replicaCount: 3 +``` + +- Allow to use the PLAINTEXT listener: + +```diff +- allowPlaintextListener: true ++ allowPlaintextListener: false +``` + +- Default replication factors for automatically created topics: + +```diff +- defaultReplicationFactor: 1 ++ defaultReplicationFactor: 3 +``` + +- Allow auto creation of topics. + +```diff +- autoCreateTopicsEnable: true ++ autoCreateTopicsEnable: false +``` + +- The replication factor for the offsets topic: + +```diff +- offsetsTopicReplicationFactor: 1 ++ offsetsTopicReplicationFactor: 3 +``` + +- The replication factor for the transaction topic: + +```diff +- transactionStateLogReplicationFactor: 1 ++ transactionStateLogReplicationFactor: 3 +``` + +- Overridden min.insync.replicas config for the transaction topic: + +```diff +- transactionStateLogMinIsr: 1 ++ transactionStateLogMinIsr: 3 +``` + +- Switch to enable the Kafka SASL authentication on client and inter-broker communications: + +```diff +- auth.clientProtocol: plaintext ++ auth.clientProtocol: sasl +- auth.interBrokerProtocol: plaintext ++ auth.interBrokerProtocol: sasl +``` + +- Enable Zookeeper authentication: + +```diff ++ auth.jaas.zookeeperUser: zookeeperUser ++ auth.jaas.zookeeperPassword: zookeeperPassword +- zookeeper.auth.enabled: false ++ zookeeper.auth.enabled: true ++ zookeeper.auth.clientUser: zookeeperUser ++ zookeeper.auth.clientPassword: zookeeperPassword ++ zookeeper.auth.serverUsers: zookeeperUser ++ zookeeper.auth.serverPasswords: zookeeperPassword +``` + +- Enable Pod Disruption Budget: + +```diff +- pdb.create: false ++ pdb.create: true +``` + +- Create a separate Kafka metrics exporter: + +```diff +- 
metrics.kafka.enabled: false ++ metrics.kafka.enabled: true +``` + +- Expose JMX metrics to Prometheus: + +```diff +- metrics.jmx.enabled: false ++ metrics.jmx.enabled: true +``` + +- Enable Zookeeper metrics: + +```diff ++ zookeeper.metrics.enabled: true +``` + +To horizontally scale this chart once it has been deployed, you can upgrade the statefulset using a new value for the `replicaCount` parameter. Please note that, when enabling TLS encryption, you must update your JKS secret including the keystore for the new replicas. + +### Setting custom parameters + +Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property. + +### Listeners configuration + +This chart allows you to automatically configure Kafka with 3 listeners: + +- One for inter-broker communications. +- A second one for communications with clients within the K8s cluster. +- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-clusters) for more information. + +For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed. + +### Enable security for Kafka and Zookeeper + +You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. 
This table shows the available protocols and the security they provide: + +| Method | Authentication | Encryption via TLS | +|-----------|-------------------------------|--------------------| +| plaintext | None | No | +| tls | None | Yes | +| mtls | Yes (two-way authentication) | Yes | +| sasl | Yes (via SASL) | No | +| sasl_tls | Yes (via SASL) | Yes | + +If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below: + +- `auth.jaas.clientUsers`/`auth.jaas.clientPasswords`: when enabling SASL authentication for communications with clients. +- `auth.jaas.interBrokerUser`/`auth.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications. +- `auth.jaas.zookeeperUser`/`auth.jaas.zookeeperPassword`: In the case that the Zookeeper chart is deployed with SASL authentication enabled. + +In order to configure TLS authentication/encryption, you **must** create a secret containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. Then, you need to pass the secret name with the `--auth.jksSecret` parameter when deploying the chart. + +> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.jksPassword` parameter to provide your password. + +For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers use the command below to create the secret: + +```console +kubectl create secret generic kafka-jks --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks +``` + +> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS files generation. 
+ +As an alternative to manually create the secret before installing the chart, you can put your JKS files inside the chart folder `files/jks`, and a secret including them will be generated. Please note this alternative requires to have the chart downloaded locally, so you will have to clone this repository or fetch the chart before installing it. + +You can deploy the chart with authentication using the following parameters: + +```console +replicaCount=2 +auth.clientProtocol=sasl +auth.interBrokerProtocol=tls +auth.certificatesSecret=kafka-jks +auth.certificatesPassword=jksPassword +auth.jaas.clientUsers[0]=brokerUser +auth.jaas.clientPassword[0]=brokerPassword +auth.jaas.zookeeperUser=zookeeperUser +auth.jaas.zookeeperPassword=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +``` + +If you also enable exposing metrics using the Kafka exporter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificate used to sign the brokers certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags: + +```console +metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""} +``` + +### Accessing Kafka brokers from outside the cluster + +In order to access Kafka Brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per kafka pod will be created. + +There are two ways of configuring external access. Using LoadBalancer services or using NodePort services. 
+ +#### Using LoadBalancer services + +You have two alternatives to use LoadBalancer services: + +- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discover them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.port=9094 +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the load balancer IPs: + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.port=9094 +externalAccess.service.loadBalancerIPs[0]='external-ip-1' +externalAccess.service.loadBalancerIPs[1]='external-ip-2' +``` + +Note: You need to know in advance the load balancer IPs so each Kafka broker advertised listener is configured with it. + +#### Using NodePort services + +You have two alternatives to use NodePort services: + +- Option A) Use random node ports using an **initContainer** that discover them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=NodePort +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the node ports: + +```console +externalAccess.enabled=true +externalAccess.service.type=NodePort +externalAccess.service.nodePorts[0]='node-port-1' +externalAccess.service.nodePorts[1]='node-port-2' +``` + +Note: You need to know in advance the node ports that will be exposed so each Kafka broker advertised listener is configured with it. + +The pod will try to get the external ip of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` is provided. 
+ +Following the aforementioned steps will also allow to connect the brokers from the outside using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections. + +### Sidecars + +If you have a need for additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such as Kafka Connect. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB: + +```yaml +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: |- + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + spec: + replicas: 1 + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 8 }} + app.kubernetes.io/component: connector + template: + metadata: + labels: {{- include "kafka.labels" . | nindent 10 }} + app.kubernetes.io/component: connector + spec: + containers: + - name: connect + image: KAFKA-CONNECT-IMAGE + imagePullPolicy: IfNotPresent + ports: + - name: connector + containerPort: 8083 + volumeMounts: + - name: configuration + mountPath: /opt/bitnami/kafka/config + volumes: + - name: configuration + configMap: + name: {{ include "kafka.fullname" . 
}}-connect + - apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + data: + connect-standalone.properties: |- + bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }} + ... + mongodb.properties: |- + connection.uri=mongodb://root:password@mongodb-hostname:27017 + ... + - apiVersion: v1 + kind: Service + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + spec: + ports: + - protocol: TCP + port: 8083 + targetPort: connector + selector: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: connector +``` + +You can create the Kafka Connect image using the Dockerfile below: + +```Dockerfile +FROM bitnami/kafka:latest +# Download MongoDB Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb +RUN mkdir -p /opt/bitnami/kafka/plugins && \ + cd /opt/bitnami/kafka/plugins && \ + curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar +CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties +``` + +## Persistence + +The [Bitnami Kafka](https://github.com/bitnami/bitnami-docker-kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence. 
+ +### Adjust permissions of persistent volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +## Upgrading + +### To 11.8.0 + +External access to brokers can now be achieved through the cluster's Kafka service. + +- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external` + +### To 11.7.0 + +The way to configure the users and passwords changed. Now it is allowed to create multiple users during the installation by providing the list of users and passwords. + +- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array). +- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array). + +### To 11.0.0 + +The way to configure listeners and authentication on Kafka is totally refactored allowing users to configure different authentication protocols on different listeners. Please check the sections [Listeners configuration](#listeners-configuration) and [Enable security for Kafka and Zookeeper](#enable-security-for-kafka-and-zookeeper) for more information. + +Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or disappeared in favor of new ones on this major version: + +- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters. 
+- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters. +- `auth.certificatesSecret` -> renamed to `auth.jksSecret`. +- `auth.certificatesPassword` -> renamed to `auth.jksPassword`. +- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`. +- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser` +- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword` +- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser` +- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword` +- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret` +- `service.sslPort` -> deprecated in favor of `service.internalPort` +- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort` +- `metrics.kafka.extraFlag` -> new parameter +- `metrics.kafka.certificatesSecret` -> new parameter + +### To 10.0.0 + +If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later. + +### To 9.0.0 + +Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. 
Here you can find some parameters that were renamed on this major version:
+
+```diff
+- securityContext.enabled
+- securityContext.fsGroup
+- securityContext.runAsUser
++ podSecurityContext
+- externalAccess.service.loadBalancerIP
++ externalAccess.service.loadBalancerIPs
+- externalAccess.service.nodePort
++ externalAccess.service.nodePorts
+- metrics.jmx.configMap.enabled
+- metrics.jmx.configMap.overrideConfig
++ metrics.jmx.config
+- metrics.jmx.configMap.overrideName
++ metrics.jmx.existingConfigmap
+```
+
+Port names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/).
+
+### To 8.0.0
+
+There is no backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
+
+### To 7.0.0
+
+Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments.
+Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka:
+
+```console
+helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false
+helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true
+```
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka:
+
+```console
+kubectl delete statefulset kafka-kafka --cascade=false
+kubectl delete statefulset kafka-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. 
The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/.helmignore b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/Chart.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/Chart.yaml new file mode 100755 index 000000000..c3b15dc5c --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 3.6.2 +description: A centralized service for maintaining configuration information, naming, + providing distributed synchronization, and providing group services for distributed + applications. 
+engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-110x117.png +keywords: +- zookeeper +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: zookeeper +sources: +- https://github.com/bitnami/bitnami-docker-zookeeper +- https://zookeeper.apache.org/ +version: 5.21.9 diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/README.md b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/README.md new file mode 100755 index 000000000..0291875ed --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/README.md @@ -0,0 +1,297 @@ +# ZooKeeper + +[ZooKeeper](https://zookeeper.apache.org/) is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. All of these kinds of services are used in some form or other by distributed applications. + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/zookeeper +``` + +## Introduction + +This chart bootstraps a [ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. 
+ +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/zookeeper +``` + +These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following tables lists the configurable parameters of the ZooKeeper chart and their default values per section/component: + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | + +### Common parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `nameOverride` | String to partially override zookeeper.fullname | `nil` | +| `fullnameOverride` | String to fully override zookeeper.fullname | 
`nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `schedulerName` | Kubernetes pod scheduler registry | `nil` (use the default-scheduler) | + +### Zookeeper chart parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `image.registry` | ZooKeeper image registry | `docker.io` | +| `image.repository` | ZooKeeper Image name | `bitnami/zookeeper` | +| `image.tag` | ZooKeeper Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `tickTime` | Basic time unit in milliseconds used by ZooKeeper for heartbeats | `2000` | +| `initLimit` | Time the ZooKeeper servers in quorum have to connect to a leader | `10` | +| `syncLimit` | How far out of date a server can be from a leader | `5` | +| `maxClientCnxns` | Number of concurrent connections that a single client may make to a single member | `60` | +| `maxSessionTimeout` | Maximum session timeout in milliseconds that the server will allow the client to negotiate. 
| `40000` | +| `autopurge.snapRetainCount` | Number of retains snapshots for autopurge | `3` | +| `autopurge.purgeInterval` | The time interval in hours for which the purge task has to be triggered | `0` | +| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands to use | `srvr, mntr` | +| `listenOnAllIPs` | Allow Zookeeper to listen for connections from its peers on all available IP addresses. | `false` | +| `allowAnonymousLogin` | Allow to accept connections from unauthenticated users | `yes` | +| `auth.existingSecret` | Use existing secret (ignores previous password) | `nil` | +| `auth.enabled` | Enable ZooKeeper auth | `false` | +| `auth.clientUser` | User that will use ZooKeeper clients to auth | `nil` | +| `auth.clientPassword` | Password that will use ZooKeeper clients to auth | `nil` | +| `auth.serverUsers` | List of user to be created | `nil` | +| `auth.serverPasswords` | List of passwords to assign to users when created | `nil` | +| `heapSize` | Size in MB for the Java Heap options (Xmx and XMs) | `[]` | +| `logLevel` | Log level of ZooKeeper server | `ERROR` | +| `jvmFlags` | Default JVMFLAGS for the ZooKeeper process | `nil` | +| `config` | Configure ZooKeeper with a custom zoo.conf file | `nil` | +| `dataLogDir` | Data log directory | `""` | + +### Statefulset parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `updateStrategy` | Update strategy for the statefulset | `RollingUpdate` | +| `rollingUpdatePartition` | Partition update strategy | `nil` | +| `podManagementPolicy` | Pod management policy | `Parallel` | +| `podLabels` | ZooKeeper pod labels | `{}` (evaluated as a template) | +| `podAnnotations` | ZooKeeper Pod annotations 
| `{}` (evaluated as a template) | +| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) | +| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) | +| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods | `""` | +| `securityContext.enabled` | Enable security context (ZooKeeper master pod) | `true` | +| `securityContext.fsGroup` | Group ID for the container (ZooKeeper master pod) | `1001` | +| `securityContext.runAsUser` | User ID for the container (ZooKeeper master pod) | `1001` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `livenessProbe` | Liveness probe configuration for ZooKeeper | Check `values.yaml` file | +| `readinessProbe` | Readiness probe configuration for ZooKeeper | Check `values.yaml` file | +| `extraVolumes` | Extra volumes | `nil` | +| `extraVolumeMounts` | Mount extra volume(s) | `nil` | +| `podDisruptionBudget.maxUnavailable` | Max number of pods down simultaneously | `1` | + +### Exposure parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | ZooKeeper port | `2181` | +| `service.followerPort` | ZooKeeper follower port | `2888` | +| `service.electionPort` | ZooKeeper election port | `3888` | +| `service.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `serviceAccount.create` | Enable creation of ServiceAccount for zookeeper pod | `false` | +| `serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | Generated using the `zookeeper.fullname` template | +| `service.tls.client_enable` | Enable tls for client connections | `false` | +| `service.tls.quorum_enable` | Enable tls for quorum protocol | `false` | +| `service.tls.disable_base_client_port` | Remove client port from service definitions. | `false` | +| `service.tls.client_port` | Service port for tls client connections | `3181` | +| `service.tls.client_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` | +| `service.tls.client_keystore_password` | KeyStore password. You can use environment variables. | `nil` | +| `service.tls.client_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` | +| `service.tls.client_truststore_password` | TrustStore password. You can use environment variables. | `nil` | +| `service.tls.quorum_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` | +| `service.tls.quorum_keystore_password` | KeyStore password. You can use environment variables. | `nil` | +| `service.tls.quorum_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` | +| `service.tls.quorum_truststore_password` | TrustStore password. You can use environment variables. 
| `nil` | +| `service.annotations` | Annotations for the Service | `{}` | +| `service.headless.annotations` | Annotations for the Headless Service | `{}` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | + +### Persistence parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `persistence.enabled` | Enable Zookeeper data persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` (evaluated as a template) | +| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `nil` | +| `persistence.accessMode` | PVC Access Mode for ZooKeeper data volume | `ReadWriteOnce` | +| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` (evaluated as a template) | +| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's Data log directory | `8Gi` | +| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for Zookeeper's Data log directory | `nil` (evaluated as a template) | + +### Volume Permissions parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry 
| `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources` | Init container resource requests/limit | `nil` | + +### Metrics parameters + +| Parameter | Description | Default | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `metrics.enabled` | Enable prometheus to access zookeeper metrics endpoint | `false` | +| `metrics.containerPort` | Port where a Jetty server will expose Prometheus metrics | `9141` | +| `metrics.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Jetty server exposing Prometheus metrics | `ClusterIP` | +| `metrics.service.port` | Prometheus metrics service port | `9141` | +| `metrics.service.annotations` | Service annotations for Prometheus to auto-discover the metrics endpoint | `{prometheus.io/scrape: "true", prometheus.io/port: "9141"}` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource | The Release Namespace | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `nil` (Prometheus Operator default value) | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `nil` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource | The Release Namespace | +| `metrics.prometheusRule.selector` | Prometheus instance selector labels | `nil` | +| `metrics.prometheusRule.rules` | Prometheus Rule definitions (see values.yaml for examples) | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set auth.clientUser=newUser \ + bitnami/zookeeper +``` + +The above command sets the ZooKeeper user to `newUser`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/zookeeper +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. 
You can use this file instead of the default one.
+
+- Number of ZooKeeper nodes:
+
+```diff
+- replicaCount: 1
++ replicaCount: 3
+```
+
+- Enable prometheus metrics:
+
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+### Log level
+
+You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable. By default, it is set to `ERROR` because each readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection.
+
+## Persistence
+
+The [Bitnami ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Data Log Directory
+
+You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string and it will result in the log being written to the data directory (Zookeeper's default behavior). 
+ +When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information. + +## Upgrading + +### To 5.21.0 + +A couple of parameters related to Zookeeper metrics were renamed or disappeared in favor of new ones: + +- `metrics.port` is renamed to `metrics.containerPort`. +- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`. + +### To 3.0.0 + +This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade +of the application, each node will need to have at least one snapshot file created in the data directory. If not, the +new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056) +in order to find ways to workaround this issue in case you are facing it. + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. 
The following example assumes that the release name is zookeeper: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt new file mode 100755 index 000000000..3cc2edbed --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt @@ -0,0 +1,57 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.auth.clientPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not specifying "auth.enabled=true" + you have most likely exposed the ZooKeeper service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "auth.clientPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +ZooKeeper can be accessed via port 2181 on the following DNS name from within your cluster: + + {{ template "zookeeper.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To connect to your ZooKeeper server run the following commands: + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "zookeeper.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}") + kubectl exec -it $POD_NAME -- zkCli.sh + +To connect to your ZooKeeper server from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "zookeeper.fullname" . }}) + zkCli.sh $NODE_IP:$NODE_PORT + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "zookeeper.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + zkCli.sh $SERVICE_IP:2181 + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "zookeeper.fullname" . }} 2181:2181 & + zkCli.sh 127.0.0.1:2181 + +{{- end }} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl new file mode 100755 index 000000000..f82502d69 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,212 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "zookeeper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "zookeeper.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "zookeeper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "zookeeper.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Zookeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "zookeeper.labels" -}} +app.kubernetes.io/name: {{ include "zookeeper.name" . }} +helm.sh/chart: {{ include "zookeeper.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "zookeeper.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "zookeeper.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "zookeeper.matchLabels" -}} +app.kubernetes.io/name: {{ include "zookeeper.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Return ZooKeeper Client Password +*/}} +{{- define "zookeeper.clientPassword" -}} +{{- if .Values.auth.clientPassword -}} + {{- .Values.auth.clientPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return ZooKeeper Servers Passwords +*/}} +{{- define "zookeeper.serverPasswords" -}} +{{- if .Values.auth.serverPasswords -}} + {{- .Values.auth.serverPasswords -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else 
logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "zookeeper.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml new file mode 100755 index 000000000..1a4061565 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if .Values.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template 
"zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- +{{ .Values.config | indent 4 }} +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml new file mode 100755 index 000000000..3e26ed6c8 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{ include "zookeeper.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "zookeeper.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml new file mode 100755 index 000000000..f7e30b4bc --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,43 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections to zookeeper + - ports: + - port: {{ .Values.service.port }} + from: + {{- if not .Values.networkPolicy.allowExternal }} + - podSelector: + matchLabels: + {{ include "zookeeper.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 14 }} + {{- else }} + - podSelector: + matchLabels: {} + {{- end }} + # Internal ports + - ports: &intranodes_ports + - port: {{ .Values.service.followerPort }} + - port: {{ .Values.service.electionPort }} + from: + - podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . 
| nindent 14 }} + egress: + - ports: *intranodes_ports + # Allow outbound connections from zookeeper nodes + +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..818950c66 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if gt $replicaCount 1 }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml new file mode 100755 index 000000000..9cda3985c --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "zookeeper.fullname" . 
}} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- range $key, $value := .Values.metrics.prometheusRule.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "zookeeper.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 6 }} +{{- end }} + diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml new file mode 100755 index 000000000..b3d727fec --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + client-password: {{ include "zookeeper.clientPassword" . | b64enc | quote }} + server-password: {{ include "zookeeper.serverPasswords" . 
| b64enc | quote }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml new file mode 100755 index 000000000..3f7ef39fd --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml new file mode 100755 index 000000000..5782dad59 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "zookeeper.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml new file mode 100755 index 000000000..fa1e5231f --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,334 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "zookeeper.fullname" . 
}}-headless + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if (eq "Recreate" .Values.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + template: + metadata: + name: {{ template "zookeeper.fullname" . }} + labels: {{- include "zookeeper.labels" . | nindent 8 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.podLabels }} + {{- include "zookeeper.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.podAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} + serviceAccountName: {{ template "zookeeper.serviceAccountName" . 
}} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "zookeeper.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "zookeeper.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "zookeeper.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - chown + args: + - -R + - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + - /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - {{ .Values.dataLogDir }} + {{- end }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- end }} + containers: + - name: zookeeper + image: {{ template "zookeeper.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - bash + - -ec + - | + # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname + HOSTNAME=`hostname -s` + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + ORD=${BASH_REMATCH[2]} + export ZOO_SERVER_ID=$((ORD+1)) + else + echo "Failed to get index from hostname $HOST" + exit 1 + fi + exec /entrypoint.sh /run.sh + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + env: + - name: ZOO_DATA_LOG_DIR + value: {{ .Values.dataLogDir | quote }} + - name: ZOO_PORT_NUMBER + value: {{ .Values.service.port | quote }} + - name: ZOO_TICK_TIME + value: {{ .Values.tickTime | quote }} + - name: ZOO_INIT_LIMIT + value: {{ .Values.initLimit | quote }} + - name: ZOO_SYNC_LIMIT + value: {{ .Values.syncLimit | quote }} + - name: ZOO_MAX_CLIENT_CNXNS + value: {{ .Values.maxClientCnxns | quote }} + - name: ZOO_4LW_COMMANDS_WHITELIST + value: {{ .Values.fourlwCommandsWhitelist | quote }} + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }} + - name: ZOO_AUTOPURGE_INTERVAL + value: {{ .Values.autopurge.purgeInterval | quote }} + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: {{ .Values.autopurge.snapRetainCount | quote }} + - name: ZOO_MAX_SESSION_TIMEOUT + value: {{ .Values.maxSessionTimeout | quote }} + - name: ZOO_SERVERS + {{- $replicaCount := int .Values.replicaCount }} + {{- $followerPort := int .Values.service.followerPort }} + {{- $electionPort := int .Values.service.electionPort }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $zookeeperFullname := include "zookeeper.fullname" . 
}} + {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }} + {{- $clusterDomain := .Values.clusterDomain }} + value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }} {{ end }} + - name: ZOO_ENABLE_AUTH + value: {{ ternary "yes" "no" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + - name: ZOO_CLIENT_USER + value: {{ .Values.auth.clientUser | quote }} + - name: ZOO_CLIENT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" . }}{{ end }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" . 
}}{{ end }} + key: server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "yes" "no" .Values.allowAnonymousLogin | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.service.tls.client_enable }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.service.tls.client_enable | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.service.tls.client_keystore_path | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + value: {{ .Values.service.tls.client_keystore_password | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.service.tls.client_truststore_path | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + value: {{ .Values.service.tls.client_truststore_password | quote }} + {{ end }} + {{- if .Values.service.tls.quorum_enable }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.service.tls.quorum_enable | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.service.tls.quorum_keystore_path | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + value: {{ .Values.service.tls.quorum_keystore_password | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.service.tls.quorum_truststore_path | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + value: {{ .Values.service.tls.quorum_truststore_password | quote }} + {{ end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- toYaml .Values.extraEnvVars | nindent 12 }} + {{- end }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} 
+ - name: client + containerPort: {{ .Values.service.port }} + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: client-tls + containerPort: {{ .Values.service.tls.client_port }} + {{ end }} + - name: follower + containerPort: {{ .Values.service.followerPort }} + - name: election + containerPort: {{ .Values.service.electionPort }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + {{- if not .Values.service.tls.disable_base_client_port }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + {{- if not .Values.service.tls.disable_base_client_port }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + {{- end }} + initialDelaySeconds: {{ 
.Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if .Values.config }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + volumes: + {{- if .Values.config }} + - name: config + configMap: + name: {{ template "zookeeper.fullname" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) )}} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "zookeeper.storageClass" . | nindent 8 }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "zookeeper.storageClass" . | nindent 8 }} + {{- end }} + {{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml new file mode 100755 index 000000000..972efb51d --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }}\ + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} + - name: tcp-client + port: 2181 + targetPort: client + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: tcp-client-tls + port: {{ .Values.service.tls.client_port }} + targetPort: client-tls + {{ end }} + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: {{- include "zookeeper.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc.yaml new file mode 100755 index 000000000..da3a2895a --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }}\ + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} + - name: tcp-client + port: 2181 + targetPort: client + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: tcp-client-tls + port: {{ .Values.service.tls.client_port }} + targetPort: client-tls + {{ end }} + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: {{- include "zookeeper.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values-production.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values-production.yaml new file mode 100755 index 000000000..7d678603f --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values-production.yaml @@ -0,0 +1,430 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Zookeeper image version +## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## +image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.6.2-debian-10-r10 + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override zookeeper.fullname template (will maintain the release name) +# nameOverride: + +## String to fully override zookeeper.fullname template +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Example Use Cases: +## mount certificates to enable tls +# extraVolumes: +# - name: zookeeper-keystore +# secret: +# defaultMode: 288 +# secretName: zookeeper-keystore +# - name: zookeeper-trustsore +# secret: +# defaultMode: 288 +# secretName: zookeeper-truststore +# extraVolumeMounts: +# - name: zookeeper-keystore +# mountPath: /certs/keystore +# readOnly: true +# - name: zookeeper-truststore +# mountPath: /certs/truststore +# readOnly: true + + +## StatefulSet controller supports automated updates. 
There are two valid update strategies: RollingUpdate and OnDelete +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +podDisruptionBudget: + maxUnavailable: 1 + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel + +## Number of ZooKeeper nodes +## +replicaCount: 3 + +## Basic time unit in milliseconds used by ZooKeeper for heartbeats +## +tickTime: 2000 + +## ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader +## +initLimit: 10 + +## How far out of date a server can be from a leader +## +syncLimit: 5 + +## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## +maxClientCnxns: 60 + +## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime. 
+## +maxSessionTimeout: 40000 + +## A list of comma separated Four Letter Words commands to use +## +fourlwCommandsWhitelist: srvr, mntr, ruok + +## Allow zookeeper to listen for peers on all IPs +## +listenOnAllIPs: false + +## Allow to accept connections from unauthenticated users +## +allowAnonymousLogin: true + +autopurge: + ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest + ## + snapRetainCount: 3 + ## The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable the auto purging. + ## + purgeInterval: 0 + +auth: + ## Use existing secret (ignores previous password) + ## + # existingSecret: + ## Enable Zookeeper auth. It uses SASL/Digest-MD5 + ## + enabled: false + ## User that will use Zookeeper clients to auth + ## + clientUser: + ## Password that will use Zookeeper clients to auth + ## + clientPassword: + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: + +## Size in MB for the Java Heap options (Xmx and Xms). This env var is ignored if Xmx and Xms are configured via JVMFLAGS +## +heapSize: 1024 + +## Log level for the Zookeeper server. ERROR by default. Have in mind if you set it to INFO or WARN the ReadinessProbe will produce a lot of logs. +## +logLevel: ERROR + +## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir. +## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
+## Example: +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" + +## Default JVMFLAGS for the ZooKeeper process +## +# jvmFlags: + +## Configure ZooKeeper with a custom zoo.cfg file +## +# config: + +## Kubernetes configuration +## For minikube, set this to NodePort, elsewhere use LoadBalancer +## +service: + type: ClusterIP + port: 2181 + followerPort: 2888 + electionPort: 3888 + publishNotReadyAddresses: true + tls: + client_enable: true + quorum_enable: true + disable_base_client_port: true + + client_port: 3181 + + client_keystore_path: /tls_key_store/key_store_file + client_keystore_password: "" + client_truststore_path: /tls_trust_store/trust_store_file + client_truststore_password: "" + + quorum_keystore_path: /tls_key_store/key_store_file + quorum_keystore_password: "" + quorum_truststore_path: /tls_trust_store/trust_store_file + quorum_truststore_password: "" + annotations: {} + headless: + annotations: {} + +## Service account for Zookeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the zookeeper.fullname template + # name: + +## Zookeeper Pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Zookeeper data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. 
(gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + dataLogDir: + size: 8Gi + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Labels +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Annotations +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 
+ periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 + +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, zookeeper accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## Zookeeper Prometheus Exporter configuration +## +metrics: + enabled: false + + ## Zookeeper Prometheus Exporter container port + ## + containerPort: 9141 + + ## Service configuration + ## + service: + ## Zookeeper Prometheus Exporter service type + ## + type: ClusterIP + ## Zookeeper Prometheus Exporter service port + ## + port: 9141 + ## Annotations for the Zookeeper Prometheus Exporter metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: + + ## Interval at which metrics should be scraped. 
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + enabled: false + ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: + + ## PrometheusRule selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + + ## Some example rules. + rules: [] + # - alert: ZookeeperSyncedFollowers + # annotations: + # message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one). + # expr: max(synced_followers{service="my-release-metrics"}) < 2 + # for: 5m + # labels: + # severity: critical + # - alert: ZookeeperOutstandingRequests + # annotations: + # message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster a whole. 
+ # expr: outstanding_requests{service="my-release-metrics"} > 10 + # for: 5m + # labels: + # severity: critical diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values.yaml new file mode 100755 index 000000000..a40decb54 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values.yaml @@ -0,0 +1,430 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Zookeeper image version +## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## +image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.6.2-debian-10-r10 + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override zookeeper.fullname template (will maintain the release name) +# nameOverride: + +## String to fully override zookeeper.fullname template +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Example Use Cases: +## mount certificates to enable tls +# extraVolumes: +# - name: zookeeper-keystore +# secret: +# defaultMode: 288 +# secretName: zookeeper-keystore +# - name: zookeeper-trustsore +# secret: +# defaultMode: 288 +# secretName: zookeeper-truststore +# extraVolumeMounts: +# - name: zookeeper-keystore +# mountPath: /certs/keystore +# readOnly: true +# - name: zookeeper-truststore +# mountPath: /certs/truststore +# readOnly: true + +## StatefulSet controller supports automated updates. 
There are two valid update strategies: RollingUpdate and OnDelete +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +podDisruptionBudget: + maxUnavailable: 1 + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel + +## Number of ZooKeeper nodes +## +replicaCount: 1 + +## Basic time unit in milliseconds used by ZooKeeper for heartbeats +## +tickTime: 2000 + +## ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader +## +initLimit: 10 + +## How far out of date a server can be from a leader +## +syncLimit: 5 + +## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## +maxClientCnxns: 60 + +## A list of comma separated Four Letter Words commands to use +## +fourlwCommandsWhitelist: srvr, mntr, ruok + +## Allow zookeeper to listen for peers on all IPs +## +listenOnAllIPs: false + +## Allow to accept connections from unauthenticated users +## +allowAnonymousLogin: true + +autopurge: + ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest + ## + snapRetainCount: 3 + ## The time interval in hours for which the purge task has 
to be triggered. Set to a positive integer (1 and above) to enable the auto purging. + ## + purgeInterval: 0 + +## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime. +## +maxSessionTimeout: 40000 + +auth: + ## Use existing secret (ignores previous password) + ## + # existingSecret: + ## Enable Zookeeper auth. It uses SASL/Digest-MD5 + ## + enabled: false + ## User that will use Zookeeper clients to auth + ## + clientUser: + ## Password that will use Zookeeper clients to auth + ## + clientPassword: + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: + +## Size in MB for the Java Heap options (Xmx and Xms). This env var is ignored if Xmx and Xms are configured via JVMFLAGS +## +heapSize: 1024 + +## Log level for the Zookeeper server. ERROR by default. Have in mind if you set it to INFO or WARN the ReadinessProbe will produce a lot of logs. +## +logLevel: ERROR + +## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir. +## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
+## Example: +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" + +## Default JVMFLAGS for the ZooKeeper process +## +# jvmFlags: + +## Configure ZooKeeper with a custom zoo.cfg file +## +# config: + +## Kubernetes configuration +## For minikube, set this to NodePort, elsewhere use LoadBalancer +## +service: + type: ClusterIP + port: 2181 + followerPort: 2888 + electionPort: 3888 + publishNotReadyAddresses: true + tls: + client_enable: false + quorum_enable: false + disable_base_client_port: false + + client_port: 3181 + + client_keystore_path: /tls_key_store/key_store_file + client_keystore_password: "" + client_truststore_path: /tls_trust_store/trust_store_file + client_truststore_password: "" + + quorum_keystore_path: /tls_key_store/key_store_file + quorum_keystore_password: "" + quorum_truststore_path: /tls_trust_store/trust_store_file + quorum_truststore_password: "" + annotations: {} + headless: + annotations: {} + +## Service account for Zookeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the zookeeper.fullname template + # name: + +## Zookeeper Pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Zookeeper data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. 
(gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + enabled: true + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + dataLogDir: + size: 8Gi + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Labels +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Annotations +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## +livenessProbe: + enabled: true + initialDelaySeconds: 
30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 + +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, zookeeper accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +## Zookeeper Prometheus Exporter configuration +## +metrics: + enabled: false + + ## Zookeeper Prometheus Exporter container port + ## + containerPort: 9141 + + ## Service configuration + ## + service: + ## Zookeeper Prometheus Exporter service type + ## + type: ClusterIP + ## Zookeeper Prometheus Exporter service port + ## + port: 9141 + ## Annotations for the Zookeeper Prometheus Exporter metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: + + ## Interval at which metrics should be scraped. 
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + enabled: false + ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: + + ## PrometheusRule selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + + ## Some example rules. + rules: [] + # - alert: ZookeeperSyncedFollowers + # annotations: + # message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one). + # expr: max(synced_followers{service="my-release-metrics"}) < 2 + # for: 5m + # labels: + # severity: critical + # - alert: ZookeeperOutstandingRequests + # annotations: + # message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster a whole. 
+ # expr: outstanding_requests{service="my-release-metrics"} > 10 + # for: 5m + # labels: + # severity: critical diff --git a/ee/scripts/helm/helm/databases/charts/kafka/files/jks/README.md b/ee/scripts/helm/helm/databases/charts/kafka/files/jks/README.md new file mode 100755 index 000000000..e110a8825 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/files/jks/README.md @@ -0,0 +1,10 @@ +# Java Key Stores + +You can copy here your Java Key Stores (JKS) files so a secret is created including them. Remember to use a truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. For instance, if you have 3 brokers you need to copy here the following files: + +- kafka.truststore.jks +- kafka-0.keystore.jks +- kafka-1.keystore.jks +- kafka-2.keystore.jks + +Find more info in [this section](https://github.com/bitnami/charts/tree/master/bitnami/kafka#enable-security-for-kafka-and-zookeeper) of the README.md file. diff --git a/ee/scripts/helm/helm/databases/charts/kafka/kafka.yaml b/ee/scripts/helm/helm/databases/charts/kafka/kafka.yaml new file mode 100644 index 000000000..acd718957 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/kafka.yaml @@ -0,0 +1,521 @@ +--- +# Source: kafka/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-scripts + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm +data: + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"kafka-"}" + export KAFKA_CFG_BROKER_ID="$ID" + + exec /entrypoint.sh /run.sh +--- +# Source: 
kafka/charts/zookeeper/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-zookeeper-headless + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + + - name: tcp-client + port: 2181 + targetPort: client + + + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper +--- +# Source: kafka/charts/zookeeper/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-zookeeper + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper +spec: + type: ClusterIP + ports: + + - name: tcp-client + port: 2181 + targetPort: client + + + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper +--- +# Source: kafka/templates/kafka-metrics-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-metrics + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + annotations: + + prometheus.io/path: /metrics + prometheus.io/port: '9308' + prometheus.io/scrape: "true" +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 9308 + protocol: TCP + targetPort: metrics + nodePort: null + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: 
kafka + app.kubernetes.io/component: metrics +--- +# Source: kafka/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: 9092 + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: 9093 + protocol: TCP + targetPort: kafka-internal + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + type: ClusterIP + ports: + - name: tcp-client + port: 9092 + protocol: TCP + targetPort: kafka-client + nodePort: null + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/kafka-metrics-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-exporter + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: metrics + template: + metadata: + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + spec: + containers: + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.2.0-debian-10-r220 + 
imagePullPolicy: "IfNotPresent" + command: + - /bin/bash + - -ec + - | + read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" + kafka_exporter \ + --kafka.server=kafka-0.kafka-headless.db.svc.cluster.local:9092 \ + --kafka.server=kafka-1.kafka-headless.db.svc.cluster.local:9092 \ + --web.listen-address=:9308 + ports: + - name: metrics + containerPort: 9308 + resources: + limits: {} + requests: {} +--- +# Source: kafka/charts/zookeeper/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka-zookeeper + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper + role: zookeeper +spec: + serviceName: kafka-zookeeper-headless + replicas: 1 + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper + template: + metadata: + name: kafka-zookeeper + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper + spec: + + serviceAccountName: default + securityContext: + fsGroup: 1001 + containers: + - name: zookeeper + image: docker.io/bitnami/zookeeper:3.6.2-debian-10-r10 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + command: + - bash + - -ec + - | + # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname + HOSTNAME=`hostname -s` + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + ORD=${BASH_REMATCH[2]} + export ZOO_SERVER_ID=$((ORD+1)) + else + echo "Failed to get index from hostname $HOST" + exit 1 + fi + exec /entrypoint.sh /run.sh + resources: + requests: + cpu: 250m + memory: 256Mi + env: + - name: ZOO_DATA_LOG_DIR + value: "" + - name: 
ZOO_PORT_NUMBER + value: "2181" + - name: ZOO_TICK_TIME + value: "2000" + - name: ZOO_INIT_LIMIT + value: "10" + - name: ZOO_SYNC_LIMIT + value: "5" + - name: ZOO_MAX_CLIENT_CNXNS + value: "60" + - name: ZOO_4LW_COMMANDS_WHITELIST + value: "srvr, mntr, ruok" + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: "no" + - name: ZOO_AUTOPURGE_INTERVAL + value: "0" + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: "3" + - name: ZOO_MAX_SESSION_TIMEOUT + value: "40000" + - name: ZOO_SERVERS + value: kafka-zookeeper-0.kafka-zookeeper-headless.db.svc.cluster.local:2888:3888 + - name: ZOO_ENABLE_AUTH + value: "no" + - name: ZOO_HEAP_SIZE + value: "1024" + - name: ZOO_LOG_LEVEL + value: "ERROR" + - name: ALLOW_ANONYMOUS_LOGIN + value: "yes" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + ports: + + - name: client + containerPort: 2181 + + + - name: follower + containerPort: 2888 + - name: election + containerPort: 3888 + livenessProbe: + exec: + command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok'] + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok'] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + volumes: + volumeClaimTemplates: + - metadata: + name: data + annotations: + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" +--- +# Source: kafka/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + podManagementPolicy: Parallel + replicas: 2 + selector: + 
matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka + serviceName: kafka-headless + updateStrategy: + type: "RollingUpdate" + template: + metadata: + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka + spec: + securityContext: + fsGroup: 1001 + runAsUser: 1001 + serviceAccountName: kafka + containers: + - name: kafka + image: docker.io/bitnami/kafka:2.6.0-debian-10-r30 + imagePullPolicy: "IfNotPresent" + command: + - /scripts/setup.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + value: "kafka-zookeeper" + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: "INTERNAL" + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + value: "INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT" + - name: KAFKA_CFG_LISTENERS + value: "INTERNAL://:9093,CLIENT://:9092" + - name: KAFKA_CFG_ADVERTISED_LISTENERS + value: "INTERNAL://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9093,CLIENT://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9092" + - name: ALLOW_PLAINTEXT_LISTENER + value: "yes" + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: "false" + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: "true" + - name: KAFKA_HEAP_OPTS + value: "-Xmx1024m -Xms1024m" + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: "10000" + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: "1000" + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: "1073741824" + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS + value: "300000" + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: "168" + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: "1000012" + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: "1073741824" + - name: KAFKA_CFG_LOG_DIRS + 
value: "/bitnami/kafka/data" + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: "1" + - name: KAFKA_CFG_NUM_IO_THREADS + value: "8" + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: "3" + - name: KAFKA_CFG_NUM_PARTITIONS + value: "1" + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: "1" + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: "102400" + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: "104857600" + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: "102400" + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: "6000" + ports: + - name: kafka-client + containerPort: 9092 + - name: kafka-internal + containerPort: 9093 + livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: + periodSeconds: + successThreshold: + readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 5 + timeoutSeconds: 5 + failureThreshold: 6 + periodSeconds: + successThreshold: + resources: + limits: {} + requests: {} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + volumes: + - name: scripts + configMap: + name: kafka-scripts + defaultMode: 0755 + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" diff --git a/ee/scripts/helm/helm/databases/charts/kafka/requirements.lock b/ee/scripts/helm/helm/databases/charts/kafka/requirements.lock new file mode 100755 index 000000000..115d0b229 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 5.21.9 +digest: 
sha256:2f3c43ce02e3966648b8c89be121fe39537f62ea1d161ad908f51ddc90e4243e +generated: "2020-09-29T07:43:56.483358254Z" diff --git a/ee/scripts/helm/helm/databases/charts/kafka/requirements.yaml b/ee/scripts/helm/helm/databases/charts/kafka/requirements.yaml new file mode 100755 index 000000000..533875258 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: zookeeper + version: 5.x.x + repository: https://charts.bitnami.com/bitnami + condition: zookeeper.enabled diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/NOTES.txt b/ee/scripts/helm/helm/databases/charts/kafka/templates/NOTES.txt new file mode 100755 index 000000000..0347c21c4 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/NOTES.txt @@ -0,0 +1,181 @@ +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "kafka.fullname" . -}} +{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}} +{{- $servicePort := int .Values.service.port -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}} +{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }} + +############################################################################### +### ERROR: You enabled external access to Kafka brokers without specifying ### +### the array of load balancer IPs for Kafka brokers. ### +############################################################################### + +This deployment will be incomplete until you configure the array of load balancer +IPs for Kafka brokers. To complete your deployment follow the steps below: + +1. 
Wait for the load balancer IPs (it may take a few minutes for them to be available): + + kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w + +2. Obtain the load balancer IPs and upgrade your chart: + + {{- range $i, $e := until $replicaCount }} + LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" + {{- end }} + +3. Upgrade you chart: + + helm upgrade {{ .Release.Name }} bitnami/{{ .Chart.Name }} \ + --set replicaCount={{ $replicaCount }} \ + --set externalAccess.enabled=true \ + {{- range $i, $e := until $replicaCount }} + --set externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \ + {{- end }} + --set externalAccess.service.type=LoadBalancer + +{{- else }} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq .Values.auth.clientProtocol "plaintext") }} +--------------------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not configuring the authentication + you have most likely exposed the Kafka service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also configure the Kafka authentication. 
+ +--------------------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster: + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }} + +Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster: + +{{- $brokerList := list }} +{{- range $e, $i := until $replicaCount }} +{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }} +{{- end }} +{{ join "\n" $brokerList | nindent 4 }} + + +{{- if (include "kafka.client.saslAuthentication" .) }} + +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files by executing these commands: + + - kafka_jaas.conf: + +cat > kafka_jaas.conf < client.properties <<>(Value) + name: kafka_controller_$1_$2_$4 + labels: + broker_id: "$3" + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<>(Count) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.server<>(Value) + name: kafka_server_$1_$2_$4 + labels: + client_id: "$3" + - pattern : kafka.network<>(Value) + name: kafka_network_$1_$2_$4 + labels: + network_processor: $3 + - pattern : kafka.network<>(Count) + name: kafka_network_$1_$2_$4 + labels: + request: $3 + - pattern: kafka.server<>(Count|OneMinuteRate) + name: kafka_server_$1_$2_$4 + labels: + topic: $3 + - pattern: kafka.server<>(Value) + name: kafka_server_$1_$2_$3_$4 + - pattern: kafka.server<>(Count|Value|OneMinuteRate) + name: kafka_server_$1_total_$2_$3 + - pattern: kafka.server<>(queue-size) + name: 
kafka_server_$1_$2 + - pattern: java.lang<(.+)>(\w+) + name: java_lang_$1_$4_$3_$2 + - pattern: java.lang<>(\w+) + name: java_lang_$1_$3_$2 + - pattern : java.lang + - pattern: kafka.log<>Value + name: kafka_log_$1_$2 + labels: + topic: $3 + partition: $4 +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/jmx-metrics-svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/jmx-metrics-svc.yaml new file mode 100755 index 000000000..83edd8422 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/jmx-metrics-svc.yaml @@ -0,0 +1,45 @@ +{{- if .Values.metrics.jmx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-jmx-metrics + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.jmx.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.jmx.service.type }} + {{- if eq .Values.metrics.jmx.service.type "LoadBalancer" }} + {{- if .Values.metrics.jmx.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.jmx.service.loadBalancerIP }} + {{- end }} + {{- if .Values.metrics.jmx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.jmx.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.metrics.jmx.service.type "ClusterIP") .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ 
.Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.port }} + protocol: TCP + targetPort: metrics + {{- if and (or (eq .Values.metrics.jmx.service.type "NodePort") (eq .Values.metrics.jmx.service.type "LoadBalancer")) (not (empty .Values.metrics.jmx.service.nodePort)) }} + nodePort: {{ .Values.metrics.jmx.service.nodePort }} + {{- else if eq .Values.metrics.jmx.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-deployment.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-deployment.yaml new file mode 100755 index 000000000..c547fbb39 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-deployment.yaml @@ -0,0 +1,87 @@ +{{- if .Values.metrics.kafka.enabled }} +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "kafka.fullname" . -}} +{{- $servicePort := int .Values.service.port -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kafka.fullname" . }}-exporter + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + template: + metadata: + labels: {{- include "kafka.labels" . 
| nindent 8 }} + app.kubernetes.io/component: metrics + spec: +{{- include "kafka.imagePullSecrets" . | indent 6 }} + containers: + - name: kafka-exporter + image: {{ include "kafka.metrics.kafka.image" . }} + imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" + kafka_exporter \ + {{- range $i, $e := until $replicaCount }} + --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + --sasl.enabled \ + --sasl.username="$SASL_USERNAME" \ + --sasl.password="${sasl_passwords[0]}" \ + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + --tls.enabled \ + {{- if .Values.metrics.kafka.certificatesSecret }} + --tls.ca-file="/opt/bitnami/kafka-exporter/certs/ca-file" \ + --tls.cert-file="/opt/bitnami/kafka-exporter/certs/cert-file" \ + --tls.key-file="/opt/bitnami/kafka-exporter/certs/key-file" \ + {{- end }} + {{- end }} + {{- range $key, $value := .Values.metrics.kafka.extraFlags }} + --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ + {{- end }} + --web.listen-address=:9308 + {{- if (include "kafka.client.saslAuthentication" .) }} + env: + - name: SASL_USERNAME + value: {{ index .Values.auth.jaas.clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + ports: + - name: metrics + containerPort: 9308 + {{- if .Values.metrics.kafka.resources }} + resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} + {{- end }} + {{- if and (include "kafka.tlsEncryption" .) 
.Values.metrics.kafka.certificatesSecret }} + volumeMounts: + - name: kafka-exporter-certificates + mountPath: /opt/bitnami/kafka-exporter/certs/ + readOnly: true + volumes: + - name: kafka-exporter-certificates + secret: + secretName: {{ .Values.metrics.kafka.certificatesSecret }} + defaultMode: 0440 + {{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-svc.yaml new file mode 100755 index 000000000..54a4ccb0b --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-svc.yaml @@ -0,0 +1,45 @@ +{{- if .Values.metrics.kafka.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-metrics + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.kafka.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.kafka.service.type }} + {{- if eq .Values.metrics.kafka.service.type "LoadBalancer" }} + {{- if .Values.metrics.kafka.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.kafka.service.loadBalancerIP }} + {{- end }} + {{- if .Values.metrics.kafka.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.kafka.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.metrics.kafka.service.type "ClusterIP") 
.Values.metrics.kafka.service.clusterIP }} + clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.kafka.service.port }} + protocol: TCP + targetPort: metrics + {{- if and (or (eq .Values.metrics.kafka.service.type "NodePort") (eq .Values.metrics.kafka.service.type "LoadBalancer")) (not (empty .Values.metrics.kafka.service.nodePort)) }} + nodePort: {{ .Values.metrics.kafka.service.nodePort }} + {{- else if eq .Values.metrics.kafka.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/log4j-configmap.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/log4j-configmap.yaml new file mode 100755 index 000000000..0a34d50dd --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/log4j-configmap.yaml @@ -0,0 +1,16 @@ +{{- if (include "kafka.log4j.createConfigMap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka.log4j.configMapName" . }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + log4j.properties: |- + {{ .Values.log4j | nindent 4 }} +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/poddisruptionbudget.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..cf515becb --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "kafka.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/role.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/role.yaml new file mode 100755 index 000000000..943c5bf3c --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/role.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/rolebinding.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/rolebinding.yaml new file mode 100755 index 000000000..78f940f85 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "kafka.fullname" . 
}} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/scripts-configmap.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/scripts-configmap.yaml new file mode 100755 index 000000000..705545a61 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/scripts-configmap.yaml @@ -0,0 +1,118 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kafka.fullname" . }}-scripts + labels: {{- include "kafka.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "kafka.fullname" . 
}} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $interBrokerPort := .Values.service.internalPort }} + {{- $clientPort := .Values.service.port }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + # Auxiliar functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + k8s_svc_node_port() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local index=${3:-0} + local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")" + echo "$node_port" + } + k8s_svc_node_port "{{ $releaseNamespace }}" "$SVC_NAME" | 
tee "$SHARED_FILE" + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + export KAFKA_CFG_BROKER_ID="$ID" + + {{- if .Values.externalAccess.enabled }} + # Configure external ip and port + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_IP="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_IP=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.port }} + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + {{- if .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_IP={{ .Values.externalAccess.service.domain }} + {{- else }} + export EXTERNAL_ACCESS_IP=$(curl -s https://ipinfo.io/ip) + {{- end }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_PORT=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- end }} + + # Configure Kafka advertised listeners + {{- if .Values.advertisedListeners }} + export KAFKA_CFG_ADVERTISED_LISTENERS={{ .Values.advertisedListeners }} + {{- else }} + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_IP}:${EXTERNAL_ACCESS_PORT}" + {{- end }} + {{- end }} + + {{- if (include "kafka.tlsEncryption" .) 
}} + if [[ -f "/certs/kafka.truststore.jks" ]] && [[ -f "/certs/kafka-${ID}.keystore.jks" ]]; then + mkdir -p /opt/bitnami/kafka/config/certs + cp "/certs/kafka.truststore.jks" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + cp "/certs/kafka-${ID}.keystore.jks" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled." + exit 1 + fi + {{- end }} + + exec /entrypoint.sh /run.sh diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/serviceaccount.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/serviceaccount.yaml new file mode 100755 index 000000000..790790b3f --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml new file mode 100755 index 000000000..250bb5306 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kafka.fullname" . 
}}-jmx-metrics + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + endpoints: + - port: http-metrics + path: "/" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-metrics.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-metrics.yaml new file mode 100755 index 000000000..951bf7c41 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-metrics.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kafka.fullname" . }}-metrics + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/statefulset.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/statefulset.yaml new file mode 100755 index 000000000..e9b5ce8f9 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/statefulset.yaml @@ -0,0 +1,435 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- $fullname := include "kafka.fullname" . 
}} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $interBrokerPort := .Values.service.internalPort }} +{{- $clientPort := .Values.service.port }} +{{- $interBrokerProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.interBrokerProtocol ) -}} +{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: Parallel + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + serviceName: {{ template "kafka.fullname" . }}-headless + updateStrategy: + type: {{ .Values.updateStrategy | quote }} + {{- if (eq "OnDelete" .Values.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.rollingUpdatePartition }} + {{- end }} + template: + metadata: + labels: {{- include "kafka.labels" . 
| nindent 8 }} + app.kubernetes.io/component: kafka + {{- if .Values.podLabels }} + {{- include "kafka.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "kafka.createConfigmap" .) (include "kafka.createJaasSecret" .) .Values.externalAccess.enabled (include "kafka.metrics.jmx.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "kafka.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createJaasSecret" .) }} + checksum/secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.metrics.jmx.createConfigmap" .) }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "kafka.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: +{{- include "kafka.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.affinity }} + affinity: {{- include "kafka.tplValue" ( dict "value" .Values.affinity "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "kafka.tplValue" ( dict "value" .Values.nodeSelector "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "kafka.tplValue" ( dict "value" .Values.tolerations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext }} + securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.create }} + serviceAccountName: {{ template "kafka.serviceAccountName" . }} + {{- end }} + {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "kafka.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/kafka + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/kafka" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: auto-discovery + image: {{ include "kafka.externalAccess.autoDiscovery.image" . 
}} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: {{- include "kafka.tplValue" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- if .Values.args }} + args: {{- include "kafka.tplValue" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + {{- if .Values.zookeeper.enabled }} + value: {{ include "kafka.zookeeper.fullname" . 
| quote }} + {{- else }} + value: {{ join "," .Values.externalZookeeper.servers | quote }} + {{- end }} + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: {{ .Values.interBrokerListenerName | quote }} + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + {{- if .Values.listenerSecurityProtocolMap }} + value: {{ .Values.listenerSecurityProtocolMap | quote }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $clientProtocol }}" + {{- else }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" + {{- end }} + {{- if or ($clientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.jaas.zookeeperUser }} + - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS + value: {{ include "kafka.auth.saslMechanisms" ( dict "type" .Values.auth.saslMechanisms ) }} + - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL + value: {{ upper .Values.auth.saslInterBrokerMechanism | quote }} + {{- end }} + - name: KAFKA_CFG_LISTENERS + {{- if .Values.listeners }} + value: {{ .Values.listeners }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092,EXTERNAL://:9094" + {{- else }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092" + {{- end }} + {{- if .Values.externalAccess.enabled }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + {{- else }} + - name: KAFKA_CFG_ADVERTISED_LISTENERS + {{- if .Values.advertisedListeners }} + value: {{ .Values.advertisedListeners }} + {{- else }} + value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }}" + {{- end }} + {{- end }} + - name: ALLOW_PLAINTEXT_LISTENER + value: {{ ternary "yes" "no" (or 
.Values.auth.enabled .Values.allowPlaintextListener) | quote }} + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_OPTS + value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" + {{- if (include "kafka.client.saslAuthentication" .) }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.auth.jaas.clientUsers | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + {{- if .Values.auth.jaas.zookeeperUser }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: "SASL" + - name: KAFKA_ZOOKEEPER_USER + value: {{ .Values.auth.jaas.zookeeperUser | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: zookeeper-password + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.auth.jaas.interBrokerUser | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: inter-broker-password + {{- end }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) 
}} + - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM + value: {{ .Values.auth.tlsEndpointIdentificationAlgorithm | quote }} + {{- if .Values.auth.jksPassword }} + - name: KAFKA_CERTIFICATE_PASSWORD + value: {{ .Values.auth.jksPassword | quote }} + {{- end }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: JMX_PORT + value: "5555" + {{- end }} + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: {{ .Values.deleteTopicEnable | quote }} + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: {{ .Values.autoCreateTopicsEnable | quote }} + - name: KAFKA_HEAP_OPTS + value: {{ .Values.heapOpts | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: {{ .Values.logFlushIntervalMessages | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: {{ .Values.logFlushIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: {{ .Values.logRetentionBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS + value: {{ .Values.logRetentionCheckIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: {{ .Values.logRetentionHours | quote }} + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: {{ .Values.maxMessageBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: {{ .Values.logSegmentBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_DIRS + value: {{ .Values.logsDirs | quote }} + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: {{ .Values.defaultReplicationFactor | quote }} + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: {{ .Values.offsetsTopicReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: {{ .Values.transactionStateLogReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: {{ .Values.transactionStateLogMinIsr | quote }} + - name: KAFKA_CFG_NUM_IO_THREADS + value: {{ .Values.numIoThreads | quote }} + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: {{ 
.Values.numNetworkThreads | quote }} + - name: KAFKA_CFG_NUM_PARTITIONS + value: {{ .Values.numPartitions | quote }} + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: {{ .Values.numRecoveryThreadsPerDataDir | quote }} + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: {{ .Values.socketReceiveBufferBytes | quote }} + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: {{ .Values.socketSendBufferBytes | quote }} + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: {{ .Values.zookeeperConnectionTimeoutMs | quote }} + {{- if .Values.extraEnvVars }} + {{ include "kafka.tplValue" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: kafka-client + containerPort: 9092 + - name: kafka-internal + containerPort: {{ $interBrokerPort }} + {{- if .Values.externalAccess.enabled }} + - name: kafka-external + containerPort: 9094 + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customlivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + successThreshold: {{ 
.Values.readinessProbe.successThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customreadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + mountPath: /bitnami/kafka/config/server.properties + subPath: server.properties + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: /bitnami/kafka/config/log4j.properties + subPath: log4j.properties + {{- end }} + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + mountPath: /shared + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: kafka-certificates + mountPath: /certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ template "kafka.metrics.jmx.image" . 
}} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + command: + - java + - -XX:+UnlockExperimentalVMOptions + - -XX:+UseCGroupMemoryLimitForHeap + - -XX:MaxRAMFraction=1 + - -XshowSettings:vm + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + ports: + - name: metrics + containerPort: 5556 + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.sidecars }} + {{- include "kafka.tplValue" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + configMap: + name: {{ include "kafka.configmapName" . }} + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + - name: scripts + configMap: + name: {{ include "kafka.fullname" . }}-scripts + defaultMode: 0755 + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + emptyDir: {} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: kafka-certificates + secret: + secretName: {{ include "kafka.jksSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} +{{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) 
}} +{{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "kafka.tplValue" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "kafka.storageClass" . | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-external-access.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-external-access.yaml new file mode 100755 index 000000000..eefe0046d --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-external-access.yaml @@ -0,0 +1,52 @@ +{{- if .Values.externalAccess.enabled }} +{{- $fullName := include "kafka.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" $ }}-{{ $i }}-external + labels: {{- include "kafka.labels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if $root.Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations }} + annotations: + {{- if $root.Values.externalAccess.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ 
$root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-kafka + port: {{ $root.Values.externalAccess.service.port }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: kafka-external + selector: {{- include "kafka.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-headless.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-headless.yaml new file mode 100755 index 000000000..e7c2e5e6e --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-headless.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-headless + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: {{ .Values.service.port }} + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: {{ .Values.service.internalPort }} + protocol: TCP + targetPort: kafka-internal + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc.yaml new file mode 100755 index 000000000..189cb9ffd --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc.yaml @@ -0,0 +1,49 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-client + port: {{ .Values.service.port }} + protocol: TCP + targetPort: kafka-client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if and .Values.externalAccess.enabled (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }} + - name: tcp-external + port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: kafka-external + {{- if (not (empty .Values.service.nodePorts.external)) }} + nodePort: {{ .Values.service.nodePorts.external }} + {{- end }} + {{- end }} + selector: {{- include "kafka.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/ee/scripts/helm/helm/databases/charts/kafka/values-production.yaml b/ee/scripts/helm/helm/databases/charts/kafka/values-production.yaml new file mode 100755 index 000000000..af6f43dba --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/values-production.yaml @@ -0,0 +1,931 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.6.0-debian-10-r30 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## String to partially override kafka.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override kafka.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kafka Configuration +## Specify content for server.properties +## The 
server.properties is auto-generated based on other parameters when this paremeter is not specified +## +## Example: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +# config: + +## ConfigMap with Kafka Configuration +## NOTE: This will override config +## +# existingConfigmap: + +## Kafka Log4J Configuration +## An optional log4j.properties file to overwrite the default of the Kafka brokers. +## See an example log4j.properties at: +## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +# log4j: + +## Kafka Log4j ConfigMap +## The name of an existing ConfigMap containing a log4j.properties file. +## NOTE: this will override log4j. +## +# existingLog4jConfigMap: + +## Kafka's Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m + +## Switch to enable topic deletion or not. +## +deleteTopicEnable: false + +## Switch to enable auto creation of topics. +## Enabling auto creation of topics not recommended for production or similar environments. +## +autoCreateTopicsEnable: false + +## The number of messages to accept before forcing a flush of data to disk. +## +logFlushIntervalMessages: 10000 + +## The maximum amount of time a message can sit in a log before we force a flush. 
+## +logFlushIntervalMs: 1000 + +## A size-based retention policy for logs. +## +logRetentionBytes: _1073741824 + +## The interval at which log segments are checked to see if they can be deleted. +## +logRetentionCheckIntervalMs: 300000 + +## The minimum age of a log file to be eligible for deletion due to age. +## +logRetentionHours: 168 + +## The maximum size of a log segment file. When this size is reached a new log segment will be created. +## +logSegmentBytes: _1073741824 + +## A comma separated list of directories under which to store log files. +## +logsDirs: /bitnami/kafka/data + +## The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 + +## Default replication factors for automatically created topics +## +defaultReplicationFactor: 3 + +## The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 3 + +## The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 3 + +## Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 3 + +## The number of threads doing disk I/O. +## +numIoThreads: 8 + +## The number of threads handling network requests. +## +numNetworkThreads: 3 + +## The default number of log partitions per topic. +## +numPartitions: 1 + +## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +## +numRecoveryThreadsPerDataDir: 1 + +## The receive buffer (SO_RCVBUF) used by the socket server. +## +socketReceiveBufferBytes: 102400 + +## The maximum size of a request that the socket server will accept (protection against OOM). +## +socketRequestMaxBytes: _104857600 + +## The send buffer (SO_SNDBUF) used by the socket server. +## +socketSendBufferBytes: 102400 + +## Timeout in ms for connecting to zookeeper. +## +zookeeperConnectionTimeoutMs: 6000 + +## Command and args for running the container. 
Use array form +## +command: + - /scripts/setup.sh +args: + +## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## Example: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Examples: +# extraVolumes: +# - name: kafka-jaas +# secret: +# secretName: kafka-jaas +# extraVolumeMounts: +# - name: kafka-jaas +# mountPath: /bitnami/kafka/config/kafka_jaas.conf +# subPath: kafka_jaas.conf +extraVolumes: [] +extraVolumeMounts: [] + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Authentication parameteres +## https://github.com/bitnami/bitnami-docker-kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## + clientProtocol: sasl + interBrokerProtocol: sasl + + ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls + ## + saslMechanisms: plain,scram-sha-256,scram-sha-512 + ## SASL mechanism for inter broker communication + ## + saslInterBrokerMechanism: plain + + ## Name of the existing secret containing the truststore and + ## one keystore per Kafka broker you have in the Kafka cluster. + ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. + ## Create this secret following the steps below: + ## 1) Generate your trustore and keystore files. 
Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. + ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... + ## Alternatively, you can put your JKS files under the files/jks directory + ## + # jksSecret: + + ## Password to access the JKS files when they are password-protected. + ## + # jksPassword: + + ## The endpoint identification algorithm used by clients to validate server host name. + ## Disable server host name verification by setting it to an empty string + ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + tlsEndpointIdentificationAlgorithm: https + + ## JAAS configuration for SASL authentication + ## MANDATORY when method is 'sasl', or 'sasl_tls' + ## + jaas: + ## Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + + ## Kafka client passwords + ## + ## clientPasswords: + ## - password1 + ## - password2 + ## + clientPasswords: [] + + ## Kafka inter broker communication user + ## + interBrokerUser: admin + + ## Kafka inter broker communication password + ## + interBrokerPassword: "" + + ## Kafka Zookeeper user + ## + zookeeperUser: zookeeperUser + + ## Kafka Zookeeper password + ## + zookeeperPassword: zookeeperPassword + + ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. 
+ ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + # existingSecret: + +## The address(es) the socket server listens on. +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] + +## The address(es) (hostname:port) the brokers will advertise to producers and consumers. +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] + +## The listener->protocol mapping +## When it's nil, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +# listenerSecurityProtocolMap: + +## Allow to use the PLAINTEXT listener. +## +allowPlaintextListener: false + +## Name of listener used for communication between brokers. +## +interBrokerListenerName: INTERNAL + +## Number of Kafka brokers to deploy +## +replicaCount: 3 + +## StrategyType, can be set to RollingUpdate or OnDelete by default. +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Pod annotations.
Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## Kafka containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 1Gi + requests: {} + # cpu: 250m + # memory: 256Mi + +## Kafka containers' liveness and readiness probes. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 10 + timeoutSeconds: 5 + # failureThreshold: 3 + # periodSeconds: 10 + # successThreshold: 1 +readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + # periodSeconds: 10 + # successThreshold: 1 + +## Pod Disruption Budget configuration +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + # minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: 1 + +## Add sidecars to the pod. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## Kafka port for client connections + ## + port: 9092 + ## Kafka port for inter-broker connections + ## + internalPort: 9093 + ## Kafka port for external connections + ## + externalPort: 9094 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + external: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## External Access to Kafka brokers configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.17.12-debian-10-r3 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## A new service per broker will be created + ## + service: + ## Service type. Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 9094 + ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. 
Evaluated as a template + ## + annotations: {} + +## Persistence parameters +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + # storageClass: "-" + ## PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## PVC size + ## + size: 8Gi + ## PVC annotations + ## + annotations: {} + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + enabled: false + ## Bitnami Minideb image + ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube.
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## Kafka pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fluentd.fullname template + ## + # name: + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## binding Kafka ServiceAccount to a role + ## that allows Kafka pods querying the K8s API + ## + create: false + +## Prometheus Exporters / Metrics +## +metrics: + ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter + ## + kafka: + enabled: true + + ## Bitnami Kafka exporter image + ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.2.0-debian-10-r220 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Extra flags to be passed to Kafka exporter + ## Example: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" + ## + extraFlags: {} + + ## Name of the existing secret containing the optional certificate and key 
files + ## for Kafka Exporter client authentication + ## + # certificatesSecret: + + ## Prometheus Kafka Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## Kafka Exporter Service type + ## + type: ClusterIP + ## Kafka Exporter Prometheus port + ## + port: 9308 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the Kafka Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## + jmx: + enabled: true + + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.14.0-debian-10-r15 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Prometheus JMX Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## JMX Exporter Service type + ## + type: ClusterIP + ## JMX Exporter Prometheus port + ## + port: 5556 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the JMX Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/path: "/" + + ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted + ## values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + + ## Prometheus JMX exporter configuration + ## Specify content for jmx-kafka-prometheus.yml. 
Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + + ## ConfigMap with Prometheus JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + # existingConfigmap: + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + +## +## Zookeeper chart configuration +## +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + enabled: true + auth: + ## Enable Zookeeper auth + ## + enabled: true + ## User that will use Zookeeper clients to auth + ## + clientUser: zookeeperUser + ## Password that will use Zookeeper clients to auth + ## + clientPassword: zookeeperPassword + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: zookeeperUser + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. 
Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: zookeeperPassword + metrics: + enabled: true + +## This value is only used when zookeeper.enabled is set to false +## +externalZookeeper: + ## Server or list of external zookeeper servers to use. + ## + servers: [] diff --git a/ee/scripts/helm/helm/databases/charts/kafka/values.yaml b/ee/scripts/helm/helm/databases/charts/kafka/values.yaml new file mode 100755 index 000000000..154d71bd5 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/values.yaml @@ -0,0 +1,934 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.6.0-debian-10-r30 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## String to partially override kafka.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override kafka.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add 
labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kafka Configuration +## Specify content for server.properties +## The server.properties is auto-generated based on other parameters when this paremeter is not specified +## +## Example: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +# config: + +## ConfigMap with Kafka Configuration +## NOTE: This will override config +## +# existingConfigmap: + +## Kafka Log4J Configuration +## An optional log4j.properties file to overwrite the default of the Kafka brokers. +## See an example log4j.properties at: +## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +# log4j: + +## Kafka Log4j ConfigMap +## The name of an existing ConfigMap containing a log4j.properties file. +## NOTE: this will override log4j. +## +# existingLog4jConfigMap: + +## Kafka's Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m + +## Switch to enable topic deletion or not. +## +deleteTopicEnable: false + +## Switch to enable auto creation of topics. +## Enabling auto creation of topics not recommended for production or similar environments. 
+## +autoCreateTopicsEnable: true + +## The number of messages to accept before forcing a flush of data to disk. +## +logFlushIntervalMessages: 10000 + +## The maximum amount of time a message can sit in a log before we force a flush. +## +logFlushIntervalMs: 1000 + +## A size-based retention policy for logs. +## +logRetentionBytes: _1073741824 + +## The interval at which log segments are checked to see if they can be deleted. +## +logRetentionCheckIntervalMs: 300000 + +## The minimum age of a log file to be eligible for deletion due to age. +## +logRetentionHours: 168 + +## The maximum size of a log segment file. When this size is reached a new log segment will be created. +## +logSegmentBytes: _1073741824 + +## A comma separated list of directories under which to store log files. +## +logsDirs: /bitnami/kafka/data + +## The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 + +## Default replication factors for automatically created topics +## +defaultReplicationFactor: 1 + +## The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 1 + +## The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 1 + +## Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 1 + +## The number of threads doing disk I/O. +## +numIoThreads: 8 + +## The number of threads handling network requests. +## +numNetworkThreads: 3 + +## The default number of log partitions per topic. +## +numPartitions: 1 + +## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +## +numRecoveryThreadsPerDataDir: 1 + +## The receive buffer (SO_RCVBUF) used by the socket server. +## +socketReceiveBufferBytes: 102400 + +## The maximum size of a request that the socket server will accept (protection against OOM). +## +socketRequestMaxBytes: _104857600 + +## The send buffer (SO_SNDBUF) used by the socket server. 
+## +socketSendBufferBytes: 102400 + +## Timeout in ms for connecting to zookeeper. +## +zookeeperConnectionTimeoutMs: 6000 + +## Command and args for running the container. Use array form +## +command: + - /scripts/setup.sh +args: + +## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## Example: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Examples: +# extraVolumes: +# - name: kafka-jaas +# secret: +# secretName: kafka-jaas +# extraVolumeMounts: +# - name: kafka-jaas +# mountPath: /bitnami/kafka/config/kafka_jaas.conf +# subPath: kafka_jaas.conf +extraVolumes: [] +extraVolumeMounts: [] + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Authentication parameteres +## https://github.com/bitnami/bitnami-docker-kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## + clientProtocol: plaintext + interBrokerProtocol: plaintext + + ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls + ## + saslMechanisms: plain,scram-sha-256,scram-sha-512 + ## SASL mechanism for inter broker communication + ## + saslInterBrokerMechanism: plain + + ## Name of the existing secret containing the truststore and + ## one keystore per Kafka broker you have in the Kafka cluster. 
+ ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. + ## Create this secret following the steps below: + ## 1) Generate your trustore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. + ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... + ## Alternatively, you can put your JKS files under the files/jks directory + ## + # jksSecret: + + ## Password to access the JKS files when they are password-protected. + ## + # jksPassword: + + ## The endpoint identification algorithm used by clients to validate server host name. + ## Disable server host name verification by setting it to an empty string + ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + tlsEndpointIdentificationAlgorithm: https + + ## JAAS configuration for SASL authentication + ## MANDATORY when method is 'sasl', or 'sasl_tls' + ## + jaas: + ## Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + + ## Kafka client passwords. This is mandatory if more than one user is specified in clientUsers. 
+ ## + ## clientPasswords: + ## - password1 + ## - password2" + ## + clientPasswords: [] + + ## Kafka inter broker communication user + ## + interBrokerUser: admin + + ## Kafka inter broker communication password + ## + interBrokerPassword: "" + + ## Kafka Zookeeper user + ## + # zookeeperUser: + + ## Kafka Zookeeper password + ## + # zookeeperPassword: + + ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + # existingSecret: + +## The address(es) the socket server listens on. +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] + +## The address(es) (hostname:port) the brokers will advertise to producers and consumers. +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] + +## The listener->protocol mapping +## When it's nil, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +# listenerSecurityProtocolMap: + +## Allow to use the PLAINTEXT listener. +## +allowPlaintextListener: true + +## Name of listener used for communication between brokers. +## +interBrokerListenerName: INTERNAL + +## Number of Kafka brokers to deploy +## +replicaCount: 2 + +## StrategyType, can be set to RollingUpdate or OnDelete by default. 
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## Kafka containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 1Gi + requests: {} + # cpu: 250m + # memory: 256Mi + +## Kafka containers' liveness and readiness probes. Evaluated as a template. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + # failureThreshold: 3 + # periodSeconds: 10 + # successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + # periodSeconds: 10 + # successThreshold: 1 + +## Custom liveness/readiness probes that will override the default ones +## +customLivenessProbe: {} +customReadinessProbe: {} + +## Pod Disruption Budget configuration +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + # minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: 1 + +## Add sidecars to the pod. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## Kafka port for client connections + ## + port: 9092 + ## Kafka port for inter-broker connections + ## + internalPort: 9093 + ## Kafka port for external connections + ## + externalPort: 9094 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + external: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## External Access to Kafka brokers configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.17.12-debian-10-r3 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## A new service per broker will be created + ## + service: + ## Service type. Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 9094 + ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. 
Evaluated as a template + ## + annotations: {} + +## Persistence parameters +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + # storageClass: "-" + ## PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## PVC size + ## + size: 8Gi + ## PVC annotations + ## + annotations: {} + +## Init Container parameters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + enabled: false + ## Bitnami Minideb image + ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube.
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## Kafka pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fluentd.fullname template + ## + # name: + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## binding Kafka ServiceAccount to a role + ## that allows Kafka pods querying the K8s API + ## + create: false + +## Prometheus Exporters / Metrics +## +metrics: + ## Prometheus Kafka Exporter: exposes complementary metrics to JMX Exporter + ## + kafka: + enabled: false + + ## Bitnami Kafka exporter image + ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.2.0-debian-10-r220 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Extra flags to be passed to Kafka exporter + ## Example: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" + ## + extraFlags: {} + + ## Name of the existing secret containing the optional certificate and key
files + ## for Kafka Exporter client authentication + ## + # certificatesSecret: + + ## Prometheus Kafka Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## Kafka Exporter Service type + ## + type: ClusterIP + ## Kafka Exporter Prometheus port + ## + port: 9308 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the Kafka Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## + jmx: + enabled: false + + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.14.0-debian-10-r15 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Prometheus JMX Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## JMX Exporter Service type + ## + type: ClusterIP + ## JMX Exporter Prometheus port + ## + port: 5556 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the JMX Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/path: "/" + + ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted + ## values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + + ## Prometheus JMX exporter configuration + ## Specify content for jmx-kafka-prometheus.yml. 
Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + + ## ConfigMap with Prometheus JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + # existingConfigmap: + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + +## +## Zookeeper chart configuration +## +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + enabled: true + auth: + ## Enable Zookeeper auth + ## + enabled: false + ## User that will use Zookeeper clients to auth + ## + # clientUser: + ## Password that will use Zookeeper clients to auth + ## + # clientPassword: + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + # serverUsers: + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. 
Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + # serverPasswords: + +## This value is only used when zookeeper.enabled is set to false +## +externalZookeeper: + ## Server or list of external zookeeper servers to use. + ## + servers: [] diff --git a/frontend/app/Router.js b/frontend/app/Router.js index e7f05190e..edacf56c5 100644 --- a/frontend/app/Router.js +++ b/frontend/app/Router.js @@ -24,6 +24,7 @@ import * as routes from './routes'; import { OB_DEFAULT_TAB } from 'App/routes'; import Signup from './components/Signup/Signup'; import { fetchTenants } from 'Duck/user'; +import { setSessionPath } from 'Duck/sessions'; const BugFinder = withSiteIdUpdater(BugFinderPure); const Dashboard = withSiteIdUpdater(DashboardPure); @@ -73,9 +74,12 @@ const ONBOARDING_REDIRECT_PATH = routes.onboarding(OB_DEFAULT_TAB); onboarding: state.getIn([ 'user', 'onboarding' ]) }; }, { - fetchUserInfo, fetchTenants + fetchUserInfo, fetchTenants, setSessionPath }) class Router extends React.Component { + state = { + destinationPath: null + } constructor(props) { super(props); if (props.isLoggedIn) { @@ -85,10 +89,23 @@ class Router extends React.Component { props.fetchTenants(); } - componentDidUpdate(prevProps) { + componentDidMount() { + const { isLoggedIn, location } = this.props; + if (!isLoggedIn) { + this.setState({ destinationPath: location.pathname }); + } + } + + componentDidUpdate(prevProps, prevState) { + this.props.setSessionPath(prevProps.location.pathname) if (prevProps.email !== this.props.email && !this.props.email) { this.props.fetchTenants(); } + + if (!prevProps.isLoggedIn && this.props.isLoggedIn && this.state.destinationPath !== routes.login() && this.state.destinationPath !== '/') { + this.props.history.push(this.state.destinationPath); + this.setState({ destinationPath: null }); + } } render() { diff --git a/frontend/app/components/Assist/components/AssistTabs/AssistTabs.tsx 
b/frontend/app/components/Assist/components/AssistTabs/AssistTabs.tsx new file mode 100644 index 000000000..ff62a899a --- /dev/null +++ b/frontend/app/components/Assist/components/AssistTabs/AssistTabs.tsx @@ -0,0 +1,38 @@ +import React, { useEffect, useState } from 'react'; +import { SlideModal, Icon } from 'UI'; +import SessionList from '../SessionList'; +import stl from './assistTabs.css' + +interface Props { + userId: any, +} + +const AssistTabs = (props: Props) => { + const [showMenu, setShowMenu] = useState(false) + + return ( +
+
+
setShowMenu(!showMenu)} + > + More Live Sessions +
+ by +
+ +
{props.userId}
+
+
+ Live Sessions by {props.userId}
} + isDisplayed={ showMenu } + content={ showMenu && } + onClose={ () => setShowMenu(false) } + /> + + ); +}; + +export default AssistTabs; \ No newline at end of file diff --git a/frontend/app/components/Assist/components/AssistTabs/assistTabs.css b/frontend/app/components/Assist/components/AssistTabs/assistTabs.css new file mode 100644 index 000000000..462879395 --- /dev/null +++ b/frontend/app/components/Assist/components/AssistTabs/assistTabs.css @@ -0,0 +1,5 @@ +.btnLink { + cursor: pointer; + color: $green; + text-decoration: underline; +} \ No newline at end of file diff --git a/frontend/app/components/Assist/components/AssistTabs/index.ts b/frontend/app/components/Assist/components/AssistTabs/index.ts new file mode 100644 index 000000000..b089b3734 --- /dev/null +++ b/frontend/app/components/Assist/components/AssistTabs/index.ts @@ -0,0 +1 @@ +export { default } from './AssistTabs'; \ No newline at end of file diff --git a/frontend/app/components/Assist/components/SessionList/SessionList.tsx b/frontend/app/components/Assist/components/SessionList/SessionList.tsx new file mode 100644 index 000000000..f556a8f1d --- /dev/null +++ b/frontend/app/components/Assist/components/SessionList/SessionList.tsx @@ -0,0 +1,40 @@ +import React, { useEffect } from 'react'; +import { connect } from 'react-redux'; +import { fetchLiveList } from 'Duck/sessions'; +import { Loader, NoContent } from 'UI'; +import SessionItem from 'Shared/SessionItem'; + +interface Props { + loading: boolean, + list: any, + session: any, + fetchLiveList: () => void, +} +function SessionList(props: Props) { + useEffect(() => { + props.fetchLiveList(); + }, []) + + return ( + + +
+ { props.list.map(session => ) } +
+
+
+ ); +} + +export default connect(state => { + const session = state.getIn([ 'sessions', 'current' ]); + return { + session, + list: state.getIn(['sessions', 'liveSessions']) + .filter(i => i.userId === session.userId && i.sessionId !== session.sessionId), + loading: state.getIn([ 'sessions', 'fetchLiveListRequest', 'loading' ]), + } +}, { fetchLiveList })(SessionList); \ No newline at end of file diff --git a/frontend/app/components/Assist/components/SessionList/index.ts b/frontend/app/components/Assist/components/SessionList/index.ts new file mode 100644 index 000000000..779c9df2a --- /dev/null +++ b/frontend/app/components/Assist/components/SessionList/index.ts @@ -0,0 +1 @@ +export { default } from './SessionList'; \ No newline at end of file diff --git a/frontend/app/components/BugFinder/SessionsMenu/SessionsMenu.js b/frontend/app/components/BugFinder/SessionsMenu/SessionsMenu.js index fb89f6967..7275d9ac0 100644 --- a/frontend/app/components/BugFinder/SessionsMenu/SessionsMenu.js +++ b/frontend/app/components/BugFinder/SessionsMenu/SessionsMenu.js @@ -119,7 +119,7 @@ export default connect(state => ({ activeFlow: state.getIn([ 'filters', 'activeFlow' ]), captureRate: state.getIn(['watchdogs', 'captureRate']), filters: state.getIn([ 'filters', 'appliedFilter' ]), - sessionsLoading: state.getIn([ 'sessions', 'loading' ]), + sessionsLoading: state.getIn([ 'sessions', 'fetchLiveListRequest', 'loading' ]), }), { fetchWatchdogStatus, setActiveFlow, clearEvents, setActiveTab, fetchSessionList })(SessionsMenu); diff --git a/frontend/app/components/Dashboard/Dashboard.js b/frontend/app/components/Dashboard/Dashboard.js index ecbfc9daa..7d2a2f37b 100644 --- a/frontend/app/components/Dashboard/Dashboard.js +++ b/frontend/app/components/Dashboard/Dashboard.js @@ -178,7 +178,7 @@ export default class Dashboard extends React.PureComponent {
diff --git a/frontend/app/components/Errors/List/List.js b/frontend/app/components/Errors/List/List.js index 70b0953fb..cb0ffd55a 100644 --- a/frontend/app/components/Errors/List/List.js +++ b/frontend/app/components/Errors/List/List.js @@ -2,7 +2,7 @@ import cn from 'classnames'; import { connect } from 'react-redux'; import { Set, List as ImmutableList } from "immutable"; import { NoContent, Loader, Checkbox, LoadMoreButton, IconButton, Input, DropdownPlain } from 'UI'; -import { merge, resolve,unresolve,ignore } from "Duck/errors"; +import { merge, resolve, unresolve, ignore, updateCurrentPage } from "Duck/errors"; import { applyFilter } from 'Duck/filters'; import { IGNORED, RESOLVED, UNRESOLVED } from 'Types/errorInfo'; import SortDropdown from 'Components/BugFinder/Filters/SortDropdown'; @@ -30,18 +30,19 @@ const sortOptions = Object.entries(sortOptionsMap) state.getIn(["errors", "unresolve", "loading"]), ignoreLoading: state.getIn([ "errors", "ignore", "loading" ]), mergeLoading: state.getIn([ "errors", "merge", "loading" ]), + currentPage: state.getIn(["errors", "currentPage"]), }), { merge, resolve, unresolve, ignore, - applyFilter + applyFilter, + updateCurrentPage, }) export default class List extends React.PureComponent { state = { checkedAll: false, checkedIds: Set(), - showPages: 1, sort: {} } @@ -106,7 +107,7 @@ export default class List extends React.PureComponent { this.applyToAllChecked(this.props.ignore); } - addPage = () => this.setState({ showPages: this.state.showPages + 1 }) + addPage = () => this.props.updateCurrentPage(this.props.currentPage + 1) writeOption = (e, { name, value }) => { const [ sort, order ] = value.split('-'); @@ -123,16 +124,16 @@ export default class List extends React.PureComponent { resolveToggleLoading, mergeLoading, onFilterChange, + currentPage, } = this.props; const { checkedAll, checkedIds, - showPages, sort } = this.state; const someLoading = loading || ignoreLoading || resolveToggleLoading || mergeLoading; const 
currentCheckedIds = this.currentCheckedIds(); - const displayedCount = Math.min(showPages * PER_PAGE, list.size); + const displayedCount = Math.min(currentPage * PER_PAGE, list.size); let _list = sort.sort ? list.sortBy(i => i[sort.sort]) : list; _list = sort.order === 'desc' ? _list.reverse() : _list; diff --git a/frontend/app/components/Session/LivePlayer.js b/frontend/app/components/Session/LivePlayer.js index cbc3410cd..878ddf10d 100644 --- a/frontend/app/components/Session/LivePlayer.js +++ b/frontend/app/components/Session/LivePlayer.js @@ -31,17 +31,19 @@ const InitLoader = connectPlayer(state => ({ }))(Loader); -function WebPlayer ({ showAssist, session, toggleFullscreen, closeBottomBlock, live, fullscreen, jwt, loadingCredentials, assistCredendials, request }) { +function WebPlayer ({ showAssist, session, toggleFullscreen, closeBottomBlock, live, fullscreen, jwt, loadingCredentials, assistCredendials, request, isEnterprise, hasSessionsPath }) { useEffect(() => { if (!loadingCredentials) { - initPlayer(session, jwt, assistCredendials); + initPlayer(session, jwt, assistCredendials, !hasSessionsPath && session.live); } return () => cleanPlayer() }, [ session.sessionId, loadingCredentials, assistCredendials ]); // LAYOUT (TODO: local layout state - useContext or something..) 
useEffect(() => { - request(); + if (isEnterprise) { + request(); + } return () => { toggleFullscreen(false); closeBottomBlock(); @@ -60,7 +62,7 @@ function WebPlayer ({ showAssist, session, toggleFullscreen, closeBottomBlock, l ); -} +}; export default withRequest({ initialData: null, @@ -69,11 +71,17 @@ export default withRequest({ dataName: 'assistCredendials', loadingName: 'loadingCredentials', })(withPermissions(['SESSION_REPLAY', 'ASSIST_LIVE'], '', true)(connect( - state => ({ - session: state.getIn([ 'sessions', 'current' ]), - showAssist: state.getIn([ 'sessions', 'showChatWindow' ]), - jwt: state.get('jwt'), - fullscreen: state.getIn([ 'components', 'player', 'fullscreen' ]), - }), + state => { + const isAssist = state.getIn(['sessions', 'activeTab']).type === 'live'; + const hasSessioPath = state.getIn([ 'sessions', 'sessionPath' ]).includes('/sessions'); + return { + session: state.getIn([ 'sessions', 'current' ]), + showAssist: state.getIn([ 'sessions', 'showChatWindow' ]), + jwt: state.get('jwt'), + fullscreen: state.getIn([ 'components', 'player', 'fullscreen' ]), + hasSessionsPath: hasSessioPath && !isAssist, + isEnterprise: state.getIn([ 'user', 'client', 'edition' ]) === 'ee', + } + }, { toggleFullscreen, closeBottomBlock }, -)(WebPlayer))); \ No newline at end of file +)(WebPlayer))); diff --git a/frontend/app/components/Session/Session.js b/frontend/app/components/Session/Session.js index 6b90ff4c1..0138e7e50 100644 --- a/frontend/app/components/Session/Session.js +++ b/frontend/app/components/Session/Session.js @@ -20,6 +20,7 @@ function Session({ session, fetchSession, fetchSlackList, + hasSessionsPath }) { usePageTitle("OpenReplay Session Player"); useEffect(() => { @@ -34,7 +35,7 @@ function Session({ return () => { if (!session.exists()) return; } - },[ sessionId ]); + },[ sessionId, hasSessionsPath ]); return ( { session.isIOS ? - : (session.live ? : ) + : (session.live && !hasSessionsPath ? 
: ) } @@ -59,11 +60,14 @@ function Session({ export default withPermissions(['SESSION_REPLAY'], '', true)(connect((state, props) => { const { match: { params: { sessionId } } } = props; + const isAssist = state.getIn(['sessions', 'activeTab']).type === 'live'; + const hasSessiosPath = state.getIn([ 'sessions', 'sessionPath' ]).includes('/sessions'); return { sessionId, loading: state.getIn([ 'sessions', 'loading' ]), hasErrors: !!state.getIn([ 'sessions', 'errors' ]), session: state.getIn([ 'sessions', 'current' ]), + hasSessionsPath: hasSessiosPath && !isAssist, }; }, { fetchSession, diff --git a/frontend/app/components/Session_/Fetch/Fetch.js b/frontend/app/components/Session_/Fetch/Fetch.js index 71e7a2fb9..8bc4f2117 100644 --- a/frontend/app/components/Session_/Fetch/Fetch.js +++ b/frontend/app/components/Session_/Fetch/Fetch.js @@ -1,56 +1,89 @@ -//import cn from 'classnames'; import { getRE } from 'App/utils'; import { Label, NoContent, Input, SlideModal, CloseButton } from 'UI'; -import { connectPlayer, pause } from 'Player'; -import Autoscroll from '../Autoscroll'; +import { connectPlayer, pause, jump } from 'Player'; +// import Autoscroll from '../Autoscroll'; import BottomBlock from '../BottomBlock'; import TimeTable from '../TimeTable'; import FetchDetails from './FetchDetails'; import { renderName, renderDuration } from '../Network'; +import { connect } from 'react-redux'; +import { setTimelinePointer } from 'Duck/sessions'; @connectPlayer(state => ({ list: state.fetchList, })) +@connect(state => ({ + timelinePointer: state.getIn(['sessions', 'timelinePointer']), +}), { setTimelinePointer }) export default class Fetch extends React.PureComponent { state = { filter: "", + filteredList: this.props.list, current: null, + currentIndex: 0, + showFetchDetails: false, + hasNextError: false, + hasPreviousError: false, } - onFilterChange = (e, { value }) => this.setState({ filter: value }) + + onFilterChange = (e, { value }) => { + const { list } = this.props; + 
const filterRE = getRE(value, 'i'); + const filtered = list + .filter((r) => filterRE.test(r.name) || filterRE.test(r.url) || filterRE.test(r.method) || filterRE.test(r.status)); + this.setState({ filter: value, filteredList: value ? filtered : list, currentIndex: 0 }); + } setCurrent = (item, index) => { pause() + jump(item.time) this.setState({ current: item, currentIndex: index }); } - closeModal = () => this.setState({ current: null}) + onRowClick = (item, index) => { + pause() + this.setState({ current: item, currentIndex: index, showFetchDetails: true }); + this.props.setTimelinePointer(null); + } + + closeModal = () => this.setState({ current: null, showFetchDetails: false }); nextClickHander = () => { - const { list } = this.props; - const { currentIndex } = this.state; + // const { list } = this.props; + const { currentIndex, filteredList } = this.state; - if (currentIndex === list.length - 1) return; + if (currentIndex === filteredList.length - 1) return; const newIndex = currentIndex + 1; - this.setCurrent(list[newIndex], newIndex); + this.setCurrent(filteredList[newIndex], newIndex); + this.setState({ showFetchDetails: true }); } prevClickHander = () => { - const { list } = this.props; - const { currentIndex } = this.state; + // const { list } = this.props; + const { currentIndex, filteredList } = this.state; if (currentIndex === 0) return; const newIndex = currentIndex - 1; - this.setCurrent(list[newIndex], newIndex); + this.setCurrent(filteredList[newIndex], newIndex); + this.setState({ showFetchDetails: true }); + } + + static getDerivedStateFromProps(nextProps, prevState) { + const { filteredList } = prevState; + if (nextProps.timelinePointer) { + let activeItem = filteredList.find((r) => r.time >= nextProps.timelinePointer.time); + activeItem = activeItem || filteredList[filteredList.length - 1]; + return { + current: activeItem, + currentIndex: filteredList.indexOf(activeItem), + }; + } } render() { - const { list } = this.props; - const { filter, 
current, currentIndex } = this.state; - const filterRE = getRE(filter, 'i'); - const filtered = list - .filter((r) => filterRE.test(r.name) || filterRE.test(r.url) || filterRE.test(r.method) || filterRE.test(r.status)); - + // const { list } = this.props; + const { current, currentIndex, showFetchDetails, filteredList } = this.state; return (
} - isDisplayed={ current != null } - content={ current && + isDisplayed={ current != null && showFetchDetails } + content={ current && showFetchDetails && } onClose={ this.closeModal } @@ -88,25 +121,31 @@ export default class Fetch extends React.PureComponent {

Fetch

- +
+ {/*
+
Prev
+
Next
+
*/} + +
{[ @@ -120,7 +159,7 @@ export default class Fetch extends React.PureComponent { width: 60, }, { label: "Name", - width: 130, + width: 180, render: renderName, }, { diff --git a/frontend/app/components/Session_/Network/Network.js b/frontend/app/components/Session_/Network/Network.js index 878266bda..150cadc59 100644 --- a/frontend/app/components/Session_/Network/Network.js +++ b/frontend/app/components/Session_/Network/Network.js @@ -3,14 +3,10 @@ import { connectPlayer, jump, pause } from 'Player'; import { QuestionMarkHint, Popup, Tabs, Input } from 'UI'; import { getRE } from 'App/utils'; import { TYPES } from 'Types/session/resource'; -import { formatBytes } from 'App/utils'; -import { formatMs } from 'App/date'; - -import TimeTable from '../TimeTable'; -import BottomBlock from '../BottomBlock'; -import InfoLine from '../BottomBlock/InfoLine'; import stl from './network.css'; import NetworkContent from './NetworkContent'; +import { connect } from 'react-redux'; +import { setTimelinePointer } from 'Duck/sessions'; const ALL = 'ALL'; const XHR = 'xhr'; @@ -28,73 +24,24 @@ const TAB_TO_TYPE_MAP = { [ MEDIA ]: TYPES.MEDIA, [ OTHER ]: TYPES.OTHER } -const TABS = [ ALL, XHR, JS, CSS, IMG, MEDIA, OTHER ].map(tab => ({ - text: tab, - key: tab, -})); - -const DOM_LOADED_TIME_COLOR = "teal"; -const LOAD_TIME_COLOR = "red"; export function renderName(r) { return ( - { r.name }
} - content={
{ r.url }
} - size="mini" - position="right center" - /> - ); -} - -const renderXHRText = () => ( - - {XHR} - - Use our Fetch plugin - {' to capture HTTP requests and responses, including status codes and bodies.'}
- We also provide support for GraphQL - {' for easy debugging of your queries.'} - - } - className="ml-1" - /> -
-); - -function renderSize(r) { - let triggerText; - let content; - if (r.decodedBodySize == null) { - triggerText = "x"; - content = "Not captured"; - } else { - const headerSize = r.headerSize || 0; - const encodedSize = r.encodedBodySize || 0; - const transferred = headerSize + encodedSize; - const showTransferred = r.headerSize != null; - - triggerText = formatBytes(r.decodedBodySize); - content = ( -
    - { showTransferred && -
  • {`${formatBytes( r.encodedBodySize + headerSize )} transfered over network`}
  • - } -
  • {`Resource size: ${formatBytes(r.decodedBodySize)} `}
  • -
- ); - } - - return ( - { triggerText } } - content={ content } - size="mini" - position="right center" - /> +
+ { r.name }
} + content={
{ r.url }
} + size="mini" + position="right center" + /> +
{ + e.stopPropagation(); + jump(r.time) + }} + >Jump
+ ); } @@ -130,14 +77,18 @@ export function renderDuration(r) { resources: state.resourceList, domContentLoadedTime: state.domContentLoadedTime, loadTime: state.loadTime, - time: state.time, + // time: state.time, playing: state.playing, domBuildingTime: state.domBuildingTime, fetchPresented: state.fetchList.length > 0, })) +@connect(state => ({ + timelinePointer: state.getIn(['sessions', 'timelinePointer']), +}), { setTimelinePointer }) export default class Network extends React.PureComponent { state = { filter: '', + filteredList: this.props.resources, activeTab: ALL, currentIndex: 0 } @@ -146,10 +97,29 @@ export default class Network extends React.PureComponent { pause(); jump(e.time); this.setState({ currentIndex: index }) + this.props.setTimelinePointer(null); } onTabClick = activeTab => this.setState({ activeTab }) - onFilterChange = (e, { value }) => this.setState({ filter: value }) + + onFilterChange = (e, { value }) => { + const { resources } = this.props; + const filterRE = getRE(value, 'i'); + const filtered = resources.filter(({ type, name }) => + filterRE.test(name) && (activeTab === ALL || type === TAB_TO_TYPE_MAP[ activeTab ])); + + this.setState({ filter: value, filteredList: value ? filtered : resources, currentIndex: 0 }); + } + + static getDerivedStateFromProps(nextProps, prevState) { + const { filteredList } = prevState; + if (nextProps.timelinePointer) { + const activeItem = filteredList.find((r) => r.time >= nextProps.timelinePointer.time); + return { + currentIndex: activeItem ? 
filteredList.indexOf(activeItem) : filteredList.length - 1, + }; + } + } render() { const { @@ -159,50 +129,23 @@ export default class Network extends React.PureComponent { loadTime, domBuildingTime, fetchPresented, - time, + // time, playing } = this.props; - const { filter, activeTab, currentIndex } = this.state; - const filterRE = getRE(filter, 'i'); - let filtered = resources.filter(({ type, name }) => - filterRE.test(name) && (activeTab === ALL || type === TAB_TO_TYPE_MAP[ activeTab ])); - -// const referenceLines = []; -// if (domContentLoadedTime != null) { -// referenceLines.push({ -// time: domContentLoadedTime, -// color: DOM_LOADED_TIME_COLOR, -// }) -// } -// if (loadTime != null) { -// referenceLines.push({ -// time: loadTime, -// color: LOAD_TIME_COLOR, -// }) -// } -// -// let tabs = TABS; -// if (!fetchPresented) { -// tabs = TABS.map(tab => tab.key === XHR -// ? { -// text: renderXHRText(), -// key: XHR, -// } -// : tab -// ); -// } - - const resourcesSize = filtered.reduce((sum, { decodedBodySize }) => sum + (decodedBodySize || 0), 0); - const transferredSize = filtered + const { filter, activeTab, currentIndex, filteredList } = this.state; + // const filterRE = getRE(filter, 'i'); + // let filtered = resources.filter(({ type, name }) => + // filterRE.test(name) && (activeTab === ALL || type === TAB_TO_TYPE_MAP[ activeTab ])); + const resourcesSize = filteredList.reduce((sum, { decodedBodySize }) => sum + (decodedBodySize || 0), 0); + const transferredSize = filteredList .reduce((sum, { headerSize, encodedBodySize }) => sum + (headerSize || 0) + (encodedBodySize || 0), 0); return ( - {/* - - - - - - - - 0 } - /> - 0 } - /> - - - - - - {[ - { - label: "Status", - dataKey: 'status', - width: 70, - }, { - label: "Type", - dataKey: 'type', - width: 60, - }, { - label: "Name", - width: 130, - render: renderName, - }, - { - label: "Size", - width: 60, - render: renderSize, - }, - { - label: "Time", - width: 80, - render: renderDuration, - } - ]} - - - 
*/} ); } diff --git a/frontend/app/components/Session_/Network/network.css b/frontend/app/components/Session_/Network/network.css index eb37e49c9..e299211da 100644 --- a/frontend/app/components/Session_/Network/network.css +++ b/frontend/app/components/Session_/Network/network.css @@ -22,7 +22,7 @@ white-space: nowrap; text-overflow: ellipsis; overflow: hidden; - max-width: 100%; + max-width: 80%; width: fit-content; } .popupNameContent { diff --git a/frontend/app/components/Session_/Player/Controls/Timeline.js b/frontend/app/components/Session_/Player/Controls/Timeline.js index c25101d69..27c1de1ab 100644 --- a/frontend/app/components/Session_/Player/Controls/Timeline.js +++ b/frontend/app/components/Session_/Player/Controls/Timeline.js @@ -7,6 +7,7 @@ import TimeTracker from './TimeTracker'; import { ReduxTime } from './Time'; import stl from './timeline.css'; import { TYPES } from 'Types/session/event'; +import { setTimelinePointer } from 'Duck/sessions'; const getPointerIcon = (type) => { // exception, @@ -69,7 +70,7 @@ const getPointerIcon = (type) => { state.getIn([ 'sessions', 'current', 'clickRageTime' ]), returningLocationTime: state.getIn([ 'sessions', 'current', 'returningLocation' ]) && state.getIn([ 'sessions', 'current', 'returningLocationTime' ]), -})) +}), { setTimelinePointer }) export default class Timeline extends React.PureComponent { seekProgress = (e) => { const { endTime } = this.props; @@ -78,9 +79,10 @@ export default class Timeline extends React.PureComponent { this.props.jump(time); } - createEventClickHandler = time => (e) => { + createEventClickHandler = pointer => (e) => { e.stopPropagation(); - this.props.jump(time) + this.props.jump(pointer.time); + this.props.setTimelinePointer(pointer); } componentDidMount() { @@ -144,7 +146,7 @@ export default class Timeline extends React.PureComponent { //width: `${ 2000 * scale }%` } } className={ stl.clickRage } - onClick={ this.createEventClickHandler(iss.time) } + onClick={ 
this.createEventClickHandler(iss) } > ({ - session: state.getIn([ 'sessions', 'current' ]), - loading: state.getIn([ 'sessions', 'toggleFavoriteRequest', 'loading' ]), - disabled: state.getIn([ 'components', 'targetDefiner', 'inspectorMode' ]) || props.loading, - jiraConfig: state.getIn([ 'issues', 'list' ]).first(), - issuesFetched: state.getIn([ 'issues', 'issuesFetched' ]), - local: state.getIn(['sessions', 'timezone']), - funnelRef: state.getIn(['funnels', 'navRef']), - siteId: state.getIn([ 'user', 'siteId' ]), - funnelPage: state.getIn(['sessions', 'funnelPage']), -}), { - toggleFavorite, fetchListIntegration +@connect((state, props) => { + const isAssist = state.getIn(['sessions', 'activeTab']).type === 'live'; + const hasSessioPath = state.getIn([ 'sessions', 'sessionPath' ]).includes('/sessions'); + return { + session: state.getIn([ 'sessions', 'current' ]), + sessionPath: state.getIn([ 'sessions', 'sessionPath' ]), + loading: state.getIn([ 'sessions', 'toggleFavoriteRequest', 'loading' ]), + disabled: state.getIn([ 'components', 'targetDefiner', 'inspectorMode' ]) || props.loading, + jiraConfig: state.getIn([ 'issues', 'list' ]).first(), + issuesFetched: state.getIn([ 'issues', 'issuesFetched' ]), + local: state.getIn(['sessions', 'timezone']), + funnelRef: state.getIn(['funnels', 'navRef']), + siteId: state.getIn([ 'user', 'siteId' ]), + funnelPage: state.getIn(['sessions', 'funnelPage']), + hasSessionsPath: hasSessioPath && !isAssist, + } +}, { + toggleFavorite, fetchListIntegration, setSessionPath }) @withRouter export default class PlayerBlockHeader extends React.PureComponent { @@ -54,16 +61,22 @@ export default class PlayerBlockHeader extends React.PureComponent { ); backHandler = () => { - const { history, siteId, funnelPage } = this.props; - const funnelId = funnelPage && funnelPage.get('funnelId'); - const issueId = funnelPage && funnelPage.get('issueId'); - if (funnelId || issueId) { - if (issueId) { - 
history.push(withSiteId(funnelIssueRoute(funnelId, issueId), siteId)) - } else - history.push(withSiteId(funnelRoute(funnelId), siteId)); - } else + const { history, siteId, funnelPage, sessionPath } = this.props; + // alert(sessionPath) + if (sessionPath === history.location.pathname) { history.push(withSiteId(SESSIONS_ROUTE), siteId); + } else { + history.push(sessionPath ? sessionPath : withSiteId(SESSIONS_ROUTE, siteId)); + } + // const funnelId = funnelPage && funnelPage.get('funnelId'); + // const issueId = funnelPage && funnelPage.get('issueId'); + // if (funnelId || issueId) { + // if (issueId) { + // history.push(withSiteId(funnelIssueRoute(funnelId, issueId), siteId)) + // } else + // history.push(withSiteId(funnelRoute(funnelId), siteId)); + // } else + // history.push(withSiteId(SESSIONS_ROUTE), siteId); } toggleFavorite = () => { @@ -86,21 +99,24 @@ export default class PlayerBlockHeader extends React.PureComponent { userDevice, userBrowserVersion, userDeviceType, + live, }, loading, - live, + // live, disabled, jiraConfig, fullscreen, + hasSessionsPath } = this.props; - const { history, siteId } = this.props; + // const { history, siteId } = this.props; + const _live = live && !hasSessionsPath; return ( -
+
-
+
@@ -115,11 +131,17 @@ export default class PlayerBlockHeader extends React.PureComponent {
- { live && } - { !live && ( + { live && hasSessionsPath && ( +
this.props.setSessionPath('')}> + This Session is Now Continuing Live +
+ )} + { _live && } + { _live && } + { !_live && ( <> -
+
)} - { !live && jiraConfig && jiraConfig.token && } + { !_live && jiraConfig && jiraConfig.token && }
diff --git a/frontend/app/components/Session_/TimeTable/TimeTable.js b/frontend/app/components/Session_/TimeTable/TimeTable.js index 9753e3d44..e316dae56 100644 --- a/frontend/app/components/Session_/TimeTable/TimeTable.js +++ b/frontend/app/components/Session_/TimeTable/TimeTable.js @@ -135,7 +135,7 @@ export default class TimeTable extends React.PureComponent { ...computeTimeLine(this.props.rows, this.state.firstVisibleRowIndex, this.visibleCount), }); } - if (this.props.activeIndex && prevProps.activeIndex !== this.props.activeIndex && this.scroller.current != null) { + if (this.props.activeIndex >= 0 && prevProps.activeIndex !== this.props.activeIndex && this.scroller.current != null) { this.scroller.current.scrollToRow(this.props.activeIndex); } } @@ -168,7 +168,7 @@ export default class TimeTable extends React.PureComponent {
onRowClick(row, index) : null } id="table-row" > @@ -223,7 +223,7 @@ export default class TimeTable extends React.PureComponent { navigation=false, referenceLines = [], additionalHeight = 0, - activeIndex + activeIndex, } = this.props; const { timewidth, @@ -247,7 +247,7 @@ export default class TimeTable extends React.PureComponent { return (
{ navigation && -
+
(
{label}
) @connect(state => ({ - timezone: state.getIn(['sessions', 'timezone']) -}), { toggleFavorite }) + timezone: state.getIn(['sessions', 'timezone']), + isAssist: state.getIn(['sessions', 'activeTab']).type === 'live', + siteId: state.getIn([ 'user', 'siteId' ]), +}), { toggleFavorite, setSessionPath }) +@withRouter export default class SessionItem extends React.PureComponent { + + replaySession = () => { + const { history, session: { sessionId }, siteId, isAssist } = this.props; + if (!isAssist) { + this.props.setSessionPath(history.location.pathname) + } + history.push(withSiteId(sessionRoute(sessionId), siteId)) + } // eslint-disable-next-line complexity render() { const { @@ -110,9 +122,9 @@ export default class SessionItem extends React.PureComponent {
- +
- +
diff --git a/frontend/app/components/shared/SessionItem/sessionItem.css b/frontend/app/components/shared/SessionItem/sessionItem.css index 8f9824bec..cbf7bb2d1 100644 --- a/frontend/app/components/shared/SessionItem/sessionItem.css +++ b/frontend/app/components/shared/SessionItem/sessionItem.css @@ -92,7 +92,7 @@ display: flex; align-items: center; transition: all 0.2s; - /* opacity: 0; */ + cursor: pointer; &[data-viewed=true] { opacity: 1; } diff --git a/frontend/app/components/ui/IconButton/iconButton.css b/frontend/app/components/ui/IconButton/iconButton.css index 41ed89e45..3aaff8347 100644 --- a/frontend/app/components/ui/IconButton/iconButton.css +++ b/frontend/app/components/ui/IconButton/iconButton.css @@ -7,6 +7,7 @@ border-radius: 3px; display: flex; align-items: center; + justify-content: center; cursor: pointer; height: 36px; font-size: 14px; diff --git a/frontend/app/duck/errors.js b/frontend/app/duck/errors.js index 3f0793103..9e7b552f2 100644 --- a/frontend/app/duck/errors.js +++ b/frontend/app/duck/errors.js @@ -17,6 +17,7 @@ const IGNORE = "errors/IGNORE"; const MERGE = "errors/MERGE"; const TOGGLE_FAVORITE = "errors/TOGGLE_FAVORITE"; const FETCH_TRACE = "errors/FETCH_TRACE"; +const UPDATE_CURRENT_PAGE = "errors/UPDATE_CURRENT_PAGE"; function chartWrapper(chart = []) { return chart.map(point => ({ ...point, count: Math.max(point.count, 0) })); @@ -33,7 +34,8 @@ const initialState = Map({ instance: ErrorInfo(), instanceTrace: List(), stats: Map(), - sourcemapUploaded: true + sourcemapUploaded: true, + currentPage: 1, }); @@ -67,7 +69,8 @@ function reducer(state = initialState, action = {}) { return state.update("list", list => list.filter(e => !ids.includes(e.errorId))); case success(FETCH_NEW_ERRORS_COUNT): return state.set('stats', action.data); - + case UPDATE_CURRENT_PAGE: + return state.set('currentPage', action.page); } return state; } @@ -166,3 +169,9 @@ export function fetchNewErrorsCount(params = {}) { } } +export function 
updateCurrentPage(page) { + return { + type: 'errors/UPDATE_CURRENT_PAGE', + page, + }; +} diff --git a/frontend/app/duck/sessions.js b/frontend/app/duck/sessions.js index e82602799..977af85bb 100644 --- a/frontend/app/duck/sessions.js +++ b/frontend/app/duck/sessions.js @@ -8,7 +8,6 @@ import { getRE } from 'App/utils'; import { LAST_7_DAYS } from 'Types/app/period'; import { getDateRangeFromValue } from 'App/dateRange'; - const INIT = 'sessions/INIT'; const FETCH_LIST = new RequestTypes('sessions/FETCH_LIST'); @@ -25,6 +24,8 @@ const SET_EVENT_QUERY = 'sessions/SET_EVENT_QUERY'; const SET_AUTOPLAY_VALUES = 'sessions/SET_AUTOPLAY_VALUES'; const TOGGLE_CHAT_WINDOW = 'sessions/TOGGLE_CHAT_WINDOW'; const SET_FUNNEL_PAGE_FLAG = 'sessions/SET_FUNNEL_PAGE_FLAG'; +const SET_TIMELINE_POINTER = 'sessions/SET_TIMELINE_POINTER'; +const SET_SESSION_PATH = 'sessions/SET_SESSION_PATH'; const SET_ACTIVE_TAB = 'sessions/SET_ACTIVE_TAB'; @@ -57,6 +58,8 @@ const initialState = Map({ insightFilters: defaultDateFilters, host: '', funnelPage: Map(), + timelinePointer: null, + sessionPath: '', }); const reducer = (state = initialState, action = {}) => { @@ -242,13 +245,18 @@ const reducer = (state = initialState, action = {}) => { return state.set('insights', List(action.data).sort((a, b) => b.count - a.count)); case SET_FUNNEL_PAGE_FLAG: return state.set('funnelPage', action.funnelPage ? 
Map(action.funnelPage) : false); + case SET_TIMELINE_POINTER: + return state.set('timelinePointer', action.pointer); + case SET_SESSION_PATH: + return state.set('sessionPath', action.path); default: return state; } }; export default withRequestState({ - _: [ FETCH, FETCH_LIST, FETCH_LIVE_LIST ], + _: [ FETCH, FETCH_LIST ], + fetchLiveListRequest: FETCH_LIVE_LIST, fetchFavoriteListRequest: FETCH_FAVORITE_LIST, toggleFavoriteRequest: TOGGLE_FAVORITE, fetchErrorStackList: FETCH_ERROR_STACK, @@ -262,10 +270,10 @@ function init(session) { } } -export const fetchList = (params = {}, clear = false) => (dispatch, getState) => { +export const fetchList = (params = {}, clear = false, live = false) => (dispatch, getState) => { const activeTab = getState().getIn([ 'sessions', 'activeTab' ]); - return dispatch(activeTab && activeTab.type === 'live' ? { + return dispatch((activeTab && activeTab.type === 'live' || live )? { types: FETCH_LIVE_LIST.toArray(), call: client => client.post('/assist/sessions', params), } : { @@ -376,3 +384,16 @@ export function setFunnelPage(funnelPage) { } } +export function setTimelinePointer(pointer) { + return { + type: SET_TIMELINE_POINTER, + pointer + } +} + +export function setSessionPath(path) { + return { + type: SET_SESSION_PATH, + path + } +} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/MessageDistributor.ts b/frontend/app/player/MessageDistributor/MessageDistributor.ts index c742c10b5..69e4b4836 100644 --- a/frontend/app/player/MessageDistributor/MessageDistributor.ts +++ b/frontend/app/player/MessageDistributor/MessageDistributor.ts @@ -25,12 +25,11 @@ import WindowNodeCounter from './managers/WindowNodeCounter'; import ActivityManager from './managers/ActivityManager'; import AssistManager from './managers/AssistManager'; -import MessageReader from './MessageReader'; +import MFileReader from './messages/MFileReader'; import { INITIAL_STATE as SUPER_INITIAL_STATE, State as SuperState } from 
'./StatedScreen/StatedScreen'; import { INITIAL_STATE as ASSIST_INITIAL_STATE, State as AssistState } from './managers/AssistManager'; -import type { TimedMessage } from './Timed'; import type { PerformanceChartPoint } from './managers/PerformanceTrackManager'; import type { SkipInterval } from './managers/ActivityManager'; @@ -82,32 +81,24 @@ import type { SetViewportScroll, } from './messages'; -interface Timed { //TODO: to common space - time: number; -} - -type ReduxDecoded = Timed & { - action: {}, - state: {}, - duration: number, -} +import type { Timed } from './messages/timed'; export default class MessageDistributor extends StatedScreen { // TODO: consistent with the other data-lists private readonly locationEventManager: ListWalker/**/ = new ListWalker(); - private readonly locationManager: ListWalker = new ListWalker(); - private readonly loadedLocationManager: ListWalker = new ListWalker(); - private readonly connectionInfoManger: ListWalker = new ListWalker(); + private readonly locationManager: ListWalker = new ListWalker(); + private readonly loadedLocationManager: ListWalker = new ListWalker(); + private readonly connectionInfoManger: ListWalker = new ListWalker(); private readonly performanceTrackManager: PerformanceTrackManager = new PerformanceTrackManager(); private readonly windowNodeCounter: WindowNodeCounter = new WindowNodeCounter(); private readonly clickManager: ListWalker = new ListWalker(); - private readonly resizeManager: ListWalker = new ListWalker([]); + private readonly resizeManager: ListWalker = new ListWalker([]); private readonly pagesManager: PagesManager; private readonly mouseManager: MouseManager; private readonly assistManager: AssistManager; - private readonly scrollManager: ListWalker = new ListWalker(); + private readonly scrollManager: ListWalker = new ListWalker(); private readonly decoder = new Decoder(); private readonly lists = initLists(); @@ -118,7 +109,7 @@ export default class MessageDistributor extends 
StatedScreen { private navigationStartOffset: number = 0; private lastMessageTime: number = 0; - constructor(private readonly session: any /*Session*/, jwt: string, config) { + constructor(private readonly session: any /*Session*/, jwt: string, config, live: boolean) { super(); this.pagesManager = new PagesManager(this, this.session.isMobile) this.mouseManager = new MouseManager(this); @@ -126,7 +117,7 @@ export default class MessageDistributor extends StatedScreen { this.sessionStart = this.session.startedAt; - if (this.session.live) { + if (live) { // const sockUrl = `wss://live.openreplay.com/1/${ this.session.siteId }/${ this.session.sessionId }/${ jwt }`; // this.subscribeOnMessages(sockUrl); initListsDepr({}) @@ -184,7 +175,7 @@ export default class MessageDistributor extends StatedScreen { window.fetch(fileUrl) .then(r => r.arrayBuffer()) .then(b => { - const r = new MessageReader(new Uint8Array(b), this.sessionStart); + const r = new MFileReader(new Uint8Array(b), this.sessionStart); const msgs: Array = []; while (r.hasNext()) { @@ -334,7 +325,7 @@ export default class MessageDistributor extends StatedScreen { } /* Binded */ - distributeMessage = (msg: TimedMessage, index: number): void => { + distributeMessage = (msg: Message, index: number): void => { if ([ "mouse_move", "mouse_click", diff --git a/frontend/app/player/MessageDistributor/MessageReader.ts b/frontend/app/player/MessageDistributor/MessageReader.ts deleted file mode 100644 index dea8759c9..000000000 --- a/frontend/app/player/MessageDistributor/MessageReader.ts +++ /dev/null @@ -1,80 +0,0 @@ -import type { TimedMessage, Indexed } from './Timed'; - -import logger from 'App/logger'; -import readMessage, { Message } from './messages'; -import PrimitiveReader from './PrimitiveReader'; - -// function needSkipMessage(data: Uint8Array, p: number, pLast: number): boolean { -// for (let i = 7; i >= 0; i--) { -// if (data[ p + i ] !== data[ pLast + i ]) { -// return data[ p + i ] - data[ pLast + i ] < 0 
-// } -// } -// return true -// } - -export default class MessageReader extends PrimitiveReader { - private pLastMessageID: number = 0; - private currentTime: number = 0; - public error: boolean = false; - constructor(data: Uint8Array, private readonly startTime: number) { - super(data); - } - - private needSkipMessage(): boolean { - if (this.p === 0) return false; - for (let i = 7; i >= 0; i--) { - if (this.buf[ this.p + i ] !== this.buf[ this.pLastMessageID + i ]) { - return this.buf[ this.p + i ] - this.buf[ this.pLastMessageID + i ] < 0; - } - } - return true; - } - - private readMessage(): Message | null { - this.skip(8); - try { - let msg - msg = readMessage(this); - return msg; - } catch (e) { - this.error = true; - logger.error("Read message error:", e); - return null; - } - } - - hasNext():boolean { - return !this.error && this.buf.length > this.p; - } - - next(): [ TimedMessage, number] | null { - if (!this.hasNext()) { - return null; - } - - while (this.needSkipMessage()) { - this.readMessage(); - } - this.pLastMessageID = this.p; - - const msg = this.readMessage(); - if (!msg) { - return null; - } - - if (msg.tp === "timestamp") { - // if (this.startTime == null) { - // this.startTime = msg.timestamp - // } - this.currentTime = msg.timestamp - this.startTime; - } else { - const tMsg = Object.assign(msg, { - time: this.currentTime, - _index: this.pLastMessageID, - }) - return [tMsg, this.pLastMessageID]; - } - return null; - } -} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/PrimitiveReader.ts b/frontend/app/player/MessageDistributor/PrimitiveReader.ts deleted file mode 100644 index b49955074..000000000 --- a/frontend/app/player/MessageDistributor/PrimitiveReader.ts +++ /dev/null @@ -1,40 +0,0 @@ -export default class PrimitiveReader { - protected p = 0 - constructor(protected readonly buf: Uint8Array) {} - - hasNext() { - return this.p < this.buf.length - } - - readUint() { - var r = 0, s = 1, b; - do { - b = 
this.buf[this.p++]; - r += (b & 0x7F) * s; - s *= 128; - } while (b >= 0x80) - return r; - } - - readInt() { - let u = this.readUint(); - if (u % 2) { - u = (u + 1) / -2; - } else { - u = u / 2; - } - return u; - } - - readString() { - var l = this.readUint(); - return new TextDecoder().decode(this.buf.subarray(this.p, this.p+=l)); - } - - readBoolean() { - return !!this.buf[this.p++]; - } - skip(n: number) { - this.p += n; - } -} diff --git a/frontend/app/player/MessageDistributor/Timed.ts b/frontend/app/player/MessageDistributor/Timed.ts deleted file mode 100644 index e0a1d6a82..000000000 --- a/frontend/app/player/MessageDistributor/Timed.ts +++ /dev/null @@ -1,5 +0,0 @@ -import type { Message } from './messages'; - -export interface Timed { readonly time: number }; -export interface Indexed { readonly _index: number }; // TODO: remove dash (evwrywhere) -export type TimedMessage = Timed & Message; diff --git a/frontend/app/player/MessageDistributor/managers/AssistManager.ts b/frontend/app/player/MessageDistributor/managers/AssistManager.ts index 2ccea1ad5..2b0ac4e63 100644 --- a/frontend/app/player/MessageDistributor/managers/AssistManager.ts +++ b/frontend/app/player/MessageDistributor/managers/AssistManager.ts @@ -1,14 +1,14 @@ import type Peer from 'peerjs'; import type { DataConnection, MediaConnection } from 'peerjs'; import type MessageDistributor from '../MessageDistributor'; -import type { TimedMessage } from '../Timed'; import type { Message } from '../messages' -import { ID_TP_MAP } from '../messages'; import store from 'App/store'; import type { LocalStream } from './LocalStream'; import { update, getState } from '../../store'; import { iceServerConfigFromString } from 'App/utils' +import MStreamReader from '../messages/MStreamReader';; +import JSONRawMessageReader from '../messages/JSONRawMessageReader' export enum CallingState { Reconnecting, @@ -59,68 +59,15 @@ export const INITIAL_STATE: State = { const MAX_RECONNECTION_COUNT = 4; -function 
resolveURL(baseURL: string, relURL: string): string { - if (relURL.startsWith('#') || relURL === "") { - return relURL; - } - return new URL(relURL, baseURL).toString(); -} - - -var match = /bar/.exec("foobar"); -const re1 = /url\(("[^"]*"|'[^']*'|[^)]*)\)/g -const re2 = /@import "(.*?)"/g -function cssUrlsIndex(css: string): Array<[number, number]> { - const idxs: Array<[number, number]> = []; - const i1 = css.matchAll(re1); - // @ts-ignore - for (let m of i1) { - // @ts-ignore - const s: number = m.index + m[0].indexOf(m[1]); - const e: number = s + m[1].length; - idxs.push([s, e]); - } - const i2 = css.matchAll(re2); - // @ts-ignore - for (let m of i2) { - // @ts-ignore - const s = m.index + m[0].indexOf(m[1]); - const e = s + m[1].length; - idxs.push([s, e]) - } - return idxs; -} -function unquote(str: string): [string, string] { - str = str.trim(); - if (str.length <= 2) { - return [str, ""] - } - if (str[0] == '"' && str[str.length-1] == '"') { - return [ str.substring(1, str.length-1), "\""]; - } - if (str[0] == '\'' && str[str.length-1] == '\'') { - return [ str.substring(1, str.length-1), "'" ]; - } - return [str, ""] -} -function rewriteCSSLinks(css: string, rewriter: (rawurl: string) => string): string { - for (let idx of cssUrlsIndex(css)) { - const f = idx[0] - const t = idx[1] - const [ rawurl, q ] = unquote(css.substring(f, t)); - css = css.substring(0,f) + q + rewriter(rawurl) + q + css.substring(t); - } - return css -} - -function resolveCSS(baseURL: string, css: string): string { - return rewriteCSSLinks(css, rawurl => resolveURL(baseURL, rawurl)); -} - export default class AssistManager { constructor(private session, private md: MessageDistributor, private config) {} private setStatus(status: ConnectionStatus) { + if (getState().peerConnectionStatus === ConnectionStatus.Disconnected && + status !== ConnectionStatus.Connected) { + return + } + if (status === ConnectionStatus.Connecting) { this.md.setMessagesLoading(true); } else { @@ -147,12 
+94,14 @@ export default class AssistManager { return; } this.setStatus(ConnectionStatus.Connecting) + // @ts-ignore + const urlObject = new URL(window.ENV.API_EDP) import('peerjs').then(({ default: Peer }) => { + if (this.closed) {return} const _config = { - // @ts-ignore - host: new URL(window.ENV.API_EDP).host, + host: urlObject.hostname, path: '/assist', - port: location.protocol === 'https:' ? 443 : 80, + port: urlObject.port === "" ? (location.protocol === 'https:' ? 443 : 80 ): parseInt(urlObject.port), } if (this.config) { @@ -170,12 +119,11 @@ export default class AssistManager { console.warn("AssistManager PeerJS peer error: ", e.type, e) } if (['peer-unavailable', 'network', 'webrtc'].includes(e.type)) { - if (this.peer && this.connectionAttempts++ < MAX_RECONNECTION_COUNT) { - this.setStatus(ConnectionStatus.Connecting); + if (this.peer) { + this.setStatus(this.connectionAttempts++ < MAX_RECONNECTION_COUNT + ? ConnectionStatus.Connecting + : ConnectionStatus.Disconnected); this.connectToPeer(); - } else { - this.setStatus(ConnectionStatus.Disconnected); - this.dataCheckIntervalID && clearInterval(this.dataCheckIntervalID); } } else { console.error(`PeerJS error (on peer). 
Type ${e.type}`, e); @@ -190,12 +138,11 @@ export default class AssistManager { }); } - private dataCheckIntervalID: ReturnType | undefined; private connectToPeer() { if (!this.peer) { return; } this.setStatus(ConnectionStatus.Connecting); const id = this.peerID; - const conn = this.peer.connect(id, { serialization: 'json', reliable: true}); + const conn = this.peer.connect(id, { serialization: "json", reliable: true}); conn.on('open', () => { window.addEventListener("beforeunload", ()=>conn.open &&conn.send("unload")); @@ -206,75 +153,42 @@ export default class AssistManager { this._call() } - let i = 0; let firstMessage = true; this.setStatus(ConnectionStatus.WaitingMessages) + const jmr = new JSONRawMessageReader() + const reader = new MStreamReader(jmr) + conn.on('data', (data) => { - if (!Array.isArray(data)) { return this.handleCommand(data); } this.disconnectTimeout && clearTimeout(this.disconnectTimeout); + + + if (Array.isArray(data)) { + jmr.append(data) // as RawMessage[] + } else if (data instanceof ArrayBuffer) { + //rawMessageReader.append(new Uint8Array(data)) + } else { return this.handleCommand(data); } + if (firstMessage) { firstMessage = false; this.setStatus(ConnectionStatus.Connected) } - let time = 0; - let ts0 = 0; - (data as Array).forEach(msg => { - - // TODO: more appropriate way to do it. 
- if (msg._id === 60) { - // @ts-ignore - if (msg.name === 'src' || msg.name === 'href') { - // @ts-ignore - msg.value = resolveURL(msg.baseURL, msg.value); - // @ts-ignore - } else if (msg.name === 'style') { - // @ts-ignore - msg.value = resolveCSS(msg.baseURL, msg.value); - } - msg._id = 12; - } else if (msg._id === 61) { // "SetCSSDataURLBased" - // @ts-ignore - msg.data = resolveCSS(msg.baseURL, msg.data); - msg._id = 15; - } else if (msg._id === 67) { // "insert_rule" - // @ts-ignore - msg.rule = resolveCSS(msg.baseURL, msg.rule); - msg._id = 37; - } - - - msg.tp = ID_TP_MAP[msg._id]; // _id goes from tracker - - if (msg.tp === "timestamp") { - ts0 = ts0 || msg.timestamp - time = msg.timestamp - ts0; - return; - } - const tMsg: TimedMessage = Object.assign(msg, { - time, - _index: i, - }); - this.md.distributeMessage(tMsg, i++); - }); + for (let msg = reader.readNext();msg !== null;msg = reader.readNext()) { + //@ts-ignore + this.md.distributeMessage(msg, msg._index); + } }); }); const onDataClose = () => { this.onCallDisconnect() - //console.log('closed peer conn. Reconnecting...') this.connectToPeer(); } - // this.dataCheckIntervalID = setInterval(() => { - // if (!this.dataConnection && getState().peerConnectionStatus === ConnectionStatus.Connected) { - // onDataClose(); - // } - // }, 3000); - conn.on('close', onDataClose);// Does it work ? + conn.on('close', onDataClose);// What case does it work ? 
conn.on("error", (e) => { this.setStatus(ConnectionStatus.Error); }) @@ -284,11 +198,9 @@ export default class AssistManager { private get dataConnection(): DataConnection | undefined { return this.peer?.connections[this.peerID]?.find(c => c.type === 'data' && c.open); } - private get callConnection(): MediaConnection | undefined { return this.peer?.connections[this.peerID]?.find(c => c.type === 'media' && c.open); } - private send(data: any) { this.dataConnection?.send(data); } @@ -326,18 +238,20 @@ export default class AssistManager { private disconnectTimeout: ReturnType | undefined; + private closeDataConnectionTimeout: ReturnType | undefined; private handleCommand(command: string) { console.log("Data command", command) switch (command) { case "unload": //this.onTrackerCallEnd(); - this.onCallDisconnect() - this.dataConnection?.close(); + this.closeDataConnectionTimeout = setTimeout(() => { + this.onCallDisconnect() + this.dataConnection?.close(); + }, 1500); this.disconnectTimeout = setTimeout(() => { this.onTrackerCallEnd(); this.setStatus(ConnectionStatus.Disconnected); }, 15000); // TODO: more convenient way - //this.dataConnection?.close(); return; case "call_end": this.onTrackerCallEnd(); @@ -349,29 +263,17 @@ export default class AssistManager { } } - // private mmtid?:ReturnType private onMouseMove = (e: MouseEvent): void => { - // this.mmtid && clearTimeout(this.mmtid) - // this.mmtid = setTimeout(() => { const data = this.md.getInternalCoordinates(e); this.send({ x: Math.round(data.x), y: Math.round(data.y) }); - // }, 5) } - // private wtid?: ReturnType - // private scrollDelta: [number, number] = [0,0] private onWheel = (e: WheelEvent): void => { e.preventDefault() - //throttling makes movements less smooth - // this.wtid && clearTimeout(this.wtid) - // this.scrollDelta[0] += e.deltaX - // this.scrollDelta[1] += e.deltaY - // this.wtid = setTimeout(() => { - this.send({ type: "scroll", delta: [ e.deltaX, e.deltaY ]})//this.scrollDelta }); - 
this.onMouseMove(e) - // this.scrollDelta = [0,0] - // }, 20) + //throttling makes movements less smooth, so it is omitted + //this.onMouseMove(e) + this.send({ type: "scroll", delta: [ e.deltaX, e.deltaY ]}) } private onMouseClick = (e: MouseEvent): void => { @@ -459,7 +361,7 @@ export default class AssistManager { }); this.md.overlay.addEventListener("mousemove", this.onMouseMove) - // this.md.overlay.addEventListener("click", this.onMouseClick) + this.md.overlay.addEventListener("click", this.onMouseClick) }); //call.peerConnection.addEventListener("track", e => console.log('newtrack',e.track)) @@ -473,13 +375,15 @@ export default class AssistManager { window.addEventListener("beforeunload", this.initiateCallEnd) } + closed = false clear() { + this.closed =true this.initiateCallEnd(); - this.dataCheckIntervalID && clearInterval(this.dataCheckIntervalID); if (this.peer) { - //console.log("destroying peer...") + console.log("destroying peer...") const peer = this.peer; // otherwise it calls reconnection on data chan close this.peer = null; + peer.disconnect(); peer.destroy(); } } diff --git a/frontend/app/player/MessageDistributor/managers/AssistManager_old.ts b/frontend/app/player/MessageDistributor/managers/AssistManager_old.ts new file mode 100644 index 000000000..b901dc076 --- /dev/null +++ b/frontend/app/player/MessageDistributor/managers/AssistManager_old.ts @@ -0,0 +1,486 @@ +// import type Peer from 'peerjs'; +// import type { DataConnection, MediaConnection } from 'peerjs'; +// import type MessageDistributor from '../MessageDistributor'; +// import type { Message } from '../messages' +// import store from 'App/store'; +// import type { LocalStream } from './LocalStream'; +// import { update, getState } from '../../store'; +// import { iceServerConfigFromString } from 'App/utils' + + +// export enum CallingState { +// Reconnecting, +// Requesting, +// True, +// False, +// }; + +// export enum ConnectionStatus { +// Connecting, +// WaitingMessages, +// 
Connected, +// Inactive, +// Disconnected, +// Error, +// }; + + +// export function getStatusText(status: ConnectionStatus): string { +// switch(status) { +// case ConnectionStatus.Connecting: +// return "Connecting..."; +// case ConnectionStatus.Connected: +// return ""; +// case ConnectionStatus.Inactive: +// return "Client tab is inactive"; +// case ConnectionStatus.Disconnected: +// return "Disconnected"; +// case ConnectionStatus.Error: +// return "Something went wrong. Try to reload the page."; +// case ConnectionStatus.WaitingMessages: +// return "Connected. Waiting for the data... (The tab might be inactive)" +// } +// } + +// export interface State { +// calling: CallingState, +// peerConnectionStatus: ConnectionStatus, +// remoteControl: boolean, +// } + +// export const INITIAL_STATE: State = { +// calling: CallingState.False, +// peerConnectionStatus: ConnectionStatus.Connecting, +// remoteControl: false, +// } + +// const MAX_RECONNECTION_COUNT = 4; + + +// function resolveURL(baseURL: string, relURL: string): string { +// if (relURL.startsWith('#') || relURL === "") { +// return relURL; +// } +// return new URL(relURL, baseURL).toString(); +// } + + +// var match = /bar/.exec("foobar"); +// const re1 = /url\(("[^"]*"|'[^']*'|[^)]*)\)/g +// const re2 = /@import "(.*?)"/g +// function cssUrlsIndex(css: string): Array<[number, number]> { +// const idxs: Array<[number, number]> = []; +// const i1 = css.matchAll(re1); +// // @ts-ignore +// for (let m of i1) { +// // @ts-ignore +// const s: number = m.index + m[0].indexOf(m[1]); +// const e: number = s + m[1].length; +// idxs.push([s, e]); +// } +// const i2 = css.matchAll(re2); +// // @ts-ignore +// for (let m of i2) { +// // @ts-ignore +// const s = m.index + m[0].indexOf(m[1]); +// const e = s + m[1].length; +// idxs.push([s, e]) +// } +// return idxs; +// } +// function unquote(str: string): [string, string] { +// str = str.trim(); +// if (str.length <= 2) { +// return [str, ""] +// } +// if (str[0] == 
'"' && str[str.length-1] == '"') { +// return [ str.substring(1, str.length-1), "\""]; +// } +// if (str[0] == '\'' && str[str.length-1] == '\'') { +// return [ str.substring(1, str.length-1), "'" ]; +// } +// return [str, ""] +// } +// function rewriteCSSLinks(css: string, rewriter: (rawurl: string) => string): string { +// for (let idx of cssUrlsIndex(css)) { +// const f = idx[0] +// const t = idx[1] +// const [ rawurl, q ] = unquote(css.substring(f, t)); +// css = css.substring(0,f) + q + rewriter(rawurl) + q + css.substring(t); +// } +// return css +// } + +// function resolveCSS(baseURL: string, css: string): string { +// return rewriteCSSLinks(css, rawurl => resolveURL(baseURL, rawurl)); +// } + +// export default class AssistManager { +// constructor(private session, private md: MessageDistributor, private config) {} + +// private setStatus(status: ConnectionStatus) { +// if (status === ConnectionStatus.Connecting) { +// this.md.setMessagesLoading(true); +// } else { +// this.md.setMessagesLoading(false); +// } +// if (status === ConnectionStatus.Connected) { +// this.md.display(true); +// } else { +// this.md.display(false); +// } +// update({ peerConnectionStatus: status }); +// } + +// private get peerID(): string { +// return `${this.session.projectKey}-${this.session.sessionId}` +// } + +// private peer: Peer | null = null; +// connectionAttempts: number = 0; +// private peeropened: boolean = false; +// connect() { +// if (this.peer != null) { +// console.error("AssistManager: trying to connect more than once"); +// return; +// } +// this.setStatus(ConnectionStatus.Connecting) +// import('peerjs').then(({ default: Peer }) => { +// const _config = { +// // @ts-ignore +// host: new URL(window.ENV.API_EDP).host, +// path: '/assist', +// port: location.protocol === 'https:' ? 
443 : 80, +// } + +// if (this.config) { +// _config['config'] = { +// iceServers: this.config, +// sdpSemantics: 'unified-plan', +// iceTransportPolicy: 'relay', +// }; +// } + +// const peer = new Peer(_config); +// this.peer = peer; +// peer.on('error', e => { +// if (e.type !== 'peer-unavailable') { +// console.warn("AssistManager PeerJS peer error: ", e.type, e) +// } +// if (['peer-unavailable', 'network', 'webrtc'].includes(e.type)) { +// if (this.peer && this.connectionAttempts++ < MAX_RECONNECTION_COUNT) { +// this.setStatus(ConnectionStatus.Connecting); +// this.connectToPeer(); +// } else { +// this.setStatus(ConnectionStatus.Disconnected); +// this.dataCheckIntervalID && clearInterval(this.dataCheckIntervalID); +// } +// } else { +// console.error(`PeerJS error (on peer). Type ${e.type}`, e); +// this.setStatus(ConnectionStatus.Error) +// } +// }) +// peer.on("open", () => { +// if (this.peeropened) { return; } +// this.peeropened = true; +// this.connectToPeer(); +// }); +// }); +// } + +// private dataCheckIntervalID: ReturnType | undefined; +// private connectToPeer() { +// if (!this.peer) { return; } +// this.setStatus(ConnectionStatus.Connecting); +// const id = this.peerID; +// const conn = this.peer.connect(id, { serialization: 'json', reliable: true}); +// conn.on('open', () => { +// window.addEventListener("beforeunload", ()=>conn.open &&conn.send("unload")); + +// //console.log("peer connected") + + +// if (getState().calling === CallingState.Reconnecting) { +// this._call() +// } + +// let i = 0; +// let firstMessage = true; + +// this.setStatus(ConnectionStatus.WaitingMessages) + +// conn.on('data', (data) => { +// if (!Array.isArray(data)) { return this.handleCommand(data); } +// this.disconnectTimeout && clearTimeout(this.disconnectTimeout); +// if (firstMessage) { +// firstMessage = false; +// this.setStatus(ConnectionStatus.Connected) +// } + +// let time = 0; +// let ts0 = 0; +// (data as Array).forEach(msg => { + +// // TODO: more 
appropriate way to do it. +// if (msg._id === 60) { +// // @ts-ignore +// if (msg.name === 'src' || msg.name === 'href') { +// // @ts-ignore +// msg.value = resolveURL(msg.baseURL, msg.value); +// // @ts-ignore +// } else if (msg.name === 'style') { +// // @ts-ignore +// msg.value = resolveCSS(msg.baseURL, msg.value); +// } +// msg._id = 12; +// } else if (msg._id === 61) { // "SetCSSDataURLBased" +// // @ts-ignore +// msg.data = resolveCSS(msg.baseURL, msg.data); +// msg._id = 15; +// } else if (msg._id === 67) { // "insert_rule" +// // @ts-ignore +// msg.rule = resolveCSS(msg.baseURL, msg.rule); +// msg._id = 37; +// } + + +// msg.tp = ID_TP_MAP[msg._id]; // _id goes from tracker + +// if (msg.tp === "timestamp") { +// ts0 = ts0 || msg.timestamp +// time = msg.timestamp - ts0; +// return; +// } +// const tMsg: TimedMessage = Object.assign(msg, { +// time, +// _index: i, +// }); +// this.md.distributeMessage(tMsg, i++); +// }); +// }); +// }); + + +// const onDataClose = () => { +// this.onCallDisconnect() +// //console.log('closed peer conn. Reconnecting...') +// this.connectToPeer(); +// } + +// // this.dataCheckIntervalID = setInterval(() => { +// // if (!this.dataConnection && getState().peerConnectionStatus === ConnectionStatus.Connected) { +// // onDataClose(); +// // } +// // }, 3000); +// conn.on('close', onDataClose);// Does it work ? 
+// conn.on("error", (e) => { +// this.setStatus(ConnectionStatus.Error); +// }) +// } + + +// private get dataConnection(): DataConnection | undefined { +// return this.peer?.connections[this.peerID]?.find(c => c.type === 'data' && c.open); +// } + +// private get callConnection(): MediaConnection | undefined { +// return this.peer?.connections[this.peerID]?.find(c => c.type === 'media' && c.open); +// } + +// private send(data: any) { +// this.dataConnection?.send(data); +// } + + +// private forceCallEnd() { +// this.callConnection?.close(); +// } +// private notifyCallEnd() { +// const dataConn = this.dataConnection; +// if (dataConn) { +// dataConn.send("call_end"); +// } +// } +// private initiateCallEnd = () => { +// this.forceCallEnd(); +// this.notifyCallEnd(); +// this.localCallData && this.localCallData.onCallEnd(); +// } + +// private onTrackerCallEnd = () => { +// console.log('onTrackerCallEnd') +// this.forceCallEnd(); +// if (getState().calling === CallingState.Requesting) { +// this.localCallData && this.localCallData.onReject(); +// } +// this.localCallData && this.localCallData.onCallEnd(); +// } + +// private onCallDisconnect = () => { +// if (getState().calling === CallingState.True) { +// update({ calling: CallingState.Reconnecting }); +// } +// } + + +// private disconnectTimeout: ReturnType | undefined; +// private handleCommand(command: string) { +// console.log("Data command", command) +// switch (command) { +// case "unload": +// //this.onTrackerCallEnd(); +// this.onCallDisconnect() +// this.dataConnection?.close(); +// this.disconnectTimeout = setTimeout(() => { +// this.onTrackerCallEnd(); +// this.setStatus(ConnectionStatus.Disconnected); +// }, 15000); // TODO: more convenient way +// //this.dataConnection?.close(); +// return; +// case "call_end": +// this.onTrackerCallEnd(); +// return; +// case "call_error": +// this.onTrackerCallEnd(); +// this.setStatus(ConnectionStatus.Error); +// return; +// } +// } + +// // private 
mmtid?:ReturnType +// private onMouseMove = (e: MouseEvent): void => { +// // this.mmtid && clearTimeout(this.mmtid) +// // this.mmtid = setTimeout(() => { +// const data = this.md.getInternalCoordinates(e); +// this.send({ x: Math.round(data.x), y: Math.round(data.y) }); +// // }, 5) +// } + + +// // private wtid?: ReturnType +// // private scrollDelta: [number, number] = [0,0] +// private onWheel = (e: WheelEvent): void => { +// e.preventDefault() +// //throttling makes movements less smooth +// // this.wtid && clearTimeout(this.wtid) +// // this.scrollDelta[0] += e.deltaX +// // this.scrollDelta[1] += e.deltaY +// // this.wtid = setTimeout(() => { +// this.send({ type: "scroll", delta: [ e.deltaX, e.deltaY ]})//this.scrollDelta }); +// this.onMouseMove(e) +// // this.scrollDelta = [0,0] +// // }, 20) +// } + +// private onMouseClick = (e: MouseEvent): void => { +// const conn = this.dataConnection; +// if (!conn) { return; } +// const data = this.md.getInternalCoordinates(e); +// // const el = this.md.getElementFromPoint(e); // requires requestiong node_id from domManager +// const el = this.md.getElementFromInternalPoint(data) +// if (el instanceof HTMLElement) { +// el.focus() +// el.oninput = e => e.preventDefault(); +// el.onkeydown = e => e.preventDefault(); +// } +// conn.send({ type: "click", x: Math.round(data.x), y: Math.round(data.y) }); +// } + +// private toggleRemoteControl = (flag?: boolean) => { +// const state = getState().remoteControl; +// const newState = typeof flag === 'boolean' ? 
flag : !state; +// if (state === newState) { return } +// if (newState) { +// this.md.overlay.addEventListener("click", this.onMouseClick); +// this.md.overlay.addEventListener("wheel", this.onWheel) +// update({ remoteControl: true }) +// } else { +// this.md.overlay.removeEventListener("click", this.onMouseClick); +// this.md.overlay.removeEventListener("wheel", this.onWheel); +// update({ remoteControl: false }) +// } +// } + +// private localCallData: { +// localStream: LocalStream, +// onStream: (s: MediaStream)=>void, +// onCallEnd: () => void, +// onReject: () => void, +// onError?: ()=> void +// } | null = null + +// call(localStream: LocalStream, onStream: (s: MediaStream)=>void, onCallEnd: () => void, onReject: () => void, onError?: ()=> void): { end: Function, toggleRemoteControl: Function } { +// this.localCallData = { +// localStream, +// onStream, +// onCallEnd: () => { +// onCallEnd(); +// this.toggleRemoteControl(false); +// this.md.overlay.removeEventListener("mousemove", this.onMouseMove); +// this.md.overlay.removeEventListener("click", this.onMouseClick); +// update({ calling: CallingState.False }); +// this.localCallData = null; +// }, +// onReject, +// onError, +// } +// this._call() +// return { +// end: this.initiateCallEnd, +// toggleRemoteControl: this.toggleRemoteControl, +// } +// } + +// private _call() { +// if (!this.peer || !this.localCallData || ![CallingState.False, CallingState.Reconnecting].includes(getState().calling)) { return null; } + +// update({ calling: CallingState.Requesting }); + +// //console.log('calling...', this.localCallData.localStream) + +// const call = this.peer.call(this.peerID, this.localCallData.localStream.stream); +// this.localCallData.localStream.onVideoTrack(vTrack => { +// const sender = call.peerConnection.getSenders().find(s => s.track?.kind === "video") +// if (!sender) { +// //logger.warn("No video sender found") +// return +// } +// //logger.log("sender found:", sender) +// 
sender.replaceTrack(vTrack) +// }) + +// call.on('stream', stream => { +// update({ calling: CallingState.True }); +// this.localCallData && this.localCallData.onStream(stream); +// this.send({ +// name: store.getState().getIn([ 'user', 'account', 'name']), +// }); + +// this.md.overlay.addEventListener("mousemove", this.onMouseMove) +// // this.md.overlay.addEventListener("click", this.onMouseClick) +// }); +// //call.peerConnection.addEventListener("track", e => console.log('newtrack',e.track)) + +// call.on("close", this.localCallData.onCallEnd); +// call.on("error", (e) => { +// console.error("PeerJS error (on call):", e) +// this.initiateCallEnd(); +// this.localCallData && this.localCallData.onError && this.localCallData.onError(); +// }); + +// window.addEventListener("beforeunload", this.initiateCallEnd) +// } + +// clear() { +// this.initiateCallEnd(); +// this.dataCheckIntervalID && clearInterval(this.dataCheckIntervalID); +// if (this.peer) { +// //console.log("destroying peer...") +// const peer = this.peer; // otherwise it calls reconnection on data chan close +// this.peer = null; +// peer.destroy(); +// } +// } +// } + + diff --git a/frontend/app/player/MessageDistributor/managers/DOMManager.ts b/frontend/app/player/MessageDistributor/managers/DOMManager.ts index f226c1b4e..7c40a4668 100644 --- a/frontend/app/player/MessageDistributor/managers/DOMManager.ts +++ b/frontend/app/player/MessageDistributor/managers/DOMManager.ts @@ -1,24 +1,22 @@ import type StatedScreen from '../StatedScreen'; import type { Message, SetNodeScroll, CreateElementNode } from '../messages'; -import type { TimedMessage } from '../Timed'; import logger from 'App/logger'; import StylesManager, { rewriteNodeStyleSheet } from './StylesManager'; import ListWalker from './ListWalker'; -import type { Timed }from '../Timed'; const IGNORED_ATTRS = [ "autocomplete", "name" ]; const ATTR_NAME_REGEXP = /([^\t\n\f \/>"'=]+)/; // regexp costs ~ -export default class DOMManager extends 
ListWalker { +export default class DOMManager extends ListWalker { private isMobile: boolean; private screen: StatedScreen; private nl: Array = []; private isLink: Array = []; // Optimisations private bodyId: number = -1; private postponedBodyMessage: CreateElementNode | null = null; - private nodeScrollManagers: Array> = []; + private nodeScrollManagers: Array> = []; private stylesManager: StylesManager; @@ -36,7 +34,7 @@ export default class DOMManager extends ListWalker { return this.startTime; } - add(m: TimedMessage): void { + add(m: Message): void { switch (m.tp) { case "set_node_scroll": if (!this.nodeScrollManagers[ m.id ]) { @@ -104,8 +102,9 @@ export default class DOMManager extends ListWalker { if ((el instanceof HTMLStyleElement) && // TODO: correct ordering OR filter in tracker el.sheet && el.sheet.cssRules && - el.sheet.cssRules.length > 0) { - logger.log("Trying to insert child to style tag with virtual rules: ", this.nl[ parentID ], this.nl[ id ]); + el.sheet.cssRules.length > 0 && + el.innerText.trim().length === 0) { + logger.log("Trying to insert child to a style tag with virtual rules: ", this.nl[ parentID ], this.nl[ id ]); return; } @@ -183,6 +182,9 @@ export default class DOMManager extends ListWalker { } this.stylesManager.setStyleHandlers(node, value); } + if (node.namespaceURI === 'http://www.w3.org/2000/svg' && value.startsWith("url(")) { + value = "url(#" + (value.split("#")[1] ||")") + } try { node.setAttribute(name, value); } catch(e) { diff --git a/frontend/app/player/MessageDistributor/managers/ListWalker.ts b/frontend/app/player/MessageDistributor/managers/ListWalker.ts index 6283ff3ab..dcfe5cd96 100644 --- a/frontend/app/player/MessageDistributor/managers/ListWalker.ts +++ b/frontend/app/player/MessageDistributor/managers/ListWalker.ts @@ -1,4 +1,4 @@ -import type { Timed } from '../Timed'; +import type { Timed } from '../messages/timed'; export default class ListWalker { // Optimisation: #prop compiles to method that costs mor 
than strict property call. diff --git a/frontend/app/player/MessageDistributor/managers/MobXStateManager.ts b/frontend/app/player/MessageDistributor/managers/MobXStateManager.ts new file mode 100644 index 000000000..7a181fcf0 --- /dev/null +++ b/frontend/app/player/MessageDistributor/managers/MobXStateManager.ts @@ -0,0 +1,14 @@ +// import type { MobX } from '../messages'; +// import type { Timed } from '../Timed'; + +// import ListWalker from './ListWalker'; + +// type MobXTimed = MobX & Timed; + +// export default class MobXStateManager extends ListWalker { +// moveToLast(t: number) { +// super.moveApply(t, ) +// } + + +// } \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/managers/MouseManager.ts b/frontend/app/player/MessageDistributor/managers/MouseManager.ts index cd26c5b7f..ba174ae89 100644 --- a/frontend/app/player/MessageDistributor/managers/MouseManager.ts +++ b/frontend/app/player/MessageDistributor/managers/MouseManager.ts @@ -1,15 +1,12 @@ import type StatedScreen from '../StatedScreen'; import type { MouseMove } from '../messages'; -import type { Timed } from '../Timed'; import ListWalker from './ListWalker'; -type MouseMoveTimed = MouseMove & Timed; - const HOVER_CLASS = "-openreplay-hover"; const HOVER_CLASS_DEPR = "-asayer-hover"; -export default class MouseManager extends ListWalker { +export default class MouseManager extends ListWalker { private hoverElements: Array = []; constructor(private screen: StatedScreen) {super();} @@ -39,6 +36,7 @@ export default class MouseManager extends ListWalker { if (!!lastMouseMove){ // @ts-ignore TODO this.screen.cursor.move(lastMouseMove); + //window.getComputedStyle(this.screen.getCursorTarget()).cursor === 'pointer' // might nfluence performance though this.updateHover(); } } diff --git a/frontend/app/player/MessageDistributor/managers/PerformanceTrackManager.ts b/frontend/app/player/MessageDistributor/managers/PerformanceTrackManager.ts index 1b11813b2..4c756616e 100644 --- 
a/frontend/app/player/MessageDistributor/managers/PerformanceTrackManager.ts +++ b/frontend/app/player/MessageDistributor/managers/PerformanceTrackManager.ts @@ -1,11 +1,7 @@ import type { PerformanceTrack, SetPageVisibility } from '../messages'; -import type { Timed } from '../Timed'; import ListWalker from './ListWalker'; -type TimedPerformanceTrack = Timed & PerformanceTrack; -type TimedSetPageVisibility = Timed & SetPageVisibility; - export type PerformanceChartPoint = { time: number, usedHeap: number, @@ -15,7 +11,7 @@ export type PerformanceChartPoint = { nodesCount: number, } -export default class PerformanceTrackManager extends ListWalker { +export default class PerformanceTrackManager extends ListWalker { private chart: Array = []; private isHidden: boolean = false; private timeCorrection: number = 0; @@ -26,7 +22,7 @@ export default class PerformanceTrackManager extends ListWalker { +export default class StylesManager extends ListWalker { private linkLoadingCount: number = 0; private linkLoadPromises: Array> = []; private skipCSSLinks: Array = []; // should be common for all pages diff --git a/frontend/app/player/MessageDistributor/messages.ts b/frontend/app/player/MessageDistributor/messages.ts deleted file mode 100644 index bb391b9f4..000000000 --- a/frontend/app/player/MessageDistributor/messages.ts +++ /dev/null @@ -1,715 +0,0 @@ -// Auto-generated, do not edit - -import PrimitiveReader from './PrimitiveReader'; - -export const ID_TP_MAP = { - - 0: "timestamp", - 2: "session_disconnect", - 4: "set_page_location", - 5: "set_viewport_size", - 6: "set_viewport_scroll", - 7: "create_document", - 8: "create_element_node", - 9: "create_text_node", - 10: "move_node", - 11: "remove_node", - 12: "set_node_attribute", - 13: "remove_node_attribute", - 14: "set_node_data", - 15: "set_css_data", - 16: "set_node_scroll", - 18: "set_input_value", - 19: "set_input_checked", - 20: "mouse_move", - 22: "console_log", - 37: "css_insert_rule", - 38: "css_delete_rule", - 
39: "fetch", - 40: "profiler", - 41: "o_table", - 44: "redux", - 45: "vuex", - 46: "mob_x", - 47: "ng_rx", - 48: "graph_ql", - 49: "performance_track", - 54: "connection_information", - 55: "set_page_visibility", - 59: "long_task", - 69: "mouse_click", - 70: "create_i_frame_document", - 90: "ios_session_start", - 93: "ios_custom_event", - 96: "ios_screen_changes", - 100: "ios_click_event", - 102: "ios_performance_event", - 103: "ios_log", - 105: "ios_network_call", -} as const; - - -export interface Timestamp { - tp: "timestamp", - timestamp: number, -} - -export interface SessionDisconnect { - tp: "session_disconnect", - timestamp: number, -} - -export interface SetPageLocation { - tp: "set_page_location", - url: string, - referrer: string, - navigationStart: number, -} - -export interface SetViewportSize { - tp: "set_viewport_size", - width: number, - height: number, -} - -export interface SetViewportScroll { - tp: "set_viewport_scroll", - x: number, - y: number, -} - -export interface CreateDocument { - tp: "create_document", - -} - -export interface CreateElementNode { - tp: "create_element_node", - id: number, - parentID: number, - index: number, - tag: string, - svg: boolean, -} - -export interface CreateTextNode { - tp: "create_text_node", - id: number, - parentID: number, - index: number, -} - -export interface MoveNode { - tp: "move_node", - id: number, - parentID: number, - index: number, -} - -export interface RemoveNode { - tp: "remove_node", - id: number, -} - -export interface SetNodeAttribute { - tp: "set_node_attribute", - id: number, - name: string, - value: string, -} - -export interface RemoveNodeAttribute { - tp: "remove_node_attribute", - id: number, - name: string, -} - -export interface SetNodeData { - tp: "set_node_data", - id: number, - data: string, -} - -export interface SetCssData { - tp: "set_css_data", - id: number, - data: string, -} - -export interface SetNodeScroll { - tp: "set_node_scroll", - id: number, - x: number, - y: number, 
-} - -export interface SetInputValue { - tp: "set_input_value", - id: number, - value: string, - mask: number, -} - -export interface SetInputChecked { - tp: "set_input_checked", - id: number, - checked: boolean, -} - -export interface MouseMove { - tp: "mouse_move", - x: number, - y: number, -} - -export interface ConsoleLog { - tp: "console_log", - level: string, - value: string, -} - -export interface CssInsertRule { - tp: "css_insert_rule", - id: number, - rule: string, - index: number, -} - -export interface CssDeleteRule { - tp: "css_delete_rule", - id: number, - index: number, -} - -export interface Fetch { - tp: "fetch", - method: string, - url: string, - request: string, - response: string, - status: number, - timestamp: number, - duration: number, -} - -export interface Profiler { - tp: "profiler", - name: string, - duration: number, - args: string, - result: string, -} - -export interface OTable { - tp: "o_table", - key: string, - value: string, -} - -export interface Redux { - tp: "redux", - action: string, - state: string, - duration: number, -} - -export interface Vuex { - tp: "vuex", - mutation: string, - state: string, -} - -export interface MobX { - tp: "mob_x", - type: string, - payload: string, -} - -export interface NgRx { - tp: "ng_rx", - action: string, - state: string, - duration: number, -} - -export interface GraphQl { - tp: "graph_ql", - operationKind: string, - operationName: string, - variables: string, - response: string, -} - -export interface PerformanceTrack { - tp: "performance_track", - frames: number, - ticks: number, - totalJSHeapSize: number, - usedJSHeapSize: number, -} - -export interface ConnectionInformation { - tp: "connection_information", - downlink: number, - type: string, -} - -export interface SetPageVisibility { - tp: "set_page_visibility", - hidden: boolean, -} - -export interface LongTask { - tp: "long_task", - timestamp: number, - duration: number, - context: number, - containerType: number, - containerSrc: string, 
- containerId: string, - containerName: string, -} - -export interface MouseClick { - tp: "mouse_click", - id: number, - hesitationTime: number, - label: string, - selector: string, -} - -export interface CreateIFrameDocument { - tp: "create_i_frame_document", - frameID: number, - id: number, -} - -export interface IosSessionStart { - tp: "ios_session_start", - timestamp: number, - projectID: number, - trackerVersion: string, - revID: string, - userUUID: string, - userOS: string, - userOSVersion: string, - userDevice: string, - userDeviceType: string, - userCountry: string, -} - -export interface IosCustomEvent { - tp: "ios_custom_event", - timestamp: number, - length: number, - name: string, - payload: string, -} - -export interface IosScreenChanges { - tp: "ios_screen_changes", - timestamp: number, - length: number, - x: number, - y: number, - width: number, - height: number, -} - -export interface IosClickEvent { - tp: "ios_click_event", - timestamp: number, - length: number, - label: string, - x: number, - y: number, -} - -export interface IosPerformanceEvent { - tp: "ios_performance_event", - timestamp: number, - length: number, - name: string, - value: number, -} - -export interface IosLog { - tp: "ios_log", - timestamp: number, - length: number, - severity: string, - content: string, -} - -export interface IosNetworkCall { - tp: "ios_network_call", - timestamp: number, - length: number, - duration: number, - headers: string, - body: string, - url: string, - success: boolean, - method: string, - status: number, -} - - -export type Message = Timestamp | SessionDisconnect | SetPageLocation | SetViewportSize | SetViewportScroll | CreateDocument | CreateElementNode | CreateTextNode | MoveNode | RemoveNode | SetNodeAttribute | RemoveNodeAttribute | SetNodeData | SetCssData | SetNodeScroll | SetInputValue | SetInputChecked | MouseMove | ConsoleLog | CssInsertRule | CssDeleteRule | Fetch | Profiler | OTable | Redux | Vuex | MobX | NgRx | GraphQl | PerformanceTrack | 
ConnectionInformation | SetPageVisibility | LongTask | MouseClick | CreateIFrameDocument | IosSessionStart | IosCustomEvent | IosScreenChanges | IosClickEvent | IosPerformanceEvent | IosLog | IosNetworkCall; - -export default function (r: PrimitiveReader): Message | null { - const tp = r.readUint() - switch (tp) { - - case 0: - return { - tp: ID_TP_MAP[0], - timestamp: r.readUint(), - }; - - case 2: - return { - tp: ID_TP_MAP[2], - timestamp: r.readUint(), - }; - - case 4: - return { - tp: ID_TP_MAP[4], - url: r.readString(), - referrer: r.readString(), - navigationStart: r.readUint(), - }; - - case 5: - return { - tp: ID_TP_MAP[5], - width: r.readUint(), - height: r.readUint(), - }; - - case 6: - return { - tp: ID_TP_MAP[6], - x: r.readInt(), - y: r.readInt(), - }; - - case 7: - return { - tp: ID_TP_MAP[7], - - }; - - case 8: - return { - tp: ID_TP_MAP[8], - id: r.readUint(), - parentID: r.readUint(), - index: r.readUint(), - tag: r.readString(), - svg: r.readBoolean(), - }; - - case 9: - return { - tp: ID_TP_MAP[9], - id: r.readUint(), - parentID: r.readUint(), - index: r.readUint(), - }; - - case 10: - return { - tp: ID_TP_MAP[10], - id: r.readUint(), - parentID: r.readUint(), - index: r.readUint(), - }; - - case 11: - return { - tp: ID_TP_MAP[11], - id: r.readUint(), - }; - - case 12: - return { - tp: ID_TP_MAP[12], - id: r.readUint(), - name: r.readString(), - value: r.readString(), - }; - - case 13: - return { - tp: ID_TP_MAP[13], - id: r.readUint(), - name: r.readString(), - }; - - case 14: - return { - tp: ID_TP_MAP[14], - id: r.readUint(), - data: r.readString(), - }; - - case 15: - return { - tp: ID_TP_MAP[15], - id: r.readUint(), - data: r.readString(), - }; - - case 16: - return { - tp: ID_TP_MAP[16], - id: r.readUint(), - x: r.readInt(), - y: r.readInt(), - }; - - case 18: - return { - tp: ID_TP_MAP[18], - id: r.readUint(), - value: r.readString(), - mask: r.readInt(), - }; - - case 19: - return { - tp: ID_TP_MAP[19], - id: r.readUint(), - checked: 
r.readBoolean(), - }; - - case 20: - return { - tp: ID_TP_MAP[20], - x: r.readUint(), - y: r.readUint(), - }; - - case 22: - return { - tp: ID_TP_MAP[22], - level: r.readString(), - value: r.readString(), - }; - - case 37: - return { - tp: ID_TP_MAP[37], - id: r.readUint(), - rule: r.readString(), - index: r.readUint(), - }; - - case 38: - return { - tp: ID_TP_MAP[38], - id: r.readUint(), - index: r.readUint(), - }; - - case 39: - return { - tp: ID_TP_MAP[39], - method: r.readString(), - url: r.readString(), - request: r.readString(), - response: r.readString(), - status: r.readUint(), - timestamp: r.readUint(), - duration: r.readUint(), - }; - - case 40: - return { - tp: ID_TP_MAP[40], - name: r.readString(), - duration: r.readUint(), - args: r.readString(), - result: r.readString(), - }; - - case 41: - return { - tp: ID_TP_MAP[41], - key: r.readString(), - value: r.readString(), - }; - - case 44: - return { - tp: ID_TP_MAP[44], - action: r.readString(), - state: r.readString(), - duration: r.readUint(), - }; - - case 45: - return { - tp: ID_TP_MAP[45], - mutation: r.readString(), - state: r.readString(), - }; - - case 46: - return { - tp: ID_TP_MAP[46], - type: r.readString(), - payload: r.readString(), - }; - - case 47: - return { - tp: ID_TP_MAP[47], - action: r.readString(), - state: r.readString(), - duration: r.readUint(), - }; - - case 48: - return { - tp: ID_TP_MAP[48], - operationKind: r.readString(), - operationName: r.readString(), - variables: r.readString(), - response: r.readString(), - }; - - case 49: - return { - tp: ID_TP_MAP[49], - frames: r.readInt(), - ticks: r.readInt(), - totalJSHeapSize: r.readUint(), - usedJSHeapSize: r.readUint(), - }; - - case 54: - return { - tp: ID_TP_MAP[54], - downlink: r.readUint(), - type: r.readString(), - }; - - case 55: - return { - tp: ID_TP_MAP[55], - hidden: r.readBoolean(), - }; - - case 59: - return { - tp: ID_TP_MAP[59], - timestamp: r.readUint(), - duration: r.readUint(), - context: r.readUint(), - 
containerType: r.readUint(), - containerSrc: r.readString(), - containerId: r.readString(), - containerName: r.readString(), - }; - - case 69: - return { - tp: ID_TP_MAP[69], - id: r.readUint(), - hesitationTime: r.readUint(), - label: r.readString(), - selector: r.readString(), - }; - - case 70: - return { - tp: ID_TP_MAP[70], - frameID: r.readUint(), - id: r.readUint(), - }; - - case 90: - return { - tp: ID_TP_MAP[90], - timestamp: r.readUint(), - projectID: r.readUint(), - trackerVersion: r.readString(), - revID: r.readString(), - userUUID: r.readString(), - userOS: r.readString(), - userOSVersion: r.readString(), - userDevice: r.readString(), - userDeviceType: r.readString(), - userCountry: r.readString(), - }; - - case 93: - return { - tp: ID_TP_MAP[93], - timestamp: r.readUint(), - length: r.readUint(), - name: r.readString(), - payload: r.readString(), - }; - - case 96: - return { - tp: ID_TP_MAP[96], - timestamp: r.readUint(), - length: r.readUint(), - x: r.readUint(), - y: r.readUint(), - width: r.readUint(), - height: r.readUint(), - }; - - case 100: - return { - tp: ID_TP_MAP[100], - timestamp: r.readUint(), - length: r.readUint(), - label: r.readString(), - x: r.readUint(), - y: r.readUint(), - }; - - case 102: - return { - tp: ID_TP_MAP[102], - timestamp: r.readUint(), - length: r.readUint(), - name: r.readString(), - value: r.readUint(), - }; - - case 103: - return { - tp: ID_TP_MAP[103], - timestamp: r.readUint(), - length: r.readUint(), - severity: r.readString(), - content: r.readString(), - }; - - case 105: - return { - tp: ID_TP_MAP[105], - timestamp: r.readUint(), - length: r.readUint(), - duration: r.readUint(), - headers: r.readString(), - body: r.readString(), - url: r.readString(), - success: r.readBoolean(), - method: r.readString(), - status: r.readUint(), - }; - - default: - throw new Error(`Unrecognizable message type: ${ tp }`) - return null; - } -} diff --git a/frontend/app/player/MessageDistributor/messages/JSONRawMessageReader.ts 
b/frontend/app/player/MessageDistributor/messages/JSONRawMessageReader.ts new file mode 100644 index 000000000..8143ae17c --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/JSONRawMessageReader.ts @@ -0,0 +1,18 @@ +import type { RawMessage } from './raw' + +import { TP_MAP } from './raw' + +export default class JSONRawMessageReader { + constructor(private messages: any[] = []){} + append(messages: any[]) { + this.messages = this.messages.concat(messages) + } + readMessage(): RawMessage | null { + const msg = this.messages.shift() + if (!msg) { return null } + msg.tp = TP_MAP[msg._id] + delete msg._id + return msg as RawMessage + } + +} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/messages/MFileReader.ts b/frontend/app/player/MessageDistributor/messages/MFileReader.ts new file mode 100644 index 000000000..0204259e5 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/MFileReader.ts @@ -0,0 +1,68 @@ +import type { Message } from './message'; +import type { RawMessage } from './raw'; +import logger from 'App/logger'; +import RawMessageReader from './RawMessageReader'; + +// TODO: composition instead of inheritance +// needSkipMessage() and next() methods here use buf and p protected properties, +// which should be probably somehow incapsulated +export default class MFileReader extends RawMessageReader { + private pLastMessageID: number = 0; + private currentTime: number = 0; + public error: boolean = false; + constructor(data: Uint8Array, private readonly startTime: number) { + super(data); + } + + private needSkipMessage(): boolean { + if (this.p === 0) return false; + for (let i = 7; i >= 0; i--) { + if (this.buf[ this.p + i ] !== this.buf[ this.pLastMessageID + i ]) { + return this.buf[ this.p + i ] - this.buf[ this.pLastMessageID + i ] < 0; + } + } + return true; + } + + private readRawMessage(): RawMessage | null { + this.skip(8); + try { + return super.readMessage(); + } catch (e) { + this.error 
= true; + logger.error("Read message error:", e); + return null; + } + } + + hasNext():boolean { + return !this.error && this.hasNextByte(); + } + + next(): [ Message, number] | null { + if (!this.hasNext()) { + return null; + } + + while (this.needSkipMessage()) { + this.readRawMessage(); + } + this.pLastMessageID = this.p; + + const rMsg = this.readRawMessage(); + if (!rMsg) { + return null; + } + + if (rMsg.tp === "timestamp") { + this.currentTime = rMsg.timestamp - this.startTime; + } else { + const msg = Object.assign(rMsg, { + time: this.currentTime, + _index: this.pLastMessageID, + }) + return [msg, this.pLastMessageID]; + } + return null; + } +} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/messages/MStreamReader.ts b/frontend/app/player/MessageDistributor/messages/MStreamReader.ts new file mode 100644 index 000000000..1cc30dcec --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/MStreamReader.ts @@ -0,0 +1,69 @@ +import type { Message } from './message' +import type { + RawMessage, + RawSetNodeAttributeURLBased, + RawSetNodeAttribute, + RawSetCssDataURLBased, + RawSetCssData, + RawCssInsertRuleURLBased, + RawCssInsertRule, +} from './raw' +import RawMessageReader from './RawMessageReader' +import type { RawMessageReaderI } from './RawMessageReader' +import { resolveURL, resolveCSS } from './urlResolve' + + +const resolveMsg = { + "set_node_attribute_url_based": (msg: RawSetNodeAttributeURLBased): RawSetNodeAttribute => + ({ + ...msg, + value: msg.name === 'src' || msg.name === 'href' + ? resolveURL(msg.baseURL, msg.value) + : (msg.name === 'style' + ? 
resolveCSS(msg.baseURL, msg.value) + : msg.value + ), + tp: "set_node_attribute", + }), + "set_css_data_url_based": (msg: RawSetCssDataURLBased): RawSetCssData => + ({ + ...msg, + data: resolveCSS(msg.baseURL, msg.data), + tp: "set_css_data", + }), + "css_insert_rule_url_based": (msg: RawCssInsertRuleURLBased): RawCssInsertRule => + ({ + ...msg, + rule: resolveCSS(msg.baseURL, msg.rule), + tp: "css_insert_rule", + }) +} + +export default class MStreamReader { + constructor(private readonly r: RawMessageReaderI = new RawMessageReader()){} + + // append(buf: Uint8Array) { + // this.r.append(buf) + // } + + private t0: number = 0 + private t: number = 0 + private idx: number = 0 + readNext(): Message | null { + let msg = this.r.readMessage() + if (msg === null) { return null } + if (msg.tp === "timestamp" || msg.tp === "batch_meta") { + this.t0 = this.t0 || msg.timestamp + this.t = msg.timestamp - this.t0 + return this.readNext() + } + + // why typescript doesn't work here? + msg = (resolveMsg[msg.tp] || ((m:RawMessage)=>m))(msg) + + return Object.assign(msg, { + time: this.t, + _index: this.idx++, + }) + } +} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/messages/PrimitiveReader.ts b/frontend/app/player/MessageDistributor/messages/PrimitiveReader.ts new file mode 100644 index 000000000..bc62bf653 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/PrimitiveReader.ts @@ -0,0 +1,55 @@ +export default class PrimitiveReader { + protected p: number = 0 + constructor(protected buf: Uint8Array = new Uint8Array(0)) {} + + append(buf: Uint8Array) { + const newBuf = new Uint8Array(this.buf.length + buf.length) + newBuf.set(this.buf) + newBuf.set(buf, this.buf.length) + this.buf = newBuf + } + + hasNextByte(): boolean { + return this.p < this.buf.length + } + + readUint(): number | null { + let p = this.p, r = 0, s = 1, b + do { + if (p >= this.buf.length) { + return null + } + b = this.buf[ p++ ] + r += (b & 0x7F) * s + s *= 
128; + } while (b >= 0x80) + this.p = p + return r; + } + + readInt(): number | null { + let u = this.readUint(); + if (u === null) { return u } + if (u % 2) { + u = (u + 1) / -2; + } else { + u = u / 2; + } + return u; + } + + readString(): string | null { + var l = this.readUint(); + if (l === null || this.p + l > this.buf.length) { return null } + return new TextDecoder().decode(this.buf.subarray(this.p, this.p+=l)); + } + + readBoolean(): boolean | null { + if (this.p >= this.buf.length) { return null } + return !!this.buf[this.p++]; + } + + skip(n: number) { + this.p += n; + } +} diff --git a/frontend/app/player/MessageDistributor/messages/RawMessageReader.ts b/frontend/app/player/MessageDistributor/messages/RawMessageReader.ts new file mode 100644 index 000000000..867d80755 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/RawMessageReader.ts @@ -0,0 +1,758 @@ +// Auto-generated, do not edit + +import PrimitiveReader from './PrimitiveReader' +import type { RawMessage } from './raw' + +export interface RawMessageReaderI { + readMessage(): RawMessage | null +} + +export default class RawMessageReader extends PrimitiveReader { + readMessage(): RawMessage | null { + const p = this.p + const resetPointer = () => { + this.p = p + return null + } + + const tp = this.readUint() + if (tp === null) { return resetPointer() } + + switch (tp) { + + case 80: { + const pageNo = this.readUint(); if (pageNo === null) { return resetPointer() } + const firstIndex = this.readUint(); if (firstIndex === null) { return resetPointer() } + const timestamp = this.readInt(); if (timestamp === null) { return resetPointer() } + return { + tp: "batch_meta", + pageNo, + firstIndex, + timestamp, + }; + } + + case 0: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + return { + tp: "timestamp", + timestamp, + }; + } + + case 2: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + return { + tp: 
"session_disconnect", + timestamp, + }; + } + + case 4: { + const url = this.readString(); if (url === null) { return resetPointer() } + const referrer = this.readString(); if (referrer === null) { return resetPointer() } + const navigationStart = this.readUint(); if (navigationStart === null) { return resetPointer() } + return { + tp: "set_page_location", + url, + referrer, + navigationStart, + }; + } + + case 5: { + const width = this.readUint(); if (width === null) { return resetPointer() } + const height = this.readUint(); if (height === null) { return resetPointer() } + return { + tp: "set_viewport_size", + width, + height, + }; + } + + case 6: { + const x = this.readInt(); if (x === null) { return resetPointer() } + const y = this.readInt(); if (y === null) { return resetPointer() } + return { + tp: "set_viewport_scroll", + x, + y, + }; + } + + case 7: { + + return { + tp: "create_document", + + }; + } + + case 8: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const parentID = this.readUint(); if (parentID === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + const tag = this.readString(); if (tag === null) { return resetPointer() } + const svg = this.readBoolean(); if (svg === null) { return resetPointer() } + return { + tp: "create_element_node", + id, + parentID, + index, + tag, + svg, + }; + } + + case 9: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const parentID = this.readUint(); if (parentID === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + return { + tp: "create_text_node", + id, + parentID, + index, + }; + } + + case 10: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const parentID = this.readUint(); if (parentID === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + 
return { + tp: "move_node", + id, + parentID, + index, + }; + } + + case 11: { + const id = this.readUint(); if (id === null) { return resetPointer() } + return { + tp: "remove_node", + id, + }; + } + + case 12: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "set_node_attribute", + id, + name, + value, + }; + } + + case 13: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + return { + tp: "remove_node_attribute", + id, + name, + }; + } + + case 14: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const data = this.readString(); if (data === null) { return resetPointer() } + return { + tp: "set_node_data", + id, + data, + }; + } + + case 15: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const data = this.readString(); if (data === null) { return resetPointer() } + return { + tp: "set_css_data", + id, + data, + }; + } + + case 16: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const x = this.readInt(); if (x === null) { return resetPointer() } + const y = this.readInt(); if (y === null) { return resetPointer() } + return { + tp: "set_node_scroll", + id, + x, + y, + }; + } + + case 17: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const label = this.readString(); if (label === null) { return resetPointer() } + return { + tp: "set_input_target", + id, + label, + }; + } + + case 18: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + const mask = this.readInt(); if (mask === null) { return resetPointer() } + return { + tp: "set_input_value", + 
id, + value, + mask, + }; + } + + case 19: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const checked = this.readBoolean(); if (checked === null) { return resetPointer() } + return { + tp: "set_input_checked", + id, + checked, + }; + } + + case 20: { + const x = this.readUint(); if (x === null) { return resetPointer() } + const y = this.readUint(); if (y === null) { return resetPointer() } + return { + tp: "mouse_move", + x, + y, + }; + } + + case 22: { + const level = this.readString(); if (level === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "console_log", + level, + value, + }; + } + + case 23: { + const requestStart = this.readUint(); if (requestStart === null) { return resetPointer() } + const responseStart = this.readUint(); if (responseStart === null) { return resetPointer() } + const responseEnd = this.readUint(); if (responseEnd === null) { return resetPointer() } + const domContentLoadedEventStart = this.readUint(); if (domContentLoadedEventStart === null) { return resetPointer() } + const domContentLoadedEventEnd = this.readUint(); if (domContentLoadedEventEnd === null) { return resetPointer() } + const loadEventStart = this.readUint(); if (loadEventStart === null) { return resetPointer() } + const loadEventEnd = this.readUint(); if (loadEventEnd === null) { return resetPointer() } + const firstPaint = this.readUint(); if (firstPaint === null) { return resetPointer() } + const firstContentfulPaint = this.readUint(); if (firstContentfulPaint === null) { return resetPointer() } + return { + tp: "page_load_timing", + requestStart, + responseStart, + responseEnd, + domContentLoadedEventStart, + domContentLoadedEventEnd, + loadEventStart, + loadEventEnd, + firstPaint, + firstContentfulPaint, + }; + } + + case 24: { + const speedIndex = this.readUint(); if (speedIndex === null) { return resetPointer() } + const visuallyComplete = 
this.readUint(); if (visuallyComplete === null) { return resetPointer() } + const timeToInteractive = this.readUint(); if (timeToInteractive === null) { return resetPointer() } + return { + tp: "page_render_timing", + speedIndex, + visuallyComplete, + timeToInteractive, + }; + } + + case 25: { + const name = this.readString(); if (name === null) { return resetPointer() } + const message = this.readString(); if (message === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "js_exception", + name, + message, + payload, + }; + } + + case 27: { + const name = this.readString(); if (name === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "raw_custom_event", + name, + payload, + }; + } + + case 28: { + const id = this.readString(); if (id === null) { return resetPointer() } + return { + tp: "user_id", + id, + }; + } + + case 29: { + const id = this.readString(); if (id === null) { return resetPointer() } + return { + tp: "user_anonymous_id", + id, + }; + } + + case 30: { + const key = this.readString(); if (key === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "metadata", + key, + value, + }; + } + + case 37: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const rule = this.readString(); if (rule === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + return { + tp: "css_insert_rule", + id, + rule, + index, + }; + } + + case 38: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + return { + tp: "css_delete_rule", + id, + index, + }; + } + + case 39: { + const method = this.readString(); if (method === null) { return resetPointer() 
} + const url = this.readString(); if (url === null) { return resetPointer() } + const request = this.readString(); if (request === null) { return resetPointer() } + const response = this.readString(); if (response === null) { return resetPointer() } + const status = this.readUint(); if (status === null) { return resetPointer() } + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + return { + tp: "fetch", + method, + url, + request, + response, + status, + timestamp, + duration, + }; + } + + case 40: { + const name = this.readString(); if (name === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + const args = this.readString(); if (args === null) { return resetPointer() } + const result = this.readString(); if (result === null) { return resetPointer() } + return { + tp: "profiler", + name, + duration, + args, + result, + }; + } + + case 41: { + const key = this.readString(); if (key === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "o_table", + key, + value, + }; + } + + case 42: { + const type = this.readString(); if (type === null) { return resetPointer() } + return { + tp: "state_action", + type, + }; + } + + case 44: { + const action = this.readString(); if (action === null) { return resetPointer() } + const state = this.readString(); if (state === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + return { + tp: "redux", + action, + state, + duration, + }; + } + + case 45: { + const mutation = this.readString(); if (mutation === null) { return resetPointer() } + const state = this.readString(); if (state === null) { return resetPointer() } + return { + tp: "vuex", + mutation, + state, + }; + } + + case 46: { + const type = 
this.readString(); if (type === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "mob_x", + type, + payload, + }; + } + + case 47: { + const action = this.readString(); if (action === null) { return resetPointer() } + const state = this.readString(); if (state === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + return { + tp: "ng_rx", + action, + state, + duration, + }; + } + + case 48: { + const operationKind = this.readString(); if (operationKind === null) { return resetPointer() } + const operationName = this.readString(); if (operationName === null) { return resetPointer() } + const variables = this.readString(); if (variables === null) { return resetPointer() } + const response = this.readString(); if (response === null) { return resetPointer() } + return { + tp: "graph_ql", + operationKind, + operationName, + variables, + response, + }; + } + + case 49: { + const frames = this.readInt(); if (frames === null) { return resetPointer() } + const ticks = this.readInt(); if (ticks === null) { return resetPointer() } + const totalJSHeapSize = this.readUint(); if (totalJSHeapSize === null) { return resetPointer() } + const usedJSHeapSize = this.readUint(); if (usedJSHeapSize === null) { return resetPointer() } + return { + tp: "performance_track", + frames, + ticks, + totalJSHeapSize, + usedJSHeapSize, + }; + } + + case 53: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + const ttfb = this.readUint(); if (ttfb === null) { return resetPointer() } + const headerSize = this.readUint(); if (headerSize === null) { return resetPointer() } + const encodedBodySize = this.readUint(); if (encodedBodySize === null) { return resetPointer() } + const decodedBodySize = this.readUint(); if 
(decodedBodySize === null) { return resetPointer() } + const url = this.readString(); if (url === null) { return resetPointer() } + const initiator = this.readString(); if (initiator === null) { return resetPointer() } + return { + tp: "resource_timing", + timestamp, + duration, + ttfb, + headerSize, + encodedBodySize, + decodedBodySize, + url, + initiator, + }; + } + + case 54: { + const downlink = this.readUint(); if (downlink === null) { return resetPointer() } + const type = this.readString(); if (type === null) { return resetPointer() } + return { + tp: "connection_information", + downlink, + type, + }; + } + + case 55: { + const hidden = this.readBoolean(); if (hidden === null) { return resetPointer() } + return { + tp: "set_page_visibility", + hidden, + }; + } + + case 59: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + const context = this.readUint(); if (context === null) { return resetPointer() } + const containerType = this.readUint(); if (containerType === null) { return resetPointer() } + const containerSrc = this.readString(); if (containerSrc === null) { return resetPointer() } + const containerId = this.readString(); if (containerId === null) { return resetPointer() } + const containerName = this.readString(); if (containerName === null) { return resetPointer() } + return { + tp: "long_task", + timestamp, + duration, + context, + containerType, + containerSrc, + containerId, + containerName, + }; + } + + case 60: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + const baseURL = this.readString(); if (baseURL === null) { return resetPointer() } + return { + tp: "set_node_attribute_url_based", + id, + name, + value, + baseURL, + }; + } + + 
case 61: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const data = this.readString(); if (data === null) { return resetPointer() } + const baseURL = this.readString(); if (baseURL === null) { return resetPointer() } + return { + tp: "set_css_data_url_based", + id, + data, + baseURL, + }; + } + + case 63: { + const type = this.readString(); if (type === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "technical_info", + type, + value, + }; + } + + case 64: { + const name = this.readString(); if (name === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "custom_issue", + name, + payload, + }; + } + + case 65: { + + return { + tp: "page_close", + + }; + } + + case 67: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const rule = this.readString(); if (rule === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + const baseURL = this.readString(); if (baseURL === null) { return resetPointer() } + return { + tp: "css_insert_rule_url_based", + id, + rule, + index, + baseURL, + }; + } + + case 69: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const hesitationTime = this.readUint(); if (hesitationTime === null) { return resetPointer() } + const label = this.readString(); if (label === null) { return resetPointer() } + const selector = this.readString(); if (selector === null) { return resetPointer() } + return { + tp: "mouse_click", + id, + hesitationTime, + label, + selector, + }; + } + + case 70: { + const frameID = this.readUint(); if (frameID === null) { return resetPointer() } + const id = this.readUint(); if (id === null) { return resetPointer() } + return { + tp: "create_i_frame_document", + frameID, + id, + }; + } + + case 90: { + const timestamp = 
this.readUint(); if (timestamp === null) { return resetPointer() } + const projectID = this.readUint(); if (projectID === null) { return resetPointer() } + const trackerVersion = this.readString(); if (trackerVersion === null) { return resetPointer() } + const revID = this.readString(); if (revID === null) { return resetPointer() } + const userUUID = this.readString(); if (userUUID === null) { return resetPointer() } + const userOS = this.readString(); if (userOS === null) { return resetPointer() } + const userOSVersion = this.readString(); if (userOSVersion === null) { return resetPointer() } + const userDevice = this.readString(); if (userDevice === null) { return resetPointer() } + const userDeviceType = this.readString(); if (userDeviceType === null) { return resetPointer() } + const userCountry = this.readString(); if (userCountry === null) { return resetPointer() } + return { + tp: "ios_session_start", + timestamp, + projectID, + trackerVersion, + revID, + userUUID, + userOS, + userOSVersion, + userDevice, + userDeviceType, + userCountry, + }; + } + + case 93: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "ios_custom_event", + timestamp, + length, + name, + payload, + }; + } + + case 96: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const x = this.readUint(); if (x === null) { return resetPointer() } + const y = this.readUint(); if (y === null) { return resetPointer() } + const width = this.readUint(); if (width === null) { return resetPointer() } + const height = this.readUint(); if (height === null) { return resetPointer() } + return 
{ + tp: "ios_screen_changes", + timestamp, + length, + x, + y, + width, + height, + }; + } + + case 100: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const label = this.readString(); if (label === null) { return resetPointer() } + const x = this.readUint(); if (x === null) { return resetPointer() } + const y = this.readUint(); if (y === null) { return resetPointer() } + return { + tp: "ios_click_event", + timestamp, + length, + label, + x, + y, + }; + } + + case 102: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + const value = this.readUint(); if (value === null) { return resetPointer() } + return { + tp: "ios_performance_event", + timestamp, + length, + name, + value, + }; + } + + case 103: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const severity = this.readString(); if (severity === null) { return resetPointer() } + const content = this.readString(); if (content === null) { return resetPointer() } + return { + tp: "ios_log", + timestamp, + length, + severity, + content, + }; + } + + case 105: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + const headers = this.readString(); if (headers === null) { return resetPointer() } + const body = this.readString(); if (body === null) { return resetPointer() } + const url = this.readString(); if (url === null) { return resetPointer() } + const success = 
this.readBoolean(); if (success === null) { return resetPointer() } + const method = this.readString(); if (method === null) { return resetPointer() } + const status = this.readUint(); if (status === null) { return resetPointer() } + return { + tp: "ios_network_call", + timestamp, + length, + duration, + headers, + body, + url, + success, + method, + status, + }; + } + + default: + throw new Error(`Unrecognizable message type: ${ tp }`) + return null; + } + } +} diff --git a/frontend/app/player/MessageDistributor/messages/index.ts b/frontend/app/player/MessageDistributor/messages/index.ts new file mode 100644 index 000000000..2619b58cd --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/index.ts @@ -0,0 +1 @@ +export * from './message' \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/messages/message.ts b/frontend/app/player/MessageDistributor/messages/message.ts new file mode 100644 index 000000000..1c21bbfd2 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/message.ts @@ -0,0 +1,184 @@ +// Auto-generated, do not edit + +import type { Timed } from './timed' +import type { RawMessage } from './raw' +import type { RawBatchMeta, + RawTimestamp, + RawSessionDisconnect, + RawSetPageLocation, + RawSetViewportSize, + RawSetViewportScroll, + RawCreateDocument, + RawCreateElementNode, + RawCreateTextNode, + RawMoveNode, + RawRemoveNode, + RawSetNodeAttribute, + RawRemoveNodeAttribute, + RawSetNodeData, + RawSetCssData, + RawSetNodeScroll, + RawSetInputTarget, + RawSetInputValue, + RawSetInputChecked, + RawMouseMove, + RawConsoleLog, + RawPageLoadTiming, + RawPageRenderTiming, + RawJsException, + RawRawCustomEvent, + RawUserID, + RawUserAnonymousID, + RawMetadata, + RawCssInsertRule, + RawCssDeleteRule, + RawFetch, + RawProfiler, + RawOTable, + RawStateAction, + RawRedux, + RawVuex, + RawMobX, + RawNgRx, + RawGraphQl, + RawPerformanceTrack, + RawResourceTiming, + RawConnectionInformation, + 
RawSetPageVisibility, + RawLongTask, + RawSetNodeAttributeURLBased, + RawSetCssDataURLBased, + RawTechnicalInfo, + RawCustomIssue, + RawPageClose, + RawCssInsertRuleURLBased, + RawMouseClick, + RawCreateIFrameDocument, + RawIosSessionStart, + RawIosCustomEvent, + RawIosScreenChanges, + RawIosClickEvent, + RawIosPerformanceEvent, + RawIosLog, + RawIosNetworkCall, } from './raw' + +export type Message = RawMessage & Timed + + +export type BatchMeta = RawBatchMeta & Timed + +export type Timestamp = RawTimestamp & Timed + +export type SessionDisconnect = RawSessionDisconnect & Timed + +export type SetPageLocation = RawSetPageLocation & Timed + +export type SetViewportSize = RawSetViewportSize & Timed + +export type SetViewportScroll = RawSetViewportScroll & Timed + +export type CreateDocument = RawCreateDocument & Timed + +export type CreateElementNode = RawCreateElementNode & Timed + +export type CreateTextNode = RawCreateTextNode & Timed + +export type MoveNode = RawMoveNode & Timed + +export type RemoveNode = RawRemoveNode & Timed + +export type SetNodeAttribute = RawSetNodeAttribute & Timed + +export type RemoveNodeAttribute = RawRemoveNodeAttribute & Timed + +export type SetNodeData = RawSetNodeData & Timed + +export type SetCssData = RawSetCssData & Timed + +export type SetNodeScroll = RawSetNodeScroll & Timed + +export type SetInputTarget = RawSetInputTarget & Timed + +export type SetInputValue = RawSetInputValue & Timed + +export type SetInputChecked = RawSetInputChecked & Timed + +export type MouseMove = RawMouseMove & Timed + +export type ConsoleLog = RawConsoleLog & Timed + +export type PageLoadTiming = RawPageLoadTiming & Timed + +export type PageRenderTiming = RawPageRenderTiming & Timed + +export type JsException = RawJsException & Timed + +export type RawCustomEvent = RawRawCustomEvent & Timed + +export type UserID = RawUserID & Timed + +export type UserAnonymousID = RawUserAnonymousID & Timed + +export type Metadata = RawMetadata & Timed + +export type 
CssInsertRule = RawCssInsertRule & Timed + +export type CssDeleteRule = RawCssDeleteRule & Timed + +export type Fetch = RawFetch & Timed + +export type Profiler = RawProfiler & Timed + +export type OTable = RawOTable & Timed + +export type StateAction = RawStateAction & Timed + +export type Redux = RawRedux & Timed + +export type Vuex = RawVuex & Timed + +export type MobX = RawMobX & Timed + +export type NgRx = RawNgRx & Timed + +export type GraphQl = RawGraphQl & Timed + +export type PerformanceTrack = RawPerformanceTrack & Timed + +export type ResourceTiming = RawResourceTiming & Timed + +export type ConnectionInformation = RawConnectionInformation & Timed + +export type SetPageVisibility = RawSetPageVisibility & Timed + +export type LongTask = RawLongTask & Timed + +export type SetNodeAttributeURLBased = RawSetNodeAttributeURLBased & Timed + +export type SetCssDataURLBased = RawSetCssDataURLBased & Timed + +export type TechnicalInfo = RawTechnicalInfo & Timed + +export type CustomIssue = RawCustomIssue & Timed + +export type PageClose = RawPageClose & Timed + +export type CssInsertRuleURLBased = RawCssInsertRuleURLBased & Timed + +export type MouseClick = RawMouseClick & Timed + +export type CreateIFrameDocument = RawCreateIFrameDocument & Timed + +export type IosSessionStart = RawIosSessionStart & Timed + +export type IosCustomEvent = RawIosCustomEvent & Timed + +export type IosScreenChanges = RawIosScreenChanges & Timed + +export type IosClickEvent = RawIosClickEvent & Timed + +export type IosPerformanceEvent = RawIosPerformanceEvent & Timed + +export type IosLog = RawIosLog & Timed + +export type IosNetworkCall = RawIosNetworkCall & Timed diff --git a/frontend/app/player/MessageDistributor/messages/raw.ts b/frontend/app/player/MessageDistributor/messages/raw.ts new file mode 100644 index 000000000..e86181b2d --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/raw.ts @@ -0,0 +1,491 @@ +// Auto-generated, do not edit + +export const TP_MAP = { + 
80: "batch_meta", + 0: "timestamp", + 2: "session_disconnect", + 4: "set_page_location", + 5: "set_viewport_size", + 6: "set_viewport_scroll", + 7: "create_document", + 8: "create_element_node", + 9: "create_text_node", + 10: "move_node", + 11: "remove_node", + 12: "set_node_attribute", + 13: "remove_node_attribute", + 14: "set_node_data", + 15: "set_css_data", + 16: "set_node_scroll", + 17: "set_input_target", + 18: "set_input_value", + 19: "set_input_checked", + 20: "mouse_move", + 22: "console_log", + 23: "page_load_timing", + 24: "page_render_timing", + 25: "js_exception", + 27: "raw_custom_event", + 28: "user_id", + 29: "user_anonymous_id", + 30: "metadata", + 37: "css_insert_rule", + 38: "css_delete_rule", + 39: "fetch", + 40: "profiler", + 41: "o_table", + 42: "state_action", + 44: "redux", + 45: "vuex", + 46: "mob_x", + 47: "ng_rx", + 48: "graph_ql", + 49: "performance_track", + 53: "resource_timing", + 54: "connection_information", + 55: "set_page_visibility", + 59: "long_task", + 60: "set_node_attribute_url_based", + 61: "set_css_data_url_based", + 63: "technical_info", + 64: "custom_issue", + 65: "page_close", + 67: "css_insert_rule_url_based", + 69: "mouse_click", + 70: "create_i_frame_document", + 90: "ios_session_start", + 93: "ios_custom_event", + 96: "ios_screen_changes", + 100: "ios_click_event", + 102: "ios_performance_event", + 103: "ios_log", + 105: "ios_network_call", +} + + +export interface RawBatchMeta { + tp: "batch_meta", + pageNo: number, + firstIndex: number, + timestamp: number, +} + +export interface RawTimestamp { + tp: "timestamp", + timestamp: number, +} + +export interface RawSessionDisconnect { + tp: "session_disconnect", + timestamp: number, +} + +export interface RawSetPageLocation { + tp: "set_page_location", + url: string, + referrer: string, + navigationStart: number, +} + +export interface RawSetViewportSize { + tp: "set_viewport_size", + width: number, + height: number, +} + +export interface RawSetViewportScroll { + tp: 
"set_viewport_scroll", + x: number, + y: number, +} + +export interface RawCreateDocument { + tp: "create_document", + +} + +export interface RawCreateElementNode { + tp: "create_element_node", + id: number, + parentID: number, + index: number, + tag: string, + svg: boolean, +} + +export interface RawCreateTextNode { + tp: "create_text_node", + id: number, + parentID: number, + index: number, +} + +export interface RawMoveNode { + tp: "move_node", + id: number, + parentID: number, + index: number, +} + +export interface RawRemoveNode { + tp: "remove_node", + id: number, +} + +export interface RawSetNodeAttribute { + tp: "set_node_attribute", + id: number, + name: string, + value: string, +} + +export interface RawRemoveNodeAttribute { + tp: "remove_node_attribute", + id: number, + name: string, +} + +export interface RawSetNodeData { + tp: "set_node_data", + id: number, + data: string, +} + +export interface RawSetCssData { + tp: "set_css_data", + id: number, + data: string, +} + +export interface RawSetNodeScroll { + tp: "set_node_scroll", + id: number, + x: number, + y: number, +} + +export interface RawSetInputTarget { + tp: "set_input_target", + id: number, + label: string, +} + +export interface RawSetInputValue { + tp: "set_input_value", + id: number, + value: string, + mask: number, +} + +export interface RawSetInputChecked { + tp: "set_input_checked", + id: number, + checked: boolean, +} + +export interface RawMouseMove { + tp: "mouse_move", + x: number, + y: number, +} + +export interface RawConsoleLog { + tp: "console_log", + level: string, + value: string, +} + +export interface RawPageLoadTiming { + tp: "page_load_timing", + requestStart: number, + responseStart: number, + responseEnd: number, + domContentLoadedEventStart: number, + domContentLoadedEventEnd: number, + loadEventStart: number, + loadEventEnd: number, + firstPaint: number, + firstContentfulPaint: number, +} + +export interface RawPageRenderTiming { + tp: "page_render_timing", + speedIndex: 
number, + visuallyComplete: number, + timeToInteractive: number, +} + +export interface RawJsException { + tp: "js_exception", + name: string, + message: string, + payload: string, +} + +export interface RawRawCustomEvent { + tp: "raw_custom_event", + name: string, + payload: string, +} + +export interface RawUserID { + tp: "user_id", + id: string, +} + +export interface RawUserAnonymousID { + tp: "user_anonymous_id", + id: string, +} + +export interface RawMetadata { + tp: "metadata", + key: string, + value: string, +} + +export interface RawCssInsertRule { + tp: "css_insert_rule", + id: number, + rule: string, + index: number, +} + +export interface RawCssDeleteRule { + tp: "css_delete_rule", + id: number, + index: number, +} + +export interface RawFetch { + tp: "fetch", + method: string, + url: string, + request: string, + response: string, + status: number, + timestamp: number, + duration: number, +} + +export interface RawProfiler { + tp: "profiler", + name: string, + duration: number, + args: string, + result: string, +} + +export interface RawOTable { + tp: "o_table", + key: string, + value: string, +} + +export interface RawStateAction { + tp: "state_action", + type: string, +} + +export interface RawRedux { + tp: "redux", + action: string, + state: string, + duration: number, +} + +export interface RawVuex { + tp: "vuex", + mutation: string, + state: string, +} + +export interface RawMobX { + tp: "mob_x", + type: string, + payload: string, +} + +export interface RawNgRx { + tp: "ng_rx", + action: string, + state: string, + duration: number, +} + +export interface RawGraphQl { + tp: "graph_ql", + operationKind: string, + operationName: string, + variables: string, + response: string, +} + +export interface RawPerformanceTrack { + tp: "performance_track", + frames: number, + ticks: number, + totalJSHeapSize: number, + usedJSHeapSize: number, +} + +export interface RawResourceTiming { + tp: "resource_timing", + timestamp: number, + duration: number, + ttfb: 
number, + headerSize: number, + encodedBodySize: number, + decodedBodySize: number, + url: string, + initiator: string, +} + +export interface RawConnectionInformation { + tp: "connection_information", + downlink: number, + type: string, +} + +export interface RawSetPageVisibility { + tp: "set_page_visibility", + hidden: boolean, +} + +export interface RawLongTask { + tp: "long_task", + timestamp: number, + duration: number, + context: number, + containerType: number, + containerSrc: string, + containerId: string, + containerName: string, +} + +export interface RawSetNodeAttributeURLBased { + tp: "set_node_attribute_url_based", + id: number, + name: string, + value: string, + baseURL: string, +} + +export interface RawSetCssDataURLBased { + tp: "set_css_data_url_based", + id: number, + data: string, + baseURL: string, +} + +export interface RawTechnicalInfo { + tp: "technical_info", + type: string, + value: string, +} + +export interface RawCustomIssue { + tp: "custom_issue", + name: string, + payload: string, +} + +export interface RawPageClose { + tp: "page_close", + +} + +export interface RawCssInsertRuleURLBased { + tp: "css_insert_rule_url_based", + id: number, + rule: string, + index: number, + baseURL: string, +} + +export interface RawMouseClick { + tp: "mouse_click", + id: number, + hesitationTime: number, + label: string, + selector: string, +} + +export interface RawCreateIFrameDocument { + tp: "create_i_frame_document", + frameID: number, + id: number, +} + +export interface RawIosSessionStart { + tp: "ios_session_start", + timestamp: number, + projectID: number, + trackerVersion: string, + revID: string, + userUUID: string, + userOS: string, + userOSVersion: string, + userDevice: string, + userDeviceType: string, + userCountry: string, +} + +export interface RawIosCustomEvent { + tp: "ios_custom_event", + timestamp: number, + length: number, + name: string, + payload: string, +} + +export interface RawIosScreenChanges { + tp: "ios_screen_changes", + 
timestamp: number, + length: number, + x: number, + y: number, + width: number, + height: number, +} + +export interface RawIosClickEvent { + tp: "ios_click_event", + timestamp: number, + length: number, + label: string, + x: number, + y: number, +} + +export interface RawIosPerformanceEvent { + tp: "ios_performance_event", + timestamp: number, + length: number, + name: string, + value: number, +} + +export interface RawIosLog { + tp: "ios_log", + timestamp: number, + length: number, + severity: string, + content: string, +} + +export interface RawIosNetworkCall { + tp: "ios_network_call", + timestamp: number, + length: number, + duration: number, + headers: string, + body: string, + url: string, + success: boolean, + method: string, + status: number, +} + + +export type RawMessage = RawBatchMeta | RawTimestamp | RawSessionDisconnect | RawSetPageLocation | RawSetViewportSize | RawSetViewportScroll | RawCreateDocument | RawCreateElementNode | RawCreateTextNode | RawMoveNode | RawRemoveNode | RawSetNodeAttribute | RawRemoveNodeAttribute | RawSetNodeData | RawSetCssData | RawSetNodeScroll | RawSetInputTarget | RawSetInputValue | RawSetInputChecked | RawMouseMove | RawConsoleLog | RawPageLoadTiming | RawPageRenderTiming | RawJsException | RawRawCustomEvent | RawUserID | RawUserAnonymousID | RawMetadata | RawCssInsertRule | RawCssDeleteRule | RawFetch | RawProfiler | RawOTable | RawStateAction | RawRedux | RawVuex | RawMobX | RawNgRx | RawGraphQl | RawPerformanceTrack | RawResourceTiming | RawConnectionInformation | RawSetPageVisibility | RawLongTask | RawSetNodeAttributeURLBased | RawSetCssDataURLBased | RawTechnicalInfo | RawCustomIssue | RawPageClose | RawCssInsertRuleURLBased | RawMouseClick | RawCreateIFrameDocument | RawIosSessionStart | RawIosCustomEvent | RawIosScreenChanges | RawIosClickEvent | RawIosPerformanceEvent | RawIosLog | RawIosNetworkCall; diff --git a/frontend/app/player/MessageDistributor/messages/timed.ts 
b/frontend/app/player/MessageDistributor/messages/timed.ts new file mode 100644 index 000000000..2dd4cc707 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/timed.ts @@ -0,0 +1 @@ +export interface Timed { readonly time: number }; diff --git a/frontend/app/player/MessageDistributor/messages/urlResolve.ts b/frontend/app/player/MessageDistributor/messages/urlResolve.ts new file mode 100644 index 000000000..b80ff4f9a --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/urlResolve.ts @@ -0,0 +1,57 @@ +export function resolveURL(baseURL: string, relURL: string): string { + if (relURL.startsWith('#') || relURL === "") { + return relURL; + } + return new URL(relURL, baseURL).toString(); +} + + +var match = /bar/.exec("foobar"); +const re1 = /url\(("[^"]*"|'[^']*'|[^)]*)\)/g +const re2 = /@import "(.*?)"/g +function cssUrlsIndex(css: string): Array<[number, number]> { + const idxs: Array<[number, number]> = []; + const i1 = css.matchAll(re1); + // @ts-ignore + for (let m of i1) { + // @ts-ignore + const s: number = m.index + m[0].indexOf(m[1]); + const e: number = s + m[1].length; + idxs.push([s, e]); + } + const i2 = css.matchAll(re2); + // @ts-ignore + for (let m of i2) { + // @ts-ignore + const s = m.index + m[0].indexOf(m[1]); + const e = s + m[1].length; + idxs.push([s, e]) + } + return idxs; +} +function unquote(str: string): [string, string] { + str = str.trim(); + if (str.length <= 2) { + return [str, ""] + } + if (str[0] == '"' && str[str.length-1] == '"') { + return [ str.substring(1, str.length-1), "\""]; + } + if (str[0] == '\'' && str[str.length-1] == '\'') { + return [ str.substring(1, str.length-1), "'" ]; + } + return [str, ""] +} +function rewriteCSSLinks(css: string, rewriter: (rawurl: string) => string): string { + for (let idx of cssUrlsIndex(css)) { + const f = idx[0] + const t = idx[1] + const [ rawurl, q ] = unquote(css.substring(f, t)); + css = css.substring(0,f) + q + rewriter(rawurl) + q + css.substring(t); + } + 
return css +} + +export function resolveCSS(baseURL: string, css: string): string { + return rewriteCSSLinks(css, rawurl => resolveURL(baseURL, rawurl)); +} \ No newline at end of file diff --git a/frontend/app/player/ios/Parser.ts b/frontend/app/player/ios/Parser.ts index f202e9306..15b750df6 100644 --- a/frontend/app/player/ios/Parser.ts +++ b/frontend/app/player/ios/Parser.ts @@ -1,12 +1,11 @@ -import readMessage from '../MessageDistributor/messages'; -import PrimitiveReader from '../MessageDistributor/PrimitiveReader'; +import RawMessageReader from '../MessageDistributor/messages/RawMessageReader'; export default class Parser { - private reader: PrimitiveReader + private reader: RawMessageReader private error: boolean = false constructor(byteArray) { - this.reader = new PrimitiveReader(byteArray) + this.reader = new RawMessageReader(byteArray) } parseEach(cb) { @@ -19,12 +18,12 @@ export default class Parser { } hasNext() { - return !this.error && this.reader.hasNext(); + return !this.error && this.reader.hasNextByte(); } next() { try { - return readMessage(this.reader) + return this.reader.readMessage() } catch(e) { console.warn(e) this.error = true diff --git a/frontend/app/player/singletone.js b/frontend/app/player/singletone.js index 619f9b02b..9d811023e 100644 --- a/frontend/app/player/singletone.js +++ b/frontend/app/player/singletone.js @@ -28,11 +28,11 @@ document.addEventListener("visibilitychange", function() { } }); -export function init(session, jwt, config) { - const live = session.live; +export function init(session, jwt, config, live = false) { + // const live = session.live; const endTime = !live && session.duration.valueOf(); - instance = new Player(session, jwt, config); + instance = new Player(session, jwt, config, live); update({ initialized: true, live, diff --git a/frontend/app/svg/icons/os/fedora.svg b/frontend/app/svg/icons/os/fedora.svg new file mode 100644 index 000000000..86e9f115e --- /dev/null +++ 
b/frontend/app/svg/icons/os/fedora.svg @@ -0,0 +1,5 @@ + + + Fedora logo (2021) + + diff --git a/frontend/app/types/session/session.js b/frontend/app/types/session/session.js index 1fabc79a6..61ca9e489 100644 --- a/frontend/app/types/session/session.js +++ b/frontend/app/types/session/session.js @@ -88,6 +88,7 @@ export default Record({ ...session }) => { const duration = Duration.fromMillis(session.duration < 1000 ? 1000 : session.duration); + const durationSeconds = duration.valueOf(); const startedAt = +startTs; const userDevice = session.userDevice || session.userDeviceType || 'Other'; @@ -96,7 +97,7 @@ export default Record({ const events = List(session.events) .map(e => SessionEvent({ ...e, time: e.timestamp - startedAt })) - .filter(({ type }) => type !== TYPES.CONSOLE); + .filter(({ type, time }) => type !== TYPES.CONSOLE && time <= durationSeconds); let resources = List(session.resources) .map(Resource); diff --git a/frontend/build.sh b/frontend/build.sh index 010b69a34..7b656bc8f 100644 --- a/frontend/build.sh +++ b/frontend/build.sh @@ -18,11 +18,6 @@ check_prereq() { } function build(){ - # Copy enterprise code - [[ $1 == "ee" ]] && { - cp ../ee/frontend/* ./ - ee="true" - } # Run docker as the same user, else we'll run in to permission issues. 
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "npm install && npm run build:oss" } diff --git a/frontend/env.js b/frontend/env.js index 549d5ec07..85984b376 100644 --- a/frontend/env.js +++ b/frontend/env.js @@ -13,7 +13,7 @@ const oss = { ORIGIN: () => 'window.location.origin', API_EDP: "https://foss.openreplay.com/api", ASSETS_HOST: () => 'window.location.origin + "/assets"', - VERSION: '1.3.6', + VERSION: '1.4.0', SOURCEMAP: true, MINIO_ENDPOINT: process.env.MINIO_ENDPOINT, MINIO_PORT: process.env.MINIO_PORT, @@ -21,7 +21,7 @@ const oss = { MINIO_ACCESS_KEY: process.env.MINIO_ACCESS_KEY, MINIO_SECRET_KEY: process.env.MINIO_SECRET_KEY, ICE_SERVERS: process.env.ICE_SERVERS, - TRACKER_VERSION: '3.4.16', // trackerInfo.version, + TRACKER_VERSION: '3.4.17', // trackerInfo.version, } module.exports = { diff --git a/scripts/README.md b/scripts/README.md deleted file mode 100644 index be8b97b11..000000000 --- a/scripts/README.md +++ /dev/null @@ -1,33 +0,0 @@ -### Installing OpenReplay on any VM (Debian based, preferably Ubuntu 20.04) - -You can start testing OpenReplay by installing it on any VM (at least `2 vCPUs, 8 GB of RAM and 50 GB of storage`). We'll initialize a single node kubernetes cluster with [k3s](https://k3s.io) and install OpenReplay on the cluster. - -```bash -cd helm && bash install.sh -``` - -### Installing OpenReplay on Kubernetes - -OpenReplay runs 100% on kubernetes. So if you've got a kubernetes cluster, preferably, a cluster dedicated to OpenReplay (on a single node of `4 vCPUs, 8 GB of RAM and 50 GB of storage`). You can run the script, which internally uses helm to install OpenReplay. - -We hope your cluster has provision to create a [service type](https://kubernetes.io/docs/concepts/services-networking/service/#loadbalancer) `LoadBalancer` for exposing OpenReplay on the internet. 
- -```bash -cd helm && bash kube-install.sh -``` - -### OpenReplay CLI - -The CLI is helpful for managing basic aspects of your OpenReplay instance, things such as restarting or reinstalling a service, accessing a component's logs or simply checking the status of your backend services. Below the list of covered operations: - - - status: status of the running services - - logs: logs of a specific service - - stop: stop one or all services - - start: start one or all services - - restart: restart one or all services - - For more information: - - ```bash - cd helm && openreplay-cli -h - ``` diff --git a/scripts/helm/README.md b/scripts/helm/README.md deleted file mode 100644 index 2ac1a3556..000000000 --- a/scripts/helm/README.md +++ /dev/null @@ -1,47 +0,0 @@ -## Helm charts for installing OpenReplay components - -Installation components are separated by namespaces. - -**Namespace:** - -- **app:** Core OpenReplay application related components. - - alerts - - utilities - - assets - - chalice - - ender - - sink - - storage - - http - - integrations - - db - -- **db:** Contains following databases and backend components. - - kafka (ee) - - redis - - postgresql - - clickhouse (ee) - - minio - - nfs-server - -- **longhorn:** Storage solution for kubernetes PVs. - -- **nginx-ingress:** Nginx ingress for internet traffic to enter the kubernetes cluster. - -**Scripts:** -- **install.sh** - - Installs OpenReplay in a single node machine. - - This script is a wrapper around the `install.sh` with [k3s](https://k3s.io/) as kubernetes distro. - - Note: As of now this script support only Ubuntu, as we've to install some packages to enable `NFS`. - -- **kube-install.sh:** - - Installs OpenReplay on any given kubernetes cluster. 
Has 3 configuration types: - - small (2cores 8G RAM) - - medium (4cores 16G RAM) - - recommended (8cores 32G RAM) - - For all options, `bash kube-install.sh -h` diff --git a/scripts/helm/app/alerts.yaml b/scripts/helm/app/alerts.yaml index f992a7cee..59e6bc18b 100644 --- a/scripts/helm/app/alerts.yaml +++ b/scripts/helm/app/alerts.yaml @@ -22,7 +22,25 @@ resources: memory: 1Mi env: - ALERT_NOTIFICATION_STRING: http://chalice-openreplay.app.svc.cluster.local:8000/alerts/notifications - CLICKHOUSE_STRING: tcp://clickhouse.db.svc.cluster.local:9000/default - POSTGRES_STRING: postgres://postgres:asayerPostgres@postgresql.db.svc.cluster.local:5432 + pg_host: postgresql.db.svc.cluster.local + pg_port: 5432 + pg_dbname: postgres + pg_user: postgres + pg_password: asayerPostgres + EMAIL_HOST: '' + EMAIL_PORT: '587' + EMAIL_USER: '' + EMAIL_PASSWORD: '' + EMAIL_USE_TLS: 'true' + EMAIL_USE_SSL: 'false' + EMAIL_SSL_KEY: '' + EMAIL_SSL_CERT: '' + EMAIL_FROM: OpenReplay + SITE_URL: '' + S3_HOST: 'http://minio.db.svc.cluster.local:9000' + S3_KEY: minios3AccessKeyS3cr3t + S3_SECRET: m1n10s3CretK3yPassw0rd + AWS_DEFAULT_REGION: us-east-1 LICENSE_KEY: "" + PYTHONUNBUFFERED: '0' + version_number: '1.3.6' diff --git a/scripts/helm/app/chalice.yaml b/scripts/helm/app/chalice.yaml index fcbea8ed6..2d6b53ead 100644 --- a/scripts/helm/app/chalice.yaml +++ b/scripts/helm/app/chalice.yaml @@ -64,5 +64,6 @@ env: idp_x509cert: '' idp_sls_url: '' idp_name: '' + idp_tenantKey: '' assist_secret: '' iceServers: '' diff --git a/scripts/helm/db/bucket_policy.sh b/scripts/helm/db/bucket_policy.sh index 67e6adb77..65ea068e5 100644 --- a/scripts/helm/db/bucket_policy.sh +++ b/scripts/helm/db/bucket_policy.sh @@ -1,5 +1,5 @@ #!/bin/bash -buckets=("mobs" "sessions-assets" "static" "sourcemaps") +buckets=("mobs" "sessions-assets" "static" "sourcemaps" "sessions-mobile-assets") mc alias set minio http://localhost:9000 $1 $2 diff --git a/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql 
b/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql new file mode 100644 index 000000000..2f021d1ee --- /dev/null +++ b/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql @@ -0,0 +1,89 @@ +BEGIN; +CREATE OR REPLACE FUNCTION openreplay_version() + RETURNS text AS +$$ +SELECT 'v1.4.0' +$$ LANGUAGE sql IMMUTABLE; + +CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); + +CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_idx ON events.pages (session_id, timestamp); + +CREATE INDEX IF NOT EXISTS errors_timestamp_idx ON events.errors (timestamp); +CREATE INDEX IF NOT EXISTS projects_project_key_idx ON public.projects (project_key); + +ALTER TABLE sessions + ADD COLUMN IF NOT EXISTS utm_source text NULL DEFAULT NULL, + ADD COLUMN IF NOT EXISTS utm_medium text NULL DEFAULT NULL, + ADD COLUMN IF NOT EXISTS utm_campaign text NULL DEFAULT NULL; + +CREATE INDEX IF NOT EXISTS sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops); +CREATE INDEX IF NOT EXISTS sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops); +CREATE INDEX IF NOT EXISTS sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops); +CREATE INDEX IF NOT EXISTS requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE; + + +DROP INDEX IF EXISTS sessions_project_id_user_browser_idx1; +DROP INDEX IF EXISTS sessions_project_id_user_country_idx1; +ALTER INDEX IF EXISTS platform_idx RENAME TO sessions_platform_idx; +ALTER INDEX IF EXISTS events.resources_duration_idx RENAME TO resources_duration_durationgt0_idx; +DROP INDEX IF EXISTS projects_project_key_idx1; +CREATE INDEX IF NOT EXISTS errors_parent_error_id_idx ON errors (parent_error_id); + +CREATE INDEX IF NOT EXISTS performance_session_id_idx ON events.performance (session_id); +CREATE INDEX IF NOT EXISTS performance_timestamp_idx ON 
events.performance (timestamp); +CREATE INDEX IF NOT EXISTS performance_session_id_timestamp_idx ON events.performance (session_id, timestamp); +CREATE INDEX IF NOT EXISTS performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0; +CREATE INDEX IF NOT EXISTS performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0; + +CREATE TABLE IF NOT EXISTS metrics +( + metric_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer REFERENCES users (user_id) ON DELETE SET NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp +); +CREATE INDEX IF NOT EXISTS metrics_user_id_is_public_idx ON public.metrics (user_id, is_public); +CREATE TABLE IF NOT EXISTS metric_series +( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp +); +CREATE INDEX IF NOT EXISTS metric_series_metric_id_idx ON public.metric_series (metric_id); + +CREATE INDEX IF NOT EXISTS funnels_project_id_idx ON public.funnels (project_id); + + +CREATE TABLE IF NOT EXISTS searches +( + search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False +); + +CREATE INDEX IF NOT EXISTS searches_user_id_is_public_idx ON public.searches (user_id, is_public); 
+CREATE INDEX IF NOT EXISTS searches_project_id_idx ON public.searches (project_id); +CREATE INDEX IF NOT EXISTS alerts_project_id_idx ON alerts (project_id); + +ALTER TABLE alerts + ADD COLUMN IF NOT EXISTS series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE; + +CREATE INDEX IF NOT EXISTS alerts_series_id_idx ON alerts (series_id); +UPDATE alerts +SET options=jsonb_set(options, '{change}', '"change"') +WHERE detection_method = 'change' + AND options -> 'change' ISNULL; +COMMIT; \ No newline at end of file diff --git a/scripts/helm/db/init_dbs/postgresql/init_schema.sql b/scripts/helm/db/init_dbs/postgresql/init_schema.sql index 80b2a9135..4607c2759 100644 --- a/scripts/helm/db/init_dbs/postgresql/init_schema.sql +++ b/scripts/helm/db/init_dbs/postgresql/init_schema.sql @@ -3,6 +3,12 @@ BEGIN; CREATE SCHEMA IF NOT EXISTS events_common; CREATE SCHEMA IF NOT EXISTS events; +CREATE OR REPLACE FUNCTION openreplay_version() + RETURNS text AS +$$ +SELECT 'v1.4.0' +$$ LANGUAGE sql IMMUTABLE; + -- --- accounts.sql --- CREATE OR REPLACE FUNCTION generate_api_key(length integer) RETURNS text AS @@ -108,7 +114,7 @@ $$ CREATE EXTENSION IF NOT EXISTS pgcrypto; -- --- accounts.sql --- - CREATE TABLE IF NOT EXISTS public.tenants + CREATE TABLE tenants ( tenant_id integer NOT NULL DEFAULT 1, user_id text NOT NULL DEFAULT generate_api_key(20), @@ -256,38 +262,13 @@ $$ }'::jsonb -- ?????? 
); + CREATE INDEX projects_project_key_idx ON public.projects (project_key); CREATE TRIGGER on_insert_or_update AFTER INSERT OR UPDATE ON projects FOR EACH ROW EXECUTE PROCEDURE notify_project(); --- --- alerts.sql --- - - CREATE TYPE alert_detection_method AS ENUM ('threshold', 'change'); - - CREATE TABLE alerts - ( - alert_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, - project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, - name text NOT NULL, - description text NULL DEFAULT NULL, - active boolean NOT NULL DEFAULT TRUE, - detection_method alert_detection_method NOT NULL, - query jsonb NOT NULL, - deleted_at timestamp NULL DEFAULT NULL, - created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), - options jsonb NOT NULL DEFAULT '{ - "renotifyInterval": 1440 - }'::jsonb - ); - - - CREATE TRIGGER on_insert_or_update_or_delete - AFTER INSERT OR UPDATE OR DELETE - ON alerts - FOR EACH ROW - EXECUTE PROCEDURE notify_alert(); -- --- webhooks.sql --- @@ -347,7 +328,8 @@ $$ is_public boolean NOT NULL DEFAULT False ); - CREATE INDEX ON public.funnels (user_id, is_public); + CREATE INDEX funnels_user_id_is_public_idx ON public.funnels (user_id, is_public); + CREATE INDEX funnels_project_id_idx ON public.funnels (project_id); -- --- announcements.sql --- @@ -431,7 +413,7 @@ $$ context_string text NOT NULL, context jsonb DEFAULT NULL ); - CREATE INDEX ON issues (issue_id, type); + CREATE INDEX issues_issue_id_type_idx ON issues (issue_id, type); CREATE INDEX issues_context_string_gin_idx ON public.issues USING GIN (context_string gin_trgm_ops); CREATE INDEX issues_project_id_idx ON issues (project_id); @@ -452,7 +434,7 @@ $$ stacktrace jsonb, --to save the stacktrace and not query S3 another time stacktrace_parsed_at timestamp ); - CREATE INDEX ON errors (project_id, source); + CREATE INDEX errors_project_id_source_idx ON errors (project_id, source); CREATE INDEX errors_message_gin_idx ON public.errors USING GIN (message 
gin_trgm_ops); CREATE INDEX errors_name_gin_idx ON public.errors USING GIN (name gin_trgm_ops); CREATE INDEX errors_project_id_idx ON public.errors (project_id); @@ -461,6 +443,7 @@ $$ CREATE INDEX errors_project_id_error_id_idx ON public.errors (project_id, error_id); CREATE INDEX errors_project_id_error_id_integration_idx ON public.errors (project_id, error_id) WHERE source != 'js_exception'; CREATE INDEX errors_error_id_idx ON errors (error_id); + CREATE INDEX errors_parent_error_id_idx ON errors (parent_error_id); CREATE TABLE user_favorite_errors ( @@ -513,6 +496,9 @@ $$ watchdogs_score bigint NOT NULL DEFAULT 0, issue_score bigint NOT NULL DEFAULT 0, issue_types issue_type[] NOT NULL DEFAULT '{}'::issue_type[], + utm_source text NULL DEFAULT NULL, + utm_medium text NULL DEFAULT NULL, + utm_campaign text NULL DEFAULT NULL, metadata_1 text DEFAULT NULL, metadata_2 text DEFAULT NULL, metadata_3 text DEFAULT NULL, @@ -523,28 +509,25 @@ $$ metadata_8 text DEFAULT NULL, metadata_9 text DEFAULT NULL, metadata_10 text DEFAULT NULL --- , --- rehydration_id integer REFERENCES rehydrations(rehydration_id) ON DELETE SET NULL ); - CREATE INDEX ON sessions (project_id, start_ts); - CREATE INDEX ON sessions (project_id, user_id); - CREATE INDEX ON sessions (project_id, user_anonymous_id); - CREATE INDEX ON sessions (project_id, user_device); - CREATE INDEX ON sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_browser); - CREATE INDEX ON sessions (project_id, metadata_1); - CREATE INDEX ON sessions (project_id, metadata_2); - CREATE INDEX ON sessions (project_id, metadata_3); - CREATE INDEX ON sessions (project_id, metadata_4); - CREATE INDEX ON sessions (project_id, metadata_5); - CREATE INDEX ON sessions (project_id, metadata_6); - CREATE INDEX ON sessions (project_id, metadata_7); - CREATE INDEX ON sessions (project_id, metadata_8); - CREATE INDEX ON sessions (project_id, metadata_9); - CREATE INDEX ON sessions (project_id, metadata_10); --- 
CREATE INDEX ON sessions (rehydration_id); - CREATE INDEX ON sessions (project_id, watchdogs_score DESC); - CREATE INDEX platform_idx ON public.sessions (platform); + CREATE INDEX sessions_project_id_start_ts_idx ON sessions (project_id, start_ts); + CREATE INDEX sessions_project_id_user_id_idx ON sessions (project_id, user_id); + CREATE INDEX sessions_project_id_user_anonymous_id_idx ON sessions (project_id, user_anonymous_id); + CREATE INDEX sessions_project_id_user_device_idx ON sessions (project_id, user_device); + CREATE INDEX sessions_project_id_user_country_idx ON sessions (project_id, user_country); + CREATE INDEX sessions_project_id_user_browser_idx ON sessions (project_id, user_browser); + CREATE INDEX sessions_project_id_metadata_1_idx ON sessions (project_id, metadata_1); + CREATE INDEX sessions_project_id_metadata_2_idx ON sessions (project_id, metadata_2); + CREATE INDEX sessions_project_id_metadata_3_idx ON sessions (project_id, metadata_3); + CREATE INDEX sessions_project_id_metadata_4_idx ON sessions (project_id, metadata_4); + CREATE INDEX sessions_project_id_metadata_5_idx ON sessions (project_id, metadata_5); + CREATE INDEX sessions_project_id_metadata_6_idx ON sessions (project_id, metadata_6); + CREATE INDEX sessions_project_id_metadata_7_idx ON sessions (project_id, metadata_7); + CREATE INDEX sessions_project_id_metadata_8_idx ON sessions (project_id, metadata_8); + CREATE INDEX sessions_project_id_metadata_9_idx ON sessions (project_id, metadata_9); + CREATE INDEX sessions_project_id_metadata_10_idx ON sessions (project_id, metadata_10); + CREATE INDEX sessions_project_id_watchdogs_score_idx ON sessions (project_id, watchdogs_score DESC); + CREATE INDEX sessions_platform_idx ON public.sessions (platform); CREATE INDEX sessions_metadata1_gin_idx ON public.sessions USING GIN (metadata_1 gin_trgm_ops); CREATE INDEX sessions_metadata2_gin_idx ON public.sessions USING GIN (metadata_2 gin_trgm_ops); @@ -562,14 +545,15 @@ $$ CREATE INDEX 
sessions_user_id_gin_idx ON public.sessions USING GIN (user_id gin_trgm_ops); CREATE INDEX sessions_user_anonymous_id_gin_idx ON public.sessions USING GIN (user_anonymous_id gin_trgm_ops); CREATE INDEX sessions_user_country_gin_idx ON public.sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_browser); CREATE INDEX sessions_start_ts_idx ON public.sessions (start_ts) WHERE duration > 0; CREATE INDEX sessions_project_id_idx ON public.sessions (project_id) WHERE duration > 0; CREATE INDEX sessions_session_id_project_id_start_ts_idx ON sessions (session_id, project_id, start_ts) WHERE duration > 0; CREATE INDEX sessions_session_id_project_id_start_ts_durationNN_idx ON sessions (session_id, project_id, start_ts) WHERE duration IS NOT NULL; CREATE INDEX sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL; CREATE INDEX sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0; + CREATE INDEX sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops); + CREATE INDEX sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops); + CREATE INDEX sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops); ALTER TABLE public.sessions ADD CONSTRAINT web_browser_constraint CHECK ( @@ -598,7 +582,7 @@ $$ session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, PRIMARY KEY (user_id, session_id) ); - + CREATE INDEX user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); -- --- assignments.sql --- @@ -611,7 +595,7 @@ $$ created_at timestamp default timezone('utc'::text, now()) NOT NULL, provider_data jsonb default '{}'::jsonb NOT NULL ); - CREATE INDEX ON assigned_sessions (session_id); + CREATE INDEX assigned_sessions_session_id_idx ON 
assigned_sessions (session_id); -- --- events_common.sql --- @@ -629,9 +613,9 @@ $$ level events_common.custom_level NOT NULL DEFAULT 'info', PRIMARY KEY (session_id, timestamp, seq_index) ); - CREATE INDEX ON events_common.customs (name); + CREATE INDEX customs_name_idx ON events_common.customs (name); CREATE INDEX customs_name_gin_idx ON events_common.customs USING GIN (name gin_trgm_ops); - CREATE INDEX ON events_common.customs (timestamp); + CREATE INDEX customs_timestamp_idx ON events_common.customs (timestamp); CREATE TABLE events_common.issues @@ -657,10 +641,10 @@ $$ success boolean NOT NULL, PRIMARY KEY (session_id, timestamp, seq_index) ); - CREATE INDEX ON events_common.requests (url); - CREATE INDEX ON events_common.requests (duration); + CREATE INDEX requests_url_idx ON events_common.requests (url); + CREATE INDEX requests_duration_idx ON events_common.requests (duration); CREATE INDEX requests_url_gin_idx ON events_common.requests USING GIN (url gin_trgm_ops); - CREATE INDEX ON events_common.requests (timestamp); + CREATE INDEX requests_timestamp_idx ON events_common.requests (timestamp); CREATE INDEX requests_url_gin_idx2 ON events_common.requests USING GIN (RIGHT(url, length(url) - (CASE WHEN url LIKE 'http://%' THEN 7 @@ -668,7 +652,7 @@ $$ THEN 8 ELSE 0 END)) gin_trgm_ops); - + CREATE INDEX requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE; -- --- events.sql --- CREATE SCHEMA IF NOT EXISTS events; @@ -695,10 +679,11 @@ $$ ttfb integer DEFAULT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.pages (session_id); + CREATE INDEX pages_session_id_idx ON events.pages (session_id); CREATE INDEX pages_base_path_gin_idx ON events.pages USING GIN (base_path gin_trgm_ops); CREATE INDEX pages_base_referrer_gin_idx ON events.pages USING GIN (base_referrer gin_trgm_ops); - CREATE INDEX ON events.pages (timestamp); + CREATE INDEX pages_timestamp_idx ON events.pages (timestamp); 
+ CREATE INDEX pages_session_id_timestamp_idx ON events.pages (session_id, timestamp); CREATE INDEX pages_base_path_gin_idx2 ON events.pages USING GIN (RIGHT(base_path, length(base_path) - 1) gin_trgm_ops); CREATE INDEX pages_base_path_idx ON events.pages (base_path); CREATE INDEX pages_base_path_idx2 ON events.pages (RIGHT(base_path, length(base_path) - 1)); @@ -711,8 +696,8 @@ $$ THEN 8 ELSE 0 END)) gin_trgm_ops); - CREATE INDEX ON events.pages (response_time); - CREATE INDEX ON events.pages (response_end); + CREATE INDEX pages_response_time_idx ON events.pages (response_time); + CREATE INDEX pages_response_end_idx ON events.pages (response_end); CREATE INDEX pages_path_gin_idx ON events.pages USING GIN (path gin_trgm_ops); CREATE INDEX pages_path_idx ON events.pages (path); CREATE INDEX pages_visually_complete_idx ON events.pages (visually_complete) WHERE visually_complete > 0; @@ -746,10 +731,10 @@ $$ selector text DEFAULT '' NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.clicks (session_id); - CREATE INDEX ON events.clicks (label); + CREATE INDEX clicks_session_id_idx ON events.clicks (session_id); + CREATE INDEX clicks_label_idx ON events.clicks (label); CREATE INDEX clicks_label_gin_idx ON events.clicks USING GIN (label gin_trgm_ops); - CREATE INDEX ON events.clicks (timestamp); + CREATE INDEX clicks_timestamp_idx ON events.clicks (timestamp); CREATE INDEX clicks_label_session_id_timestamp_idx ON events.clicks (label, session_id, timestamp); CREATE INDEX clicks_url_idx ON events.clicks (url); CREATE INDEX clicks_url_gin_idx ON events.clicks USING GIN (url gin_trgm_ops); @@ -766,11 +751,11 @@ $$ value text DEFAULT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.inputs (session_id); - CREATE INDEX ON events.inputs (label, value); + CREATE INDEX inputs_session_id_idx ON events.inputs (session_id); + CREATE INDEX inputs_label_value_idx ON events.inputs (label, value); CREATE INDEX inputs_label_gin_idx ON 
events.inputs USING GIN (label gin_trgm_ops); CREATE INDEX inputs_label_idx ON events.inputs (label); - CREATE INDEX ON events.inputs (timestamp); + CREATE INDEX inputs_timestamp_idx ON events.inputs (timestamp); CREATE INDEX inputs_label_session_id_timestamp_idx ON events.inputs (label, session_id, timestamp); CREATE TABLE events.errors @@ -781,7 +766,8 @@ $$ error_id text NOT NULL REFERENCES errors (error_id) ON DELETE CASCADE, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.errors (session_id); + CREATE INDEX errors_session_id_idx ON events.errors (session_id); + CREATE INDEX errors_timestamp_idx ON events.errors (timestamp); CREATE INDEX errors_session_id_timestamp_error_id_idx ON events.errors (session_id, timestamp, error_id); CREATE INDEX errors_error_id_timestamp_idx ON events.errors (error_id, timestamp); CREATE INDEX errors_timestamp_error_id_session_id_idx ON events.errors (timestamp, error_id, session_id); @@ -796,9 +782,9 @@ $$ name text NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.graphql (name); + CREATE INDEX graphql_name_idx ON events.graphql (name); CREATE INDEX graphql_name_gin_idx ON events.graphql USING GIN (name gin_trgm_ops); - CREATE INDEX ON events.graphql (timestamp); + CREATE INDEX graphql_timestamp_idx ON events.graphql (timestamp); CREATE TABLE events.state_actions ( @@ -808,9 +794,9 @@ $$ name text NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.state_actions (name); + CREATE INDEX state_actions_name_idx ON events.state_actions (name); CREATE INDEX state_actions_name_gin_idx ON events.state_actions USING GIN (name gin_trgm_ops); - CREATE INDEX ON events.state_actions (timestamp); + CREATE INDEX state_actions_timestamp_idx ON events.state_actions (timestamp); CREATE TYPE events.resource_type AS ENUM ('other', 'script', 'stylesheet', 'fetch', 'img', 'media'); CREATE TYPE events.resource_method AS ENUM ('GET' , 'HEAD' , 'POST' , 'PUT' , 'DELETE' , 'CONNECT' , 
'OPTIONS' , 'TRACE' , 'PATCH' ); @@ -833,11 +819,13 @@ $$ decoded_body_size integer NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.resources (session_id); - CREATE INDEX ON events.resources (status); - CREATE INDEX ON events.resources (type); - CREATE INDEX ON events.resources (duration) WHERE duration > 0; - CREATE INDEX ON events.resources (url_host); + CREATE INDEX resources_session_id_idx ON events.resources (session_id); + CREATE INDEX resources_status_idx ON events.resources (status); + CREATE INDEX resources_type_idx ON events.resources (type); + CREATE INDEX resources_duration_durationgt0_idx ON events.resources (duration) WHERE duration > 0; + CREATE INDEX resources_url_host_idx ON events.resources (url_host); + CREATE INDEX resources_timestamp_idx ON events.resources (timestamp); + CREATE INDEX resources_success_idx ON events.resources (success); CREATE INDEX resources_url_gin_idx ON events.resources USING GIN (url gin_trgm_ops); CREATE INDEX resources_url_idx ON events.resources (url); @@ -871,6 +859,11 @@ $$ max_used_js_heap_size bigint NOT NULL, PRIMARY KEY (session_id, message_id) ); + CREATE INDEX performance_session_id_idx ON events.performance (session_id); + CREATE INDEX performance_timestamp_idx ON events.performance (timestamp); + CREATE INDEX performance_session_id_timestamp_idx ON events.performance (session_id, timestamp); + CREATE INDEX performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0; + CREATE INDEX performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0; -- --- autocomplete.sql --- @@ -903,10 +896,74 @@ $$ start_at timestamp NOT NULL, errors text NULL ); - CREATE INDEX ON jobs (status); - CREATE INDEX ON jobs (start_at); + CREATE INDEX jobs_status_idx ON jobs (status); + CREATE INDEX jobs_start_at_idx ON jobs (start_at); CREATE INDEX jobs_project_id_idx ON jobs (project_id); + CREATE TABLE metrics + ( + metric_id integer 
generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer REFERENCES users (user_id) ON DELETE SET NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp + ); + CREATE INDEX metrics_user_id_is_public_idx ON public.metrics (user_id, is_public); + CREATE TABLE metric_series + ( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp + ); + CREATE INDEX metric_series_metric_id_idx ON public.metric_series (metric_id); + + + CREATE TABLE searches + ( + search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False + ); + + CREATE INDEX searches_user_id_is_public_idx ON public.searches (user_id, is_public); + CREATE INDEX searches_project_id_idx ON public.searches (project_id); + + CREATE TYPE alert_detection_method AS ENUM ('threshold', 'change'); + + CREATE TABLE alerts + ( + alert_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE, + name text NOT NULL, + description text NULL DEFAULT NULL, + active boolean NOT NULL DEFAULT TRUE, + detection_method alert_detection_method NOT NULL, + query jsonb NOT NULL, + deleted_at timestamp NULL 
DEFAULT NULL, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + options jsonb NOT NULL DEFAULT '{ + "renotifyInterval": 1440 + }'::jsonb + ); + CREATE INDEX alerts_project_id_idx ON alerts (project_id); + CREATE INDEX alerts_series_id_idx ON alerts (series_id); + CREATE TRIGGER on_insert_or_update_or_delete + AFTER INSERT OR UPDATE OR DELETE + ON alerts + FOR EACH ROW + EXECUTE PROCEDURE notify_alert(); raise notice 'DB created'; END IF; diff --git a/scripts/helm/kube-install.sh b/scripts/helm/kube-install.sh index 0a42416bf..056a45de4 100755 --- a/scripts/helm/kube-install.sh +++ b/scripts/helm/kube-install.sh @@ -147,7 +147,7 @@ function enterprise(){ enterprise=1 sed -i "s#enterprise_edition_license.*#enterprise_edition_license: \"${1}\"#g" vars.yaml # Updating image version to be ee - sed -i "s/\(image_tag.*[0-9]\)\"$/\1-ee\"/" vars.yaml + sed 's/\(image_tag.*[0-9]\)\(-pr\)\?"$/\1\2-ee"/' vars.yaml echo "Importing enterprise code..." cp -rf ../../ee/scripts/* ../ } diff --git a/scripts/helm/nginx-ingress/nginx-ingress/templates/configmap.yaml b/scripts/helm/nginx-ingress/nginx-ingress/templates/configmap.yaml index bf41a28c2..ea59aa82d 100644 --- a/scripts/helm/nginx-ingress/nginx-ingress/templates/configmap.yaml +++ b/scripts/helm/nginx-ingress/nginx-ingress/templates/configmap.yaml @@ -62,7 +62,7 @@ data: proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Proto $origin_proto; proxy_pass http://chalice-openreplay.app.svc.cluster.local:8000; } location /assist/ { @@ -133,6 +133,10 @@ data: default upgrade; '' close; } + map $http_x_forwarded_proto $origin_proto { + default $http_x_forwarded_proto; + '' $scheme; + } server { listen 80 default_server; listen [::]:80 default_server; diff --git a/scripts/helm/openreplay-cli b/scripts/helm/openreplay-cli index 93478e2d6..b9db53b6c 100755 --- 
a/scripts/helm/openreplay-cli +++ b/scripts/helm/openreplay-cli @@ -51,7 +51,8 @@ EOF [ -d | --status ] [ -v | --verbose ] [ -l | --logs SERVICE ] - [ -i | --install SERVICE ] + [ -i | --legacy-install SERVICE ] + [ -I | --helm-install SERVICE ] [ -s | --stop SERVICE|all ] [ -S | --start SERVICE|all ] [ -r | --restart SERVICE|all ]" @@ -103,10 +104,14 @@ restart() { kubectl rollout restart -n app deployment $1-openreplay } -install() { +legacyInstall() { bash kube-install.sh --app $1 } +helmInstall() { + helm upgrade --install openreplay -n app openreplay -f vars.yaml +} + upgrade() { sed -i "s/tag:.*/ tag: 'latest'/g" ./app/$1.yaml } @@ -122,7 +127,7 @@ status() { [[ $# -eq 0 ]] && usage && exit 1 -PARSED_ARGUMENTS=$(color getopt -a -n openreplay-cli -o vhds:S:l:r:i: --long verbose,help,status,start:,stop:,logs:,restart:,install: -- "$@") +PARSED_ARGUMENTS=$(color getopt -a -n openreplay-cli -o vhds:S:l:r:i:I --long verbose,help,status,start:,stop:,logs:,restart:,legacy-install:,helm-install -- "$@") VALID_ARGUMENTS=$? 
if [[ "$VALID_ARGUMENTS" != "0" ]]; then usage @@ -135,11 +140,12 @@ do -v | --verbose) VERBOSE=1 ; shift ;; -h | --help) usage ; shift ;; -d | --status) status ; shift ;; + -I | --helm-install) helmInstall; shift ;; -s | --stop) stop $2 ; shift 2 ;; -S | --start) start $2 ; shift 2 ;; -l | --logs) logs "$2" ; shift 2 ;; -r | --restart) restart "$2" ; shift 2 ;; - -i | --install) install "$2" ; shift 2 ;; + -i | --legacy-install) legacyInstall "$2" ; shift 2 ;; # -- means the end of the arguments; drop this, and break out of the while loop --) shift; break ;; # If invalid options were passed, then getopt should have reported an error, diff --git a/scripts/helm/roles/openreplay/asayer.local.crt b/scripts/helm/roles/openreplay/asayer.local.crt deleted file mode 100644 index 49462c11e..000000000 --- a/scripts/helm/roles/openreplay/asayer.local.crt +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFGTCCAwGgAwIBAgIUH3ILXUbpzJ8NxM1al5AIq1lcR10wDQYJKoZIhvcNAQEL -BQAwHDEaMBgGA1UEAwwRYXNheWVyLmxvY2FsLmhvc3QwHhcNMjEwNDA5MTAwNjUx -WhcNMjIwNDA5MTAwNjUxWjAcMRowGAYDVQQDDBFhc2F5ZXIubG9jYWwuaG9zdDCC -AiIwDQYJKoZIhvcNAQEBBQADggIPADCCAgoCggIBALH673udMEJHrhTm5LdE1/Wy -Q8cD/RTxRwGei3rP4AmWEZrbD7DpgT+DDM5U3JgH7MN7PwDN3taa4cOErJNJzn6l -YQyips/xVMokVi9pcvXdLRElc8xLbF2u/O1JhefsLUFyYO83U7vyFmNy1ARN+SGU -SMMPa0BC9H1g2KsOKS7R5JAJtiNc5Z8bA7NdvZKYeG5AC+9q5mvAQ21CCm0lK2KA -P/txMVd4+Pu6uywqKxcmzLFtHM9AVr2IkobopxzocPdJh1ypqaTIWWvYqo4Iylv9 -DqEwlceRZUd5rOGcmeekptNAqy57U47ohnD8Lf7ZvkMNn3aBUxhI6nl6ZFcZAi23 -BIkYnhT4cQPloih2Z9bnr4HHdHofGAGrBbv0Oqgbxfu5GgCKM11LM10YKgSOEw8I -TG+1E08MVDt4Gxe4NfDDJC9UpkXAb+6b7vuZm4dNbDPRRz6JtUh+n0H+dmBNA+1x -P3FeXO206VuTWlj5jcndKKY0lvcb10i5StYWA1AVxR7TSR6K6lVFkMPWMCY5bNrY -BEzA8A3rCVe5UVdQvcBrovftafD67cSxC5Gz04Xc7H+M2hUBpk/AooTOxunHDcFz -yuvbVQW6L6aM1YOytup2tTOILYcKaPTiUf4J70ypUhlPaJ1qUxK1hDV+gHbSEUlx -KGoJAlg+KGXTt1vzPUphAgMBAAGjUzBRMB0GA1UdDgQWBBRihXjPkaSNtnQW8o1j -bXztF0iK3jAfBgNVHSMEGDAWgBRihXjPkaSNtnQW8o1jbXztF0iK3jAPBgNVHRMB 
-Af8EBTADAQH/MA0GCSqGSIb3DQEBCwUAA4ICAQAS1JO5tx7AKY2wh/gP6B160ETP -m3P/QWPWkLpAOIPZnk2aFC3AJvNnqfzeq4LHL19xbJ19SZJpuqi/E2DqPCOLmFwT -mTbfjZEmmYkmYqggd20v8TsOgxwF8Uz5oIJwSxPSJnFPFDVsbZrHR9OwPdVgjzr+ -helqNuw9gqyKxq+2sXKxGdMnoNlSfCJXse+3cgIsh6AXmG/XJQ4AldLSQkZvyddX -MVAImYv7oCynVJ4tsLseqmi4fzOj2Gkk8n5JFkvtmXi6e1F6Kgj+7CzTTB+tQiNu -gWDyySK4P9H7ASRcVAoCdwkFjiT9Yd3D8AcykeuP2TW+22PedL2eqEnhFrYxSe7x -jeZzPWm1Rkn8hhzXjm8ks2Mc9m4S+ncCnKKIVRHqmDmhyXZG/6gbwS/JCO9u2qiJ -9xzgNsiRS5VEgQfZy7SIaY8armCKNsUDp8ZxvWpextbBJzEVebLCF/MeMRTMTOMg -P02CHraJDXA64KBrmq1Y394itMxhqpZySrW0Fa7sLXkiLyQY8T0xX08JlUmerRls -msGM58FwZJ8kJ8HXxMs//Gt8S3OtiGjO7Ro/yduTKKjobBTDGWbjVKMzuz6Ly6yD -7Z7cHebqJvQODyFYsswVNV7pVfoj4HdhEybKUlJzGjDkk99wktBExsTD1TAkTP79 -olSG5Nko3vBfgRmuNQ== ------END CERTIFICATE----- diff --git a/scripts/helm/roles/openreplay/asayer.local.pem b/scripts/helm/roles/openreplay/asayer.local.pem deleted file mode 100644 index 805a3a93b..000000000 --- a/scripts/helm/roles/openreplay/asayer.local.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQCx+u97nTBCR64U -5uS3RNf1skPHA/0U8UcBnot6z+AJlhGa2w+w6YE/gwzOVNyYB+zDez8Azd7WmuHD -hKyTSc5+pWEMoqbP8VTKJFYvaXL13S0RJXPMS2xdrvztSYXn7C1BcmDvN1O78hZj -ctQETfkhlEjDD2tAQvR9YNirDiku0eSQCbYjXOWfGwOzXb2SmHhuQAvvauZrwENt -QgptJStigD/7cTFXePj7urssKisXJsyxbRzPQFa9iJKG6Kcc6HD3SYdcqamkyFlr -2KqOCMpb/Q6hMJXHkWVHeazhnJnnpKbTQKsue1OO6IZw/C3+2b5DDZ92gVMYSOp5 -emRXGQIttwSJGJ4U+HED5aIodmfW56+Bx3R6HxgBqwW79DqoG8X7uRoAijNdSzNd -GCoEjhMPCExvtRNPDFQ7eBsXuDXwwyQvVKZFwG/um+77mZuHTWwz0Uc+ibVIfp9B -/nZgTQPtcT9xXlzttOlbk1pY+Y3J3SimNJb3G9dIuUrWFgNQFcUe00keiupVRZDD -1jAmOWza2ARMwPAN6wlXuVFXUL3Aa6L37Wnw+u3EsQuRs9OF3Ox/jNoVAaZPwKKE -zsbpxw3Bc8rr21UFui+mjNWDsrbqdrUziC2HCmj04lH+Ce9MqVIZT2idalMStYQ1 -foB20hFJcShqCQJYPihl07db8z1KYQIDAQABAoICADXI7GhBx1ywRrZAVGvh3RV9 -yqew1+FlTJz7qZDykvSyqh1PB/3hgtKLquoLkSJMestUSmTW0fmukn1AcdqM2l9B -YQOtf5Wli4cO1hC74CbWsRDWbZkWTeg9wmd+6X19zOtTQmHoKWDJtuRUxieK3Le+ 
-FQMFfNbNpQ7inneK7v0cJuM0XBtxrl6XjlW9BVXwx6whpLm0Z2+2iWnSJE55hhPd -R/iXa17QywVfl8Kv95emfQM9QxCKk+vxAr7MPfwpbkH8sXBaQU7v0L4bmucTVvvX -jH0/rnPBx4aDCbCgX442HsgGafTBBi5QrJgmTUwPx0SCMGdii5byKhGN4BbGEvb2 -I5NaFXgGHxm4ObMUeVgRnr5ym5e8J8wykDzM/VJRqxdav3kRtUcLMWzhoK57kf0C -BkgQBp0DX6kWWEWrLaioRfQbyOGYo7ko547sHAYK54C0EUjqSdCMh4Sl1RR7B+yw -uAoi5qqDeh9D0k5LwRgI7+jS5t7bLgxlLQeXYgtj9g27nz8Q/w9P2/pSf6xVOC9T -oin48ynM8BsLoQGp/+pg3E091gSGHFjrsLTxCvMN/au3k3onI728Fm1dyjNQF54g -486W7za8hbf3SZhKmRFCDM+ePXqQjEWZqH65YRB+9BsP3gmCWoPsoZAgZ3ZC3GNJ -C1FUQkD3kqhcw7C/SthxAoIBAQDp5Rw3uN4kps4sKSCTQqKgHxFj2mR+N2+xgDib -LchvYqh2OLUo/2u0p4+X9/PYfIvhh+gPca7yTp5m3JUxTuFHgjScl/cDqrWkY7mn -7kLnU5qy67V9ouknY+64MUa3w08v8oIz+l0Kv/YWIztM66yY3mMcBaYix18njqM8 -wuXXjXfHs5WRyozNw2L6/ZOf0foNPKuKssPBGFD3rk6R812EbLfOHn8dLjoePolv -KAWCpK1fZcOYHfUCldPxM+vVtQ4/mMauAxYc6vCKC+MCWWKEU0vhuswITAg+r/Cb -gbjlFUlTO9LZ99a9WIW072GyZG63nPo66Fbz5yoBPpbSsdP9AoIBAQDCzQP5qfkI -g3e/FxYainJgbGMv4mNlCSdEAjStVt8KRwEHJq8Je34TMtuW5joRweJ0cwsF7zhH -qv8B/+9OfTlqdYWWMJhsBHJCi2BMdP+K9D4FgD7YJsJFqhYNYtAVjQpcLV7pbyzw -Q6cM+KFGAiudP2uVLbLSgUrFJj8su40hXFp9Ey4ZmwqtF7APj5p8fSdyuKqe3GZA -M4tAaiflnOUO6XFUcXShdn7z1Bg8MR3RfOl3kdg1tquIQyRygCT9DvD1OJofRaJ5 -prxU5kBZRy8NkKMDJwMpN2HNNv6A4E9tKPiFfKS2IDx8DHKVQqokzGeqJFjKD6X7 -mtMnlscURTM1AoIBAEN3auRH1wwLcX0bf+H6Xp6BV6rN3B7sLrOsoKdiVWaLtrwj -L15cDt1VV/4l75iKlG7BnRaXXDdYpihViFNoxWKzFCn3S7ErFKoOegcOmw3O99iU -VQ2gaFCMateGnzdyhuz1rsj/dscbCAjqsLhHYsnVnBEBMCLoD3KBmmR0c4fxrpNU -Idij7GmN6ocrjeGT1+TpkzG2zvBr6y8GgHNpF5iTI5G5CkGIjAsdTtdZW/2LMtdJ -q1m9imbsgO4aY/SI/Usq7mRTT5WXzzqCo0CSN/CjdkfucdwS0m4Cogy5JnQt57Cy -mOd2reKAHlFKm4uxnJ4qEaPcUPiZvWwOecLcuKECggEBAJQGRecJifs2K2BBosI4 -QgGTWyuf0/eKpRPAuVaelI2yts1AT/HcrUjZdJOODsI/mlzyroWr6hBLl8tlN+tk -rKSA+SnPc4NhPZHjZyGyJhzoy9hSQ27sxOM7A9bYJ1a4/g5dS6oGmRdZ0S7CH4qb -S5kKAC4zs5bSOIbnMUpEmlGnY/t2/6jlb49oWJj6fmHqmHdMbBqbT/epMz+XawnH -1yclrMCGFiI7cOzHVkMm74moaaA6axkl3eKFYsS/K1xAZklw4Wyx9WyLjsDU1kEB -HngoWDalBnnjY6YVC//Sc4h3gaGtvVvH0y5QJyHUroWX7Y4UYIPqsC4o0BtdL5nv 
-m6ECggEBAK15GYZSh9DE1YBt+507tmNbMFPxYnbx6ued/pNunA2uP6r/B+Ky5voG -kwFbBM73xTIIeweFG8WrYK5p/9ESfj4csVvFnwk4NDWwDquKKCeO1POMVYFcu+hl -si/mGdsIaA83LuWI4uGCVNmknaHNQ9Qr5QixMO7RHPYD4Ktej+HEGdiOt5g9L9Px -9rczzA8xiqalOXgLYzksiwiiPSpsMCKDcgLIwqNeaHTz9N2WFmG9MALjjJkyGAdD -OJ93XooOqww1OIPZQTK1bgEBZjevuzlMoI5aD68W94Vbkpy8GebcFSnrQkd/8Qn9 -FqR3uiHJh3vCo3I9vsS1ewNQgUgmGOk= ------END PRIVATE KEY----- diff --git a/scripts/helm/roles/openreplay/openreplay.local.crt b/scripts/helm/roles/openreplay/openreplay.local.crt deleted file mode 100644 index d4f9e5ce3..000000000 --- a/scripts/helm/roles/openreplay/openreplay.local.crt +++ /dev/null @@ -1,30 +0,0 @@ ------BEGIN CERTIFICATE----- -MIIFITCCAwmgAwIBAgIUbXel1z1h6hUtPLFlRfmYDjiLoXkwDQYJKoZIhvcNAQEL -BQAwIDEeMBwGA1UEAwwVb3BlbnJlcGxheS5sb2NhbC5ob3N0MB4XDTIxMDUwOTEz -MjkzOVoXDTIyMDUwOTEzMjkzOVowIDEeMBwGA1UEAwwVb3BlbnJlcGxheS5sb2Nh -bC5ob3N0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAsH4BQXN8bDIR -D9RO59KIpKK4Qv1oppZ2k/ZbOXBzlYqLmWfdbarAMJDoQZvrwbPUlDDvudHwVeGt -HjW6jB9H0JFBZ20fhH0Uxf+4OKQ+r59IJMo8iPSQ03ZOA74KuHga1MpSZa1VB1hZ -dfZWL21vyVcllP4jMLNEfB0L63S3k0pSCHfatDKeQmMIkUO0zFZcsjcxHmZEGfnt -rWTvE7sQoJdwzTq59Bpxpylvga48Mex1pU9c4utdMMLazMBlPCP26P8OKYu5LXN9 -0nNbEGFoolMbL8lUa63vNB8u1i+26+ESNcq/6T4R2VGcvg7qb5C90Spo6pOJVbmh -mX3I6ZQ8lP4Svv3VGb4nYObFf+Obu5X8c6w69f+AExwasYlrVnXDFCXVK8QGbvFE -WCBEVOYMJRA/zXbOi6VVO4PkFuAV8zIqWuGr++tb1sjdHg/xy6c/pahbqjG+mZ2m -oOpD5hmiYrQ8PPdDpSnufY/B86ldB6it5/cJgnC9VrZzRXUEOvCurx0a977SG5wh -Qg6Pnc309NCJMW/5vsLDK9/OPsOIsCm0JF9hNgqyzox2kyBacML0pws9Pn9ldeqF -dmyaQ98oIWSpTp/eMx2dJwPloEaom/e4tMxiLrt7zeHwWvv3UZNvDorijnshY66H -fq1gbhb4OCVOIQHSLqGMqQMBObBPIpUCAwEAAaNTMFEwHQYDVR0OBBYEFCjsH7qJ -jYUa5zKxggCdFW5Kqo1kMB8GA1UdIwQYMBaAFCjsH7qJjYUa5zKxggCdFW5Kqo1k -MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAIVPXyyScg1vILqZ -gJcJKskRqPAmKnwP1nBFw9uvCm5Js75nU/DhzCk6QGdevuSwDIXqquIlmvMzkubF -XZgZXEW2oq801h74jVe2ZFghvUOPchcS4ROXwHJRwORrYAidg0WFzG+HDD9qxuuK -T3J1t02O/O8EZ55BOgJXkHcSTfrVoRzkvOGgry243tnXAAeJFAeuQMEhpEianzFF 
-KDWykEQz7EnFRMqoaD5dvZ4IiHzPrsZ43yoewBOSwDvp/uXXhFgS4vJ0vEOZW2/L -JZd8/kK5JvxaLan3n7ALkLMH4ojQ9OIxWIY3aUoj/Oy/QnCK2vpzWIwwjpjLOxVF -UNhXZGwW4FbCtUPp27ctlEokjFmRvH6fQOGUhF5KCOXWC3zzwq3PnLCdBQjlG3by -NlcI5aIWUmKrx2xDcbtkJclqXxFsWx9kHUMurJ5Vf1ZVOh1+mAOy0F6+72AQN7AO -8YtzVjpdj29Sl3WbJx6DHHUD2T/EgPtkXiAu107Fd+tQbxO+Fp0X6oiAfRn/7nq0 -fQGlQbtleIPSjepHBEYwjCpRxtEPdvRj93w/M8Q9RW8w9shhGO6QElCuL3okOqNB -YMP3NSOjAInDwqn1aSj/R+G4S2HvaWP2n3wJONFxhxgFISoaTOBXuH/dQySDICq0 -vy50cJ+11ukSuYOwIKJGaKzlsUvg ------END CERTIFICATE----- diff --git a/scripts/helm/roles/openreplay/openreplay.local.pem b/scripts/helm/roles/openreplay/openreplay.local.pem deleted file mode 100644 index aeaafcc13..000000000 --- a/scripts/helm/roles/openreplay/openreplay.local.pem +++ /dev/null @@ -1,52 +0,0 @@ ------BEGIN PRIVATE KEY----- -MIIJQgIBADANBgkqhkiG9w0BAQEFAASCCSwwggkoAgEAAoICAQCwfgFBc3xsMhEP -1E7n0oikorhC/WimlnaT9ls5cHOViouZZ91tqsAwkOhBm+vBs9SUMO+50fBV4a0e -NbqMH0fQkUFnbR+EfRTF/7g4pD6vn0gkyjyI9JDTdk4Dvgq4eBrUylJlrVUHWFl1 -9lYvbW/JVyWU/iMws0R8HQvrdLeTSlIId9q0Mp5CYwiRQ7TMVlyyNzEeZkQZ+e2t -ZO8TuxCgl3DNOrn0GnGnKW+Brjwx7HWlT1zi610wwtrMwGU8I/bo/w4pi7ktc33S -c1sQYWiiUxsvyVRrre80Hy7WL7br4RI1yr/pPhHZUZy+DupvkL3RKmjqk4lVuaGZ -fcjplDyU/hK+/dUZvidg5sV/45u7lfxzrDr1/4ATHBqxiWtWdcMUJdUrxAZu8URY -IERU5gwlED/Nds6LpVU7g+QW4BXzMipa4av761vWyN0eD/HLpz+lqFuqMb6Znaag -6kPmGaJitDw890OlKe59j8HzqV0HqK3n9wmCcL1WtnNFdQQ68K6vHRr3vtIbnCFC -Do+dzfT00Ikxb/m+wsMr384+w4iwKbQkX2E2CrLOjHaTIFpwwvSnCz0+f2V16oV2 -bJpD3yghZKlOn94zHZ0nA+WgRqib97i0zGIuu3vN4fBa+/dRk28OiuKOeyFjrod+ -rWBuFvg4JU4hAdIuoYypAwE5sE8ilQIDAQABAoICAAYuM28OU0IasrxCLA00MzLI -u/kklKCYzkg6LVGinXmUI3VYzMlFXHuW53s8sZHT8VLLL+nq7yaiw4Q2T3UOt14X -aBnuGVdzZeHVJadeIQ/XXRhkuJ/3KIcvMmYxJoD1O8NN3nrPChY6ws89tCDvDSYY -1ikUiTjwXiqWfc9eA/u1c4/2WoNK7n3Wp/smStOJBq/6/M5auHHMbcVProubVma5 -Rur47oAc2PQIoG4DIvqAlFIYqGEN6vKFMFJRPnpPrBrWq7FKwk5UKAjRn1PAhYCB -tocC3uDyc1j46pP3kclK1W9qzTOmx8s7b2i8bvs/L1Z+zLPCPOHyoTUV0Y5UcZab -LzjX7DhPtX+wDX6TMnYe1dDcSa0Dt1bf98qSMJ3VCgim2VNqOHiv1BINJLbP+OBx 
-ezqc+MCDX/PgyCe0m20pKKVEIGh3U9nnwMOKTJkeZDj2gO4O0aUT8TQw61INHSSQ -2MGFTOnIk2WOu/N1+gYTWsU49GoNyXoNXRIkRIxHGvfCuIjDlIrPIp9id+pBJbze -ajRYyJYa2M1iHXH8HTgfSVrhB+H3MYooviUyRK/YSBDxeU9RKnNDDMRjYhIm74Fy -gDH5sTuD/JC/mPVP8xvu9+FY5Pp77rTZ5oJx2uKeYSqBK4jYyzgeZI6NNJvVMRN3 -tdIufHm9B5/nwWULRnDVAoIBAQDjdrYYLZLtvP7SLPpa23eAy0HOfSQeoCBZQVfd -LTwkspRegd9IOBGao5URsRBwtVqw78/+2O1gG8k1lgDBOEvVyB5/T2WcSesd6v5M -VswtmWA9KOVKDGpW/WWjOlOuvJKCfd1zyAHQQ0LVWltr2hIrxfooAjwj2vR/8C0t -jtDMsOGi1F1y4ji+X73PehKyR0kCOoP/0XwTFtgjzzRpogy3KNRZhwpGh1anuWHV -M4eFnCME4BLk6Q+2MmmuTAOyqs737mtyqMegoYl/8NauXsZo6J0PSugbjJDYGyvP -m/bzSYk2IDUx2iaCApbE6xsGegNmX9akuXDBxBAXYwAwYGuTAoIBAQDGokc8UbiS -aqxrsYy1JILZvquNXjDC1EgPaEsP8c1jQvPpdhJrFjYKW2SqGQRamjriFPimUnuQ -sIziNmJVZcRMOyWncgywhsuyre+uLEv/Wykdy/jNZ3W1SLqJ6I6wJYrQNz+3albG -S55vuR0ZwzQ/IE6pekdII+EryjSTAq8A8DV5WbwVIiZ/5ZmKrfKBTGaEraFkc8pH -YyBumhRT91gYzqsdFJ81ce01k5elvbW1YU2HnlZF2QwEzV4n46nrYmV/nw8jex0L -3Yb2/mP8aRSSvN8ZFKsG3voK6HGAwtEPDFL8aOLl/oUqNn6iY5WRkBduztipxH+v -qpk2brpfh6I3AoIBABqMQ2qE/y4SLeeX6kxqawM0NeUExA9y/vOL5dZhrjBWS2zQ -WaATMzLKPt7GvLV65S/bfXowFVxvQceoT0Q97tPvvGd9e82G7xMZ1z5PrxrvWMUq -ncBLvKmUf6VSbGvrhGoIiK03vcZx4z4FgsAOvqovPCgpQp2os3qvLIaxsXsCdgNM -J+/9qUPG5t9tTvtRyJqi+78C0yljNCzRxTtC95r0//vHABMFPr7qqxl4Hywf8hJi -mLHw5a2NKrp2EsRqnSdgw1epjUsZL3QoRtYpJ50c8R/UzUxR1qHyYeWkPWldvs77 -aVn9LRtlc/Vdv1S9LuJm8yVco2VqHbqtClubFVMCggEBAIxcA4ZmIMylwGzhK5Kc -2DHRgBwHIOGra7gndFMyBHZNy1l/a4hsS4eKCVoBD4iOT2mOdB3jTxlxCDyZPWcm -4E0VxofKGZlrxujd6+3hs1ogKq+5gHh00UJmwBnPUXS7MpnrUITlCahZqv07i4q2 -22Z4B93OYCxj3is5WWlAjjVViBPWCwfL9/SMY7ERNbmUKSU8JD19bMSBEVvWWOL0 -dzYgY93Kwq3bcUlYn+IoagxfU4+vnTsCInIKRKxfPaTtBNfnp6TkzCOkSEikfjbQ -cg82k1d/uVLDoWCv2/SmtV+yz7k4zIzFC4hGtbk+MWSIs2ZDl+puxeN144oJOH1k -tIUCggEAEKTCMtYFjNT+AdLnBUqS7r1UQ/z34Jmx8HF9jGSeE8WblNPxt7HPsDpw -d9ahwf5oLMyRNZe9TCpZH+BDGMTutQbPqmhqHPwPUfgUpqY3FKD6v46DIfoJ4k9w -vrqoS+FVJmFqwvvN5Z1Z61f2b6mEVXb5mBHogsk0rhCHP44aIPNB8h3Al3xX2UNE -GheUuGxdcEAhWzaGqAw5Y/TtgA8FFa8M7lL1xZcDXMW778oE1/Cu1y//6fJW5ssz 
-YLClXiE7AgTPOW/1kpI+YBauxXdcCeqDeHuBPzZ/FtzlzL3g24E84yEZvb9dzZW8 -h0BrE8NFZlEyg2ntATIT1MWhpIlsdA== ------END PRIVATE KEY----- diff --git a/scripts/helm/roles/openreplay/templates/alerts.yaml b/scripts/helm/roles/openreplay/templates/alerts.yaml index b2a91832b..97178c1ce 100644 --- a/scripts/helm/roles/openreplay/templates/alerts.yaml +++ b/scripts/helm/roles/openreplay/templates/alerts.yaml @@ -4,8 +4,32 @@ image: tag: {{ image_tag }} {% endif %} env: + S3_KEY: "{{ minio_access_key }}" + S3_SECRET: "{{ minio_secret_key }}" + SITE_URL: "https://{{ domain_name }}" + pg_host: "{{ postgres_endpoint }}" + pg_port: "{{ postgres_port }}" + pg_dbname: "{{ postgres_db_name }}" + pg_user: "{{ postgres_db_user }}" + pg_password: "{{ postgres_db_password }}" + EMAIL_HOST: "{{ email_host }}" + EMAIL_PORT: "{{ email_port }}" + EMAIL_USER: "{{ email_user }}" + EMAIL_PASSWORD: "{{ email_password }}" + EMAIL_USE_TLS: "{{ email_use_tls }}" + EMAIL_USE_SSL: "{{ email_use_ssl }}" + EMAIL_SSL_KEY: "{{ email_ssl_key }}" + EMAIL_SSL_CERT: "{{ email_ssl_cert }}" + EMAIL_FROM: "{{ email_from }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" LICENSE_KEY: "{{ enterprise_edition_license }}" - POSTGRES_STRING: "postgres://{{postgres_db_user}}:{{postgres_db_password}}@{{postgres_endpoint}}:{{postgres_port}}/{{ postgres_db_name }}" + # In case of minio, the instance is running inside kuberntes, + # which is accessible via nginx ingress. 
+{% if s3_endpoint == "http://minio.db.svc.cluster.local:9000" %} + S3_HOST: "https://{{ domain_name }}" +{% else %} + S3_HOST: "{{ s3_endpoint }}" +{% endif %} {% if not (docker_registry_username is defined and docker_registry_username and docker_registry_password is defined and docker_registry_password) %} imagePullSecrets: [] diff --git a/scripts/helm/upgrade.sh b/scripts/helm/upgrade.sh index f57d2fb07..3c437db9a 100644 --- a/scripts/helm/upgrade.sh +++ b/scripts/helm/upgrade.sh @@ -81,6 +81,7 @@ patch installation_type=1 if [[ ${ENTERPRISE} -eq 1 ]]; then cp -rf ../../ee/scripts/* ../../scripts/ + sed 's/\(image_tag.*[0-9]\)\(-pr\)\?"$/\1\2-ee"/' vars.yaml echo -e "Migrating clickhouse" migration clickhouse fi diff --git a/scripts/helm/vars.yaml b/scripts/helm/vars.yaml index f106f03af..f17ec904c 100644 --- a/scripts/helm/vars.yaml +++ b/scripts/helm/vars.yaml @@ -24,8 +24,8 @@ domain_name: "" docker_registry_username: "" docker_registry_password: "" docker_registry_url: "rg.fr-par.scw.cloud/foss" -image_tag: "v1.3.6" -openreplay_version: "v1.3.6" +image_tag: "v1.4.0" +openreplay_version: "v1.4.0" # Nginx ssl certificates. # in cert format diff --git a/scripts/helmcharts/databases/.helmignore b/scripts/helmcharts/databases/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/databases/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/databases/Chart.yaml b/scripts/helmcharts/databases/Chart.yaml new file mode 100644 index 000000000..0aa62594a --- /dev/null +++ b/scripts/helmcharts/databases/Chart.yaml @@ -0,0 +1,42 @@ +apiVersion: v2 +name: databases +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "1.16.0" + +dependencies: + - name: kafka + repository: file://charts/kafka + version: 11.8.6 + condition: kafka.enabled + - name: clickhouse + repository: file://charts/clickhouse + version: 1.16.0 + condition: clickhouse.enabled + - name: postgresql + repository: file://charts/postgresql + version: 9.8.2 + condition: postgresql.enabled + - name: redis + repository: file://charts/redis + version: 12.10.1 + condition: redis.enabled diff --git a/scripts/helmcharts/databases/charts/clickhouse/.helmignore b/scripts/helmcharts/databases/charts/clickhouse/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/databases/charts/clickhouse/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/databases/charts/clickhouse/Chart.yaml b/scripts/helmcharts/databases/charts/clickhouse/Chart.yaml new file mode 100644 index 000000000..c7a0eb3d6 --- /dev/null +++ b/scripts/helmcharts/databases/charts/clickhouse/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: clickhouse +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. 
This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 1.16.0 diff --git a/scripts/helmcharts/databases/charts/clickhouse/templates/_helpers.tpl b/scripts/helmcharts/databases/charts/clickhouse/templates/_helpers.tpl new file mode 100644 index 000000000..44cfadff0 --- /dev/null +++ b/scripts/helmcharts/databases/charts/clickhouse/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "clickhouse.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "clickhouse.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "clickhouse.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "clickhouse.labels" -}} +helm.sh/chart: {{ include "clickhouse.chart" . 
}} +{{ include "clickhouse.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "clickhouse.selectorLabels" -}} +app.kubernetes.io/name: {{ include "clickhouse.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "clickhouse.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "clickhouse.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/clickhouse/templates/service.yaml b/scripts/helmcharts/databases/charts/clickhouse/templates/service.yaml new file mode 100644 index 000000000..4496f556c --- /dev/null +++ b/scripts/helmcharts/databases/charts/clickhouse/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: clickhouse + labels: + {{- include "clickhouse.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.service.webPort }} + targetPort: web + protocol: TCP + name: web + - port: {{ .Values.service.dataPort }} + targetPort: data + protocol: TCP + name: data + selector: + {{- include "clickhouse.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/databases/charts/clickhouse/templates/serviceaccount.yaml b/scripts/helmcharts/databases/charts/clickhouse/templates/serviceaccount.yaml new file mode 100644 index 000000000..1f1183598 --- /dev/null +++ b/scripts/helmcharts/databases/charts/clickhouse/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "clickhouse.serviceAccountName" . }} + labels: + {{- include "clickhouse.labels" . 
| nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/clickhouse/templates/statefulset.yaml b/scripts/helmcharts/databases/charts/clickhouse/templates/statefulset.yaml new file mode 100644 index 000000000..392976eec --- /dev/null +++ b/scripts/helmcharts/databases/charts/clickhouse/templates/statefulset.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "clickhouse.fullname" . }} + labels: + {{- include "clickhouse.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + serviceName: {{ include "clickhouse.fullname" . }} + selector: + matchLabels: + {{- include "clickhouse.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "clickhouse.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "clickhouse.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + env: + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 9000 + name: web + - containerPort: 8123 + name: data + volumeMounts: + - name: ch-volume + mountPath: /var/lib/mydata + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: ch-volume + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storageSize }} diff --git a/scripts/helmcharts/databases/charts/clickhouse/values.yaml b/scripts/helmcharts/databases/charts/clickhouse/values.yaml new file mode 100644 index 000000000..4cba1c1f8 --- /dev/null +++ b/scripts/helmcharts/databases/charts/clickhouse/values.yaml @@ -0,0 +1,62 @@ +# Default values for clickhouse. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: yandex/clickhouse-server + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "20.9" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +env: {} + +service: + webPort: 9000 + dataPort: 8123 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} +storageSize: 8G diff --git a/scripts/helmcharts/databases/charts/kafka/.helmignore b/scripts/helmcharts/databases/charts/kafka/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helmcharts/databases/charts/kafka/Chart.yaml b/scripts/helmcharts/databases/charts/kafka/Chart.yaml new file mode 100755 index 000000000..165e70d55 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 2.6.0 +description: Apache Kafka is a distributed streaming platform. +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/kafka +icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-110x117.png +keywords: +- kafka +- zookeeper +- streaming +- producer +- consumer +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: kafka +sources: +- https://github.com/bitnami/bitnami-docker-kafka +- https://kafka.apache.org/ +version: 11.8.6 diff --git a/scripts/helmcharts/databases/charts/kafka/README.md b/scripts/helmcharts/databases/charts/kafka/README.md new file mode 100755 index 000000000..5584bd43d --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/README.md @@ -0,0 +1,737 @@ +# Kafka + +[Kafka](https://www.kafka.org/) is a distributed streaming platform used for building real-time data pipelines and streaming apps. 
It is horizontally scalable, fault-tolerant, wicked fast, and runs in production in thousands of companies. + +## TL;DR + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +## Introduction + +This chart bootstraps a [Kafka](https://github.com/bitnami/bitnami-docker-kafka) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
+ +## Parameters + +The following tables lists the configurable parameters of the Kafka chart and their default values per section/component: + +### Global parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | + +### Common parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `nameOverride` | String to partially override kafka.fullname | `nil` | +| `fullnameOverride` | String to fully override kafka.fullname | `nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `nil` (evaluated as a template) | + +### Kafka parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `image.registry` | Kafka image registry | `docker.io` | +| `image.repository` | Kafka image name | `bitnami/kafka` | +| `image.tag` | Kafka image tag | 
`{TAG_NAME}` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | +| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `nil` | +| `existingConfigmap` | Name of existing ConfigMap with Kafka configuration | `nil` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers. | `nil` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file. | `nil` | +| `heapOpts` | Kafka's Java Heap size | `-Xmx1024m -Xms1024m` | +| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | +| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. Enabling auto creation of topics not recommended for production or similar environments | `false` | +| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `10000` | +| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | +| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | +| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | +| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | +| `logSegmentBytes` | The maximum size of a log segment file. 
When this size is reached a new log segment will be created | `_1073741824` | +| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` | +| `maxMessageBytes` | The largest record batch size allowed by Kafka | `1000012` | +| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | +| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | +| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | +| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | +| `numIoThreads` | The number of threads doing disk I/O | `8` | +| `numNetworkThreads` | The number of threads handling network requests | `3` | +| `numPartitions` | The default number of log partitions per topic | `1` | +| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | +| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | +| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | +| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | +| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to Zookeeper | `6000` | +| `extraEnvVars` | Extra environment variables to add to kafka pods | `[]` | +| `extraVolumes` | Extra volume(s) to add to Kafka statefulset | `[]` | +| `extraVolumeMounts` | Extra volumeMount(s) to add to Kafka containers | `[]` | +| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. 
Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.saslMechanisms` | SASL mechanisms when either `auth.interBrokerProtocol` or `auth.clientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | +| `auth.saslInterBrokerMechanism` | SASL mechanism to use as inter broker protocol, it must be included at `auth.saslMechanisms` | `plain` | +| `auth.jksSecret` | Name of the existing secret containing the truststore and one keystore per Kafka broker you have in the cluster | `nil` | +| `auth.jksPassword` | Password to access the JKS files when they are password-protected | `nil` | +| `auth.tlsEndpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `auth.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | +| `auth.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `nil` | +| `auth.jaas.zookeeperUser` | Kafka Zookeeper user for SASL authentication | `nil` | +| `auth.jaas.zookeeperPassword` | Kafka Zookeeper password for SASL authentication | `nil` | +| `auth.jaas.existingSecret` | Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser | `nil` | +| `auth.jaas.clientUsers` | List of Kafka client users to be created, separated by commas. This values will override `auth.jaas.clientUser` | `[]` | +| `auth.jaas.clientPasswords` | List of passwords for `auth.jaas.clientUsers`. It is mandatory to provide the passwords when using `auth.jaas.clientUsers` | `[]` | +| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` | +| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. 
Auto-calculated it's set to an empty array | `[]` | +| `listenerSecurityProtocolMap` | The protocol->listener mapping. Auto-calculated it's set to nil | `nil` | +| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | +| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | + +### Statefulset parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `replicaCount` | Number of Kafka nodes | `1` | +| `updateStrategy` | Update strategy for the stateful set | `RollingUpdate` | +| `rollingUpdatePartition` | Partition update strategy | `nil` | +| `podLabels` | Kafka pod labels | `{}` (evaluated as a template) | +| `podAnnotations` | Kafka Pod annotations | `{}` (evaluated as a template) | +| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) | +| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) | +| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) | +| `podSecurityContext` | Kafka pods' Security Context | `{}` | +| `containerSecurityContext` | Kafka containers' Security Context | `{}` | +| `resources.limits` | The resources limits for Kafka containers | `{}` | +| `resources.requests` | The requested resources for Kafka containers | `{}` | +| `livenessProbe` | Liveness probe configuration for Kafka | `Check values.yaml file` | +| `readinessProbe` | Readiness probe configuration for Kafka | `Check values.yaml file` | +| `customLivenessProbe` | Custom Liveness probe configuration for Kafka | `{}` | +| `customReadinessProbe` | Custom Readiness probe configuration for Kafka | `{}` | +| `pdb.create` | 
Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `nil` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `1` | +| `command` | Override kafka container command | `['/scripts/setup.sh']` (evaluated as a template) | +| `args` | Override kafka container arguments | `[]` (evaluated as a template) | +| `sidecars` | Attach additional sidecar containers to the Kafka pod | `{}` | + +### Exposure parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | Kafka port for client connections | `9092` | +| `service.internalPort` | Kafka port for inter-broker connections | `9093` | +| `service.externalPort` | Kafka port for external connections | `9094` | +| `service.nodePorts.client` | Nodeport for client connections | `""` | +| `service.nodePorts.external` | Nodeport for external connections | `""` | +| `service.loadBalancerIP` | loadBalancerIP for Kafka Service | `nil` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `service.annotations` | Service annotations | `{}`(evaluated as a template) | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry (kubectl) | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image name (kubectl) | 
`bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (kubectl) | `{TAG_NAME}` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy (kubectl) | `Always` | +| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort or LoadBalancer | `LoadBalancer` | +| `externalAccess.service.port` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for Kafka brokers | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort | `nil` | +| `externalAccess.service.nodePorts` | Array of node ports used to configure Kafka external listener when service type is NodePort | `[]` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}`(evaluated as a template) | + +### Persistence parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that Zookeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template | `nil` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `nil` | +| 
`persistence.accessMode` | PVC Access Mode for Kafka data volume | `ReadWriteOnce` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}`(evaluated as a template) | + +### RBAC parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | Generated using the `kafka.fullname` template | +| `rbac.create` | Weather to create & use RBAC resources or not | `false` | + +### Volume Permissions parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| 
`volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + +### Metrics parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image name | `bitnami/kafka-exporter` | +| `metrics.kafka.image.tag` | Kafka exporter image tag | `{TAG_NAME}` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `nil` | +| `metrics.kafka.resources.limits` | Kafka Exporter container resource limits | `{}` | +| `metrics.kafka.resources.requests` | Kafka Exporter container resource requests | `{}` | +| `metrics.kafka.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Kafka Exporter | `ClusterIP` | +| `metrics.kafka.service.port` | Kafka Exporter Prometheus port | `9308` | +| `metrics.kafka.service.nodePort` | Kubernetes HTTP node port | `""` | +| `metrics.kafka.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | +| `metrics.kafka.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | 
`nil` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image name | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag | `{TAG_NAME}` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` | +| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` | +| `metrics.jmx.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter | `ClusterIP` | +| `metrics.jmx.service.port` | JMX Exporter Prometheus port | `5556` | +| `metrics.jmx.service.nodePort` | Kubernetes HTTP node port | `""` | +| `metrics.jmx.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | +| `metrics.jmx.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX Exporter | (see `values.yaml`) | +| `metrics.jmx.config` | Configuration file for JMX exporter | (see `values.yaml`) | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `nil` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `monitoring` | +| `metrics.serviceMonitor.interval` | Interval at which 
metrics should be scraped | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `nil` (Prometheus Operator default value) | + +### Zookeeper chart parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `zookeeper.enabled` | Switch to enable or disable the Zookeeper helm chart | `true` | +| `zookeeper.persistence.enabled` | Enable Zookeeper persistence using PVC | `true` | +| `externalZookeeper.servers` | Server or list of external Zookeeper servers to use | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set replicaCount=3 \ + bitnami/kafka +``` + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml bitnami/kafka +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. 
+ +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. + +- Number of Kafka nodes: + +```diff +- replicaCount: 1 ++ replicaCount: 3 +``` + +- Allow to use the PLAINTEXT listener: + +```diff +- allowPlaintextListener: true ++ allowPlaintextListener: false +``` + +- Default replication factors for automatically created topics: + +```diff +- defaultReplicationFactor: 1 ++ defaultReplicationFactor: 3 +``` + +- Allow auto creation of topics. + +```diff +- autoCreateTopicsEnable: true ++ autoCreateTopicsEnable: false +``` + +- The replication factor for the offsets topic: + +```diff +- offsetsTopicReplicationFactor: 1 ++ offsetsTopicReplicationFactor: 3 +``` + +- The replication factor for the transaction topic: + +```diff +- transactionStateLogReplicationFactor: 1 ++ transactionStateLogReplicationFactor: 3 +``` + +- Overridden min.insync.replicas config for the transaction topic: + +```diff +- transactionStateLogMinIsr: 1 ++ transactionStateLogMinIsr: 3 +``` + +- Switch to enable the Kafka SASL authentication on client and inter-broker communications: + +```diff +- auth.clientProtocol: plaintext ++ auth.clientProtocol: sasl +- auth.interBrokerProtocol: plaintext ++ auth.interBrokerProtocol: sasl +``` + +- Enable Zookeeper authentication: + +```diff ++ auth.jaas.zookeeperUser: zookeeperUser ++ auth.jaas.zookeeperPassword: zookeeperPassword +- zookeeper.auth.enabled: false ++ zookeeper.auth.enabled: true ++ zookeeper.auth.clientUser: zookeeperUser ++ zookeeper.auth.clientPassword: zookeeperPassword ++ zookeeper.auth.serverUsers: zookeeperUser ++ zookeeper.auth.serverPasswords: zookeeperPassword +``` + +- Enable Pod Disruption Budget: + +```diff +- pdb.create: false ++ pdb.create: true +``` + +- Create a separate Kafka metrics exporter: + +```diff +- 
metrics.kafka.enabled: false ++ metrics.kafka.enabled: true +``` + +- Expose JMX metrics to Prometheus: + +```diff +- metrics.jmx.enabled: false ++ metrics.jmx.enabled: true +``` + +- Enable Zookeeper metrics: + +```diff ++ zookeeper.metrics.enabled: true +``` + +To horizontally scale this chart once it has been deployed, you can upgrade the statefulset using a new value for the `replicaCount` parameter. Please note that, when enabling TLS encryption, you must update your JKS secret including the keystore for the new replicas. + +### Setting custom parameters + +Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property. + +### Listeners configuration + +This chart allows you to automatically configure Kafka with 3 listeners: + +- One for inter-broker communications. +- A second one for communications with clients within the K8s cluster. +- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-clusters) for more information. + +For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed. + +### Enable security for Kafka and Zookeeper + +You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. 
This table shows the available protocols and the security they provide: + +| Method | Authentication | Encryption via TLS | +|-----------|-------------------------------|--------------------| +| plaintext | None | No | +| tls | None | Yes | +| mtls | Yes (two-way authentication) | Yes | +| sasl | Yes (via SASL) | No | +| sasl_tls | Yes (via SASL) | Yes | + +If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below: + +- `auth.jaas.clientUsers`/`auth.jaas.clientPasswords`: when enabling SASL authentication for communications with clients. +- `auth.jaas.interBrokerUser`/`auth.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications. +- `auth.jaas.zookeeperUser`/`auth.jaas.zookeeperPassword`: In the case that the Zookeeper chart is deployed with SASL authentication enabled. + +In order to configure TLS authentication/encryption, you **must** create a secret containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. Then, you need pass the secret name with the `--auth.jksSecret` parameter when deploying the chart. + +> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.jksPassword` parameter to provide your password. + +For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers use the command below to create the secret: + +```console +kubectl create secret generic kafka-jks --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks +``` + +> **Note**: the command above assumes you already created the trustore and keystores files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS files generation. 
+ +As an alternative to manually create the secret before installing the chart, you can put your JKS files inside the chart folder `files/jks`, and a secret including them will be generated. Please note this alternative requires to have the chart downloaded locally, so you will have to clone this repository or fetch the chart before installing it. + +You can deploy the chart with authentication using the following parameters: + +```console +replicaCount=2 +auth.clientProtocol=sasl +auth.interBrokerProtocol=tls +auth.certificatesSecret=kafka-jks +auth.certificatesPassword=jksPassword +auth.jaas.clientUsers[0]=brokerUser +auth.jaas.clientPasswords[0]=brokerPassword +auth.jaas.zookeeperUser=zookeeperUser +auth.jaas.zookeeperPassword=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +``` + +If you also enable exposing metrics using the Kafka exporter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificate used to sign the brokers certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.kafka.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags: + +```console +metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""} +``` + +### Accessing Kafka brokers from outside the cluster + +In order to access Kafka Brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per kafka pod will be created. + +There are two ways of configuring external access. Using LoadBalancer services or using NodePort services. 
+ +#### Using LoadBalancer services + +You have two alternatives to use LoadBalancer services: + +- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discovers them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.port=9094 +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the load balancer IPs: + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.port=9094 +externalAccess.service.loadBalancerIPs[0]='external-ip-1' +externalAccess.service.loadBalancerIPs[1]='external-ip-2' +``` + +Note: You need to know in advance the load balancer IPs so each Kafka broker advertised listener is configured with it. + +#### Using NodePort services + +You have two alternatives to use NodePort services: + +- Option A) Use random node ports using an **initContainer** that discovers them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=NodePort +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the node ports: + +```console +externalAccess.enabled=true +externalAccess.service.type=NodePort +externalAccess.service.nodePorts[0]='node-port-1' +externalAccess.service.nodePorts[1]='node-port-2' +``` + +Note: You need to know in advance the node ports that will be exposed so each Kafka broker advertised listener is configured with it. + +The pod will try to get the external ip of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` is provided. 
+ +Following the aforementioned steps will also allow to connect the brokers from the outside using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections. + +### Sidecars + +If you have a need for additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such as Kafka Connect. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB: + +```yaml +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: |- + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + spec: + replicas: 1 + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 8 }} + app.kubernetes.io/component: connector + template: + metadata: + labels: {{- include "kafka.labels" . | nindent 10 }} + app.kubernetes.io/component: connector + spec: + containers: + - name: connect + image: KAFKA-CONNECT-IMAGE + imagePullPolicy: IfNotPresent + ports: + - name: connector + containerPort: 8083 + volumeMounts: + - name: configuration + mountPath: /opt/bitnami/kafka/config + volumes: + - name: configuration + configMap: + name: {{ include "kafka.fullname" . 
}}-connect + - apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + data: + connect-standalone.properties: |- + bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }} + ... + mongodb.properties: |- + connection.uri=mongodb://root:password@mongodb-hostname:27017 + ... + - apiVersion: v1 + kind: Service + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + spec: + ports: + - protocol: TCP + port: 8083 + targetPort: connector + selector: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: connector +``` + +You can create the Kafka Connect image using the Dockerfile below: + +```Dockerfile +FROM bitnami/kafka:latest +# Download MongoDB Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb +RUN mkdir -p /opt/bitnami/kafka/plugins && \ + cd /opt/bitnami/kafka/plugins && \ + curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar +CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties +``` + +## Persistence + +The [Bitnami Kafka](https://github.com/bitnami/bitnami-docker-kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence. 
+ +### Adjust permissions of persistent volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +## Upgrading + +### To 11.8.0 + +External access to brokers can now be achieved through the cluster's Kafka service. + +- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external` + +### To 11.7.0 + +The way to configure the users and passwords changed. Now it is allowed to create multiple users during the installation by providing the list of users and passwords. + +- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array). +- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array). + +### To 11.0.0 + +The way to configure listeners and authentication on Kafka is totally refactored allowing users to configure different authentication protocols on different listeners. Please check the sections [Listeners Configuration](#listeners-configuration) and [Enable security for Kafka and Zookeeper](#enable-security-for-kafka-and-zookeeper) for more information. + +Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or disappeared in favor of new ones on this major version: + +- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters. 
+- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters. +- `auth.certificatesSecret` -> renamed to `auth.jksSecret`. +- `auth.certificatesPassword` -> renamed to `auth.jksPassword`. +- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`. +- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser` +- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword` +- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser` +- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword` +- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret` +- `service.sslPort` -> deprecated in favor of `service.internalPort` +- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort` +- `metrics.kafka.extraFlag` -> new parameter +- `metrics.kafka.certificatesSecret` -> new parameter + +### To 10.0.0 + +If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later. + +### To 9.0.0 + +Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. 
Here you can find some parameters that were renamed on this major version: + +```diff +- securityContext.enabled +- securityContext.fsGroup +- securityContext.fsGroup ++ podSecurityContext +- externalAccess.service.loadBalancerIP ++ externalAccess.service.loadBalancerIPs +- externalAccess.service.nodePort ++ externalAccess.service.nodePorts +- metrics.jmx.configMap.enabled +- metrics.jmx.configMap.overrideConfig ++ metrics.jmx.config +- metrics.jmx.configMap.overrideName ++ metrics.jmx.existingConfigmap +``` + +Ports names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/). + +### To 8.0.0 + +There is not backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028). + +### To 7.0.0 + +Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments. +Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka: + +```console +helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false +helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true +``` + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. 
The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml new file mode 100755 index 000000000..c3b15dc5c --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 3.6.2 +description: A centralized service for maintaining configuration information, naming, + providing distributed synchronization, and providing group services for distributed + applications. 
+engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-110x117.png +keywords: +- zookeeper +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: zookeeper +sources: +- https://github.com/bitnami/bitnami-docker-zookeeper +- https://zookeeper.apache.org/ +version: 5.21.9 diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/README.md b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/README.md new file mode 100755 index 000000000..0291875ed --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/README.md @@ -0,0 +1,297 @@ +# ZooKeeper + +[ZooKeeper](https://zookeeper.apache.org/) is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. All of these kinds of services are used in some form or other by distributed applications. + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/zookeeper +``` + +## Introduction + +This chart bootstraps a [ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. 
+ +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/zookeeper +``` + +These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following tables lists the configurable parameters of the ZooKeeper chart and their default values per section/component: + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | + +### Common parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `nameOverride` | String to partially override zookeeper.fullname | `nil` | +| `fullnameOverride` | String to fully override zookeeper.fullname | 
`nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `schedulerName` | Kubernetes pod scheduler registry | `nil` (use the default-scheduler) | + +### Zookeeper chart parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `image.registry` | ZooKeeper image registry | `docker.io` | +| `image.repository` | ZooKeeper Image name | `bitnami/zookeeper` | +| `image.tag` | ZooKeeper Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should be set | `false` | +| `tickTime` | Basic time unit in milliseconds used by ZooKeeper for heartbeats | `2000` | +| `initLimit` | Time the ZooKeeper servers in quorum have to connect to a leader | `10` | +| `syncLimit` | How far out of date a server can be from a leader | `5` | +| `maxClientCnxns` | Number of concurrent connections that a single client may make to a single member | `60` | +| `maxSessionTimeout` | Maximum session timeout in milliseconds that the server will allow the client to negotiate. 
| `40000` | +| `autopurge.snapRetainCount` | Number of retains snapshots for autopurge | `3` | +| `autopurge.purgeInterval` | The time interval in hours for which the purge task has to be triggered | `0` | +| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands to use | `srvr, mntr` | +| `listenOnAllIPs` | Allow Zookeeper to listen for connections from its peers on all available IP addresses. | `false` | +| `allowAnonymousLogin` | Allow to accept connections from unauthenticated users | `yes` | +| `auth.existingSecret` | Use existing secret (ignores previous password) | `nil` | +| `auth.enabled` | Enable ZooKeeper auth | `false` | +| `auth.clientUser` | User that will use ZooKeeper clients to auth | `nil` | +| `auth.clientPassword` | Password that will use ZooKeeper clients to auth | `nil` | +| `auth.serverUsers` | List of user to be created | `nil` | +| `auth.serverPasswords` | List of passwords to assign to users when created | `nil` | +| `heapSize` | Size in MB for the Java Heap options (Xmx and XMs) | `[]` | +| `logLevel` | Log level of ZooKeeper server | `ERROR` | +| `jvmFlags` | Default JVMFLAGS for the ZooKeeper process | `nil` | +| `config` | Configure ZooKeeper with a custom zoo.conf file | `nil` | +| `dataLogDir` | Data log directory | `""` | + +### Statefulset parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `replicaCount` | Number of ZooKeeper nodes | `1` | +| `updateStrategy` | Update strategy for the statefulset | `RollingUpdate` | +| `rollingUpdatePartition` | Partition update strategy | `nil` | +| `podManagementPolicy` | Pod management policy | `Parallel` | +| `podLabels` | ZooKeeper pod labels | `{}` (evaluated as a template) | +| `podAnnotations` | ZooKeeper Pod annotations 
| `{}` (evaluated as a template) | +| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) | +| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) | +| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) | +| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods | `""` | +| `securityContext.enabled` | Enable security context (ZooKeeper master pod) | `true` | +| `securityContext.fsGroup` | Group ID for the container (ZooKeeper master pod) | `1001` | +| `securityContext.runAsUser` | User ID for the container (ZooKeeper master pod) | `1001` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `livenessProbe` | Liveness probe configuration for ZooKeeper | Check `values.yaml` file | +| `readinessProbe` | Readiness probe configuration for ZooKeeper | Check `values.yaml` file | +| `extraVolumes` | Extra volumes | `nil` | +| `extraVolumeMounts` | Mount extra volume(s) | `nil` | +| `podDisruptionBudget.maxUnavailable` | Max number of pods down simultaneously | `1` | + +### Exposure parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | ZooKeeper port | `2181` | +| `service.followerPort` | ZooKeeper follower port | `2888` | +| `service.electionPort` | ZooKeeper election port | `3888` | +| `service.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `serviceAccount.create` | Enable creation of ServiceAccount for zookeeper pod | `false` | +| `serviceAccount.name` | The name of the service account to use. 
If not set and `create` is `true`, a name is generated | Generated using the `zookeeper.fullname` template | +| `service.tls.client_enable` | Enable tls for client connections | `false` | +| `service.tls.quorum_enable` | Enable tls for quorum protocol | `false` | +| `service.tls.disable_base_client_port` | Remove client port from service definitions. | `false` | +| `service.tls.client_port` | Service port for tls client connections | `3181` | +| `service.tls.client_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` | +| `service.tls.client_keystore_password` | KeyStore password. You can use environment variables. | `nil` | +| `service.tls.client_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` | +| `service.tls.client_truststore_password` | TrustStore password. You can use environment variables. | `nil` | +| `service.tls.quorum_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` | +| `service.tls.quorum_keystore_password` | KeyStore password. You can use environment variables. | `nil` | +| `service.tls.quorum_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` | +| `service.tls.quorum_truststore_password` | TrustStore password. You can use environment variables. 
| `nil` | +| `service.annotations` | Annotations for the Service | `{}` | +| `service.headless.annotations` | Annotations for the Headless Service | `{}` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | + +### Persistence parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `persistence.enabled` | Enable Zookeeper data persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` (evaluated as a template) | +| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `nil` | +| `persistence.accessMode` | PVC Access Mode for ZooKeeper data volume | `ReadWriteOnce` | +| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` (evaluated as a template) | +| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's Data log directory | `8Gi` | +| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for Zookeeper's Data log directory | `nil` (evaluated as a template) | + +### Volume Permissions parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry 
| `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources` | Init container resource requests/limit | `nil` | + +### Metrics parameters + +| Parameter | Description | Default | +|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------| +| `metrics.enabled` | Enable prometheus to access zookeeper metrics endpoint | `false` | +| `metrics.containerPort` | Port where a Jetty server will expose Prometheus metrics | `9141` | +| `metrics.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Jetty server exposing Prometheus metrics | `ClusterIP` | +| `metrics.service.port` | Prometheus metrics service port | `9141` | +| `metrics.service.annotations` | Service annotations for Prometheus to auto-discover the metrics endpoint | `{prometheus.io/scrape: "true", prometheus.io/port: "9141"}` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource | The Release Namespace | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. 
| `nil` (Prometheus Operator default value) | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) | +| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `nil` | +| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` | +| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource | The Release Namespace | +| `metrics.prometheusRule.selector` | Prometheus instance selector labels | `nil` | +| `metrics.prometheusRule.rules` | Prometheus Rule definitions (see values.yaml for examples) | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set auth.clientUser=newUser \ + bitnami/zookeeper +``` + +The above command sets the ZooKeeper user to `newUser`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/zookeeper +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. 
You can use this file instead of the default one. + +- Number of ZooKeeper nodes: + +```diff +- replicaCount: 1 ++ replicaCount: 3 +``` + +- Enable prometheus metrics: + +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +### Log level + +You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable. By default, it is set to `ERROR` because of each readiness probe produce an `INFO` message on connection and a `WARN` message on disconnection. + +## Persistence + +The [Bitnami ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. + +### Data Log Directory + +You can use a dedicated device for logs (instead of using the data directory) to help avoiding competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string an it result in the log being written to the data directory (Zookeeper's default behavior). 
+ +When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information. + +## Upgrading + +### To 5.21.0 + +A couple of parameters related to Zookeeper metrics were renamed or disappeared in favor of new ones: + +- `metrics.port` is renamed to `metrics.containerPort`. +- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`. + +### To 3.0.0 + +This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade +of the application, each node will need to have at least one snapshot file created in the data directory. If not, the +new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056) +in order to find ways to workaround this issue in case you are facing it. + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. 
The following example assumes that the release name is zookeeper: + +```console +$ kubectl delete statefulset zookeeper-zookeeper --cascade=false +``` diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt new file mode 100755 index 000000000..3cc2edbed --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt @@ -0,0 +1,57 @@ +{{- if contains .Values.service.type "LoadBalancer" }} +{{- if not .Values.auth.clientPassword }} +------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not specifying "auth.enabled=true" + you have most likely exposed the ZooKeeper service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also specify a valid password on the + "auth.clientPassword" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} + +** Please be patient while the chart is being deployed ** + +ZooKeeper can be accessed via port 2181 on the following DNS name from within your cluster: + + {{ template "zookeeper.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To connect to your ZooKeeper server run the following commands: + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "zookeeper.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}") + kubectl exec -it $POD_NAME -- zkCli.sh + +To connect to your ZooKeeper server from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "zookeeper.fullname" . }}) + zkCli.sh $NODE_IP:$NODE_PORT + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "zookeeper.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + zkCli.sh $SERVICE_IP:2181 + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "zookeeper.fullname" . }} 2181:2181 & + zkCli.sh 127.0.0.1:2181 + +{{- end }} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl new file mode 100755 index 000000000..f82502d69 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,212 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "zookeeper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "zookeeper.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "zookeeper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "zookeeper.fullname" .) 
.Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Zookeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "zookeeper.labels" -}} +app.kubernetes.io/name: {{ include "zookeeper.name" . }} +helm.sh/chart: {{ include "zookeeper.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "zookeeper.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "zookeeper.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "zookeeper.matchLabels" -}} +app.kubernetes.io/name: {{ include "zookeeper.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Return ZooKeeper Client Password +*/}} +{{- define "zookeeper.clientPassword" -}} +{{- if .Values.auth.clientPassword -}} + {{- .Values.auth.clientPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return ZooKeeper Servers Passwords +*/}} +{{- define "zookeeper.serverPasswords" -}} +{{- if .Values.auth.serverPasswords -}} + {{- .Values.auth.serverPasswords -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else 
logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "zookeeper.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml new file mode 100755 index 000000000..1a4061565 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if .Values.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template 
"zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- +{{ .Values.config | indent 4 }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml new file mode 100755 index 000000000..3e26ed6c8 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{ include "zookeeper.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "zookeeper.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml new file mode 100755 index 000000000..f7e30b4bc --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,43 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections to zookeeper + - ports: + - port: {{ .Values.service.port }} + from: + {{- if not .Values.networkPolicy.allowExternal }} + - podSelector: + matchLabels: + {{ include "zookeeper.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 14 }} + {{- else }} + - podSelector: + matchLabels: {} + {{- end }} + # Internal ports + - ports: &intranodes_ports + - port: {{ .Values.service.followerPort }} + - port: {{ .Values.service.electionPort }} + from: + - podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . 
| nindent 14 }} + egress: + - ports: *intranodes_ports + # Allow outbound connections from zookeeper nodes + +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..818950c66 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if gt $replicaCount 1 }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml new file mode 100755 index 000000000..9cda3985c --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "zookeeper.fullname" . 
}} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- range $key, $value := .Values.metrics.prometheusRule.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "zookeeper.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 6 }} +{{- end }} + diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml new file mode 100755 index 000000000..b3d727fec --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + client-password: {{ include "zookeeper.clientPassword" . | b64enc | quote }} + server-password: {{ include "zookeeper.serverPasswords" . 
| b64enc | quote }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml new file mode 100755 index 000000000..3f7ef39fd --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml new file mode 100755 index 000000000..5782dad59 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "zookeeper.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml new file mode 100755 index 000000000..fa1e5231f --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,334 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "zookeeper.fullname" . 
}}-headless + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if (eq "Recreate" .Values.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + template: + metadata: + name: {{ template "zookeeper.fullname" . }} + labels: {{- include "zookeeper.labels" . | nindent 8 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.podLabels }} + {{- include "zookeeper.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.podAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} + serviceAccountName: {{ template "zookeeper.serviceAccountName" . 
}} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "zookeeper.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "zookeeper.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "zookeeper.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - chown + args: + - -R + - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + - /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - {{ .Values.dataLogDir }} + {{- end }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- end }} + containers: + - name: zookeeper + image: {{ template "zookeeper.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + command: + - bash + - -ec + - | + # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname + HOSTNAME=`hostname -s` + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + ORD=${BASH_REMATCH[2]} + export ZOO_SERVER_ID=$((ORD+1)) + else + echo "Failed to get index from hostname $HOST" + exit 1 + fi + exec /entrypoint.sh /run.sh + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + env: + - name: ZOO_DATA_LOG_DIR + value: {{ .Values.dataLogDir | quote }} + - name: ZOO_PORT_NUMBER + value: {{ .Values.service.port | quote }} + - name: ZOO_TICK_TIME + value: {{ .Values.tickTime | quote }} + - name: ZOO_INIT_LIMIT + value: {{ .Values.initLimit | quote }} + - name: ZOO_SYNC_LIMIT + value: {{ .Values.syncLimit | quote }} + - name: ZOO_MAX_CLIENT_CNXNS + value: {{ .Values.maxClientCnxns | quote }} + - name: ZOO_4LW_COMMANDS_WHITELIST + value: {{ .Values.fourlwCommandsWhitelist | quote }} + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }} + - name: ZOO_AUTOPURGE_INTERVAL + value: {{ .Values.autopurge.purgeInterval | quote }} + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: {{ .Values.autopurge.snapRetainCount | quote }} + - name: ZOO_MAX_SESSION_TIMEOUT + value: {{ .Values.maxSessionTimeout | quote }} + - name: ZOO_SERVERS + {{- $replicaCount := int .Values.replicaCount }} + {{- $followerPort := int .Values.service.followerPort }} + {{- $electionPort := int .Values.service.electionPort }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $zookeeperFullname := include "zookeeper.fullname" . 
}} + {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }} + {{- $clusterDomain := .Values.clusterDomain }} + value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }} {{ end }} + - name: ZOO_ENABLE_AUTH + value: {{ ternary "yes" "no" .Values.auth.enabled | quote }} + {{- if .Values.auth.enabled }} + - name: ZOO_CLIENT_USER + value: {{ .Values.auth.clientUser | quote }} + - name: ZOO_CLIENT_PASSWORD + valueFrom: + secretKeyRef: + name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" . }}{{ end }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" . 
}}{{ end }} + key: server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "yes" "no" .Values.allowAnonymousLogin | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.service.tls.client_enable }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.service.tls.client_enable | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.service.tls.client_keystore_path | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + value: {{ .Values.service.tls.client_keystore_password | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.service.tls.client_truststore_path | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + value: {{ .Values.service.tls.client_truststore_password | quote }} + {{ end }} + {{- if .Values.service.tls.quorum_enable }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.service.tls.quorum_enable | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.service.tls.quorum_keystore_path | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + value: {{ .Values.service.tls.quorum_keystore_password | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.service.tls.quorum_truststore_path | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + value: {{ .Values.service.tls.quorum_truststore_password | quote }} + {{ end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- toYaml .Values.extraEnvVars | nindent 12 }} + {{- end }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} 
+ - name: client + containerPort: {{ .Values.service.port }} + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: client-tls + containerPort: {{ .Values.service.tls.client_port }} + {{ end }} + - name: follower + containerPort: {{ .Values.service.followerPort }} + - name: election + containerPort: {{ .Values.service.electionPort }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + {{- if not .Values.service.tls.disable_base_client_port }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + {{- if not .Values.service.tls.disable_base_client_port }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + {{- end }} + initialDelaySeconds: {{ 
.Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if .Values.config }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + volumes: + {{- if .Values.config }} + - name: config + configMap: + name: {{ template "zookeeper.fullname" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) )}} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . 
| quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "zookeeper.storageClass" . | nindent 8 }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "zookeeper.storageClass" . | nindent 8 }} + {{- end }} + {{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml new file mode 100755 index 000000000..972efb51d --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }}\ + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} + - name: tcp-client + port: 2181 + targetPort: client + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: tcp-client-tls + port: {{ .Values.service.tls.client_port }} + targetPort: client-tls + {{ end }} + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: {{- include "zookeeper.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml new file mode 100755 index 000000000..da3a2895a --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }}\ + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} + - name: tcp-client + port: 2181 + targetPort: client + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: tcp-client-tls + port: {{ .Values.service.tls.client_port }} + targetPort: client-tls + {{ end }} + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: {{- include "zookeeper.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml new file mode 100755 index 000000000..7d678603f --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml @@ -0,0 +1,430 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Zookeeper image version +## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## +image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.6.2-debian-10-r10 + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override zookeeper.fullname template (will maintain the release name) +# nameOverride: + +## String to fully override zookeeper.fullname template +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Example Use Cases: +## mount certificates to enable tls +# extraVolumes: +# - name: zookeeper-keystore +# secret: +# defaultMode: 288 +# secretName: zookeeper-keystore +# - name: zookeeper-trustsore +# secret: +# defaultMode: 288 +# secretName: zookeeper-truststore +# extraVolumeMounts: +# - name: zookeeper-keystore +# mountPath: /certs/keystore +# readOnly: true +# - name: zookeeper-truststore +# mountPath: /certs/truststore +# readOnly: true + + +## StatefulSet controller supports automated updates. 
There are two valid update strategies: RollingUpdate and OnDelete +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +podDisruptionBudget: + maxUnavailable: 1 + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel + +## Number of ZooKeeper nodes +## +replicaCount: 3 + +## Basic time unit in milliseconds used by ZooKeeper for heartbeats +## +tickTime: 2000 + +## ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader +## +initLimit: 10 + +## How far out of date a server can be from a leader +## +syncLimit: 5 + +## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## +maxClientCnxns: 60 + +## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime. 
+## +maxSessionTimeout: 40000 + +## A list of comma separated Four Letter Words commands to use +## +fourlwCommandsWhitelist: srvr, mntr, ruok + +## Allow zookeeper to listen for peers on all IPs +## +listenOnAllIPs: false + +## Allow to accept connections from unauthenticated users +## +allowAnonymousLogin: true + +autopurge: + ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest + ## + snapRetainCount: 3 + ## The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable the auto purging. + ## + purgeInterval: 0 + +auth: + ## Use existing secret (ignores previous password) + ## + # existingSecret: + ## Enable Zookeeper auth. It uses SASL/Digest-MD5 + ## + enabled: false + ## User that will use Zookeeper clients to auth + ## + clientUser: + ## Password that will use Zookeeper clients to auth + ## + clientPassword: + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: + +## Size in MB for the Java Heap options (Xmx and XMs). This env var is ignored if Xmx an Xms are configured via JVMFLAGS +## +heapSize: 1024 + +## Log level for the Zookeeper server. ERROR by default. Have in mind if you set it to INFO or WARN the ReadinessProve will produce a lot of logs. +## +logLevel: ERROR + +## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir. +## This allows a dedicated log device to be used, and helps avoid competition between logging and snaphots. 
+## Example: +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" + +## Default JVMFLAGS for the ZooKeeper process +## +# jvmFlags: + +## Configure ZooKeeper with a custom zoo.cfg file +## +# config: + +## Kubernetes configuration +## For minikube, set this to NodePort, elsewhere use LoadBalancer +## +service: + type: ClusterIP + port: 2181 + followerPort: 2888 + electionPort: 3888 + publishNotReadyAddresses: true + tls: + client_enable: true + quorum_enable: true + disable_base_client_port: true + + client_port: 3181 + + client_keystore_path: /tls_key_store/key_store_file + client_keystore_password: "" + client_truststore_path: /tls_trust_store/trust_store_file + client_truststore_password: "" + + quorum_keystore_path: /tls_key_store/key_store_file + quorum_keystore_password: "" + quorum_truststore_path: /tls_trust_store/trust_store_file + quorum_truststore_password: "" + annotations: {} + headless: + annotations: {} + +## Service account for Zookeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the zookeeper.fullname template + # name: + +## Zookeeper Pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Zookeeper data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. 
(gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + dataLogDir: + size: 8Gi + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Labels +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Annotations +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 
+ periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 + +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, zookeeper accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## Zookeeper Prometheus Exporter configuration +## +metrics: + enabled: false + + ## Zookeeper Prometheus Exporter container port + ## + containerPort: 9141 + + ## Service configuration + ## + service: + ## Zookeeper Prometheus Exporter service type + ## + type: ClusterIP + ## Zookeeper Prometheus Exporter service port + ## + port: 9141 + ## Annotations for the Zookeeper Prometheus Exporter metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: + + ## Interval at which metrics should be scraped. 
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + enabled: false + ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: + + ## PrometheusRule selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + + ## Some example rules. + rules: [] + # - alert: ZookeeperSyncedFollowers + # annotations: + # message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one). + # expr: max(synced_followers{service="my-release-metrics"}) < 2 + # for: 5m + # labels: + # severity: critical + # - alert: ZookeeperOutstandingRequests + # annotations: + # message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster a whole. 
+ # expr: outstanding_requests{service="my-release-metrics"} > 10 + # for: 5m + # labels: + # severity: critical diff --git a/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml new file mode 100755 index 000000000..a40decb54 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml @@ -0,0 +1,430 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Zookeeper image version +## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/ +## +image: + registry: docker.io + repository: bitnami/zookeeper + tag: 3.6.2-debian-10-r10 + + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override zookeeper.fullname template (will maintain the release name) +# nameOverride: + +## String to fully override zookeeper.fullname template +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Example Use Cases: +## mount certificates to enable tls +# extraVolumes: +# - name: zookeeper-keystore +# secret: +# defaultMode: 288 +# secretName: zookeeper-keystore +# - name: zookeeper-trustsore +# secret: +# defaultMode: 288 +# secretName: zookeeper-truststore +# extraVolumeMounts: +# - name: zookeeper-keystore +# mountPath: /certs/keystore +# readOnly: true +# - name: zookeeper-truststore +# mountPath: /certs/truststore +# readOnly: true + +## StatefulSet controller supports automated updates. 
There are two valid update strategies: RollingUpdate and OnDelete +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +podDisruptionBudget: + maxUnavailable: 1 + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy +## +podManagementPolicy: Parallel + +## Number of ZooKeeper nodes +## +replicaCount: 1 + +## Basic time unit in milliseconds used by ZooKeeper for heartbeats +## +tickTime: 2000 + +## ZooKeeper uses to limit the length of time the ZooKeeper servers in quorum have to connect to a leader +## +initLimit: 10 + +## How far out of date a server can be from a leader +## +syncLimit: 5 + +## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble +## +maxClientCnxns: 60 + +## A list of comma separated Four Letter Words commands to use +## +fourlwCommandsWhitelist: srvr, mntr, ruok + +## Allow zookeeper to listen for peers on all IPs +## +listenOnAllIPs: false + +## Allow to accept connections from unauthenticated users +## +allowAnonymousLogin: true + +autopurge: + ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest + ## + snapRetainCount: 3 + ## The time interval in hours for which the purge task has 
to be triggered. Set to a positive integer (1 and above) to enable the auto purging. + ## + purgeInterval: 0 + +## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime. +## +maxSessionTimeout: 40000 + +auth: + ## Use existing secret (ignores previous password) + ## + # existingSecret: + ## Enable Zookeeper auth. It uses SASL/Digest-MD5 + ## + enabled: false + ## User that will use Zookeeper clients to auth + ## + clientUser: + ## Password that will use Zookeeper clients to auth + ## + clientPassword: + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: + +## Size in MB for the Java Heap options (Xmx and XMs). This env var is ignored if Xmx an Xms are configured via JVMFLAGS +## +heapSize: 1024 + +## Log level for the Zookeeper server. ERROR by default. Have in mind if you set it to INFO or WARN the ReadinessProve will produce a lot of logs. +## +logLevel: ERROR + +## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir. +## This allows a dedicated log device to be used, and helps avoid competition between logging and snaphots. 
+## Example: +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" + +## Default JVMFLAGS for the ZooKeeper process +## +# jvmFlags: + +## Configure ZooKeeper with a custom zoo.cfg file +## +# config: + +## Kubernetes configuration +## For minikube, set this to NodePort, elsewhere use LoadBalancer +## +service: + type: ClusterIP + port: 2181 + followerPort: 2888 + electionPort: 3888 + publishNotReadyAddresses: true + tls: + client_enable: false + quorum_enable: false + disable_base_client_port: false + + client_port: 3181 + + client_keystore_path: /tls_key_store/key_store_file + client_keystore_password: "" + client_truststore_path: /tls_trust_store/trust_store_file + client_truststore_password: "" + + quorum_keystore_path: /tls_key_store/key_store_file + quorum_keystore_password: "" + quorum_truststore_path: /tls_trust_store/trust_store_file + quorum_truststore_password: "" + annotations: {} + headless: + annotations: {} + +## Service account for Zookeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the zookeeper.fullname template + # name: + +## Zookeeper Pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Zookeeper data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. 
(gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + enabled: true + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + dataLogDir: + size: 8Gi + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Labels +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Annotations +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## +livenessProbe: + enabled: true + initialDelaySeconds: 
30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + probeCommandTimeout: 2 + +## Network policies +## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/ +## +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis is listening + ## on. When true, zookeeper accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + +## Zookeeper Prometheus Exporter configuration +## +metrics: + enabled: false + + ## Zookeeper Prometheus Exporter container port + ## + containerPort: 9141 + + ## Service configuration + ## + service: + ## Zookeeper Prometheus Exporter service type + ## + type: ClusterIP + ## Zookeeper Prometheus Exporter service port + ## + port: 9141 + ## Annotations for the Zookeeper Prometheus Exporter metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace) + ## + namespace: + + ## Interval at which metrics should be scraped. 
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + + ## Prometheus Operator PrometheusRule configuration + ## + prometheusRule: + enabled: false + ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace) + ## + namespace: + + ## PrometheusRule selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + + ## Some example rules. + rules: [] + # - alert: ZookeeperSyncedFollowers + # annotations: + # message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one). + # expr: max(synced_followers{service="my-release-metrics"}) < 2 + # for: 5m + # labels: + # severity: critical + # - alert: ZookeeperOutstandingRequests + # annotations: + # message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or cluster a whole. 
+ # expr: outstanding_requests{service="my-release-metrics"} > 10 + # for: 5m + # labels: + # severity: critical diff --git a/scripts/helmcharts/databases/charts/kafka/files/jks/README.md b/scripts/helmcharts/databases/charts/kafka/files/jks/README.md new file mode 100755 index 000000000..e110a8825 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/files/jks/README.md @@ -0,0 +1,10 @@ +# Java Key Stores + +You can copy here your Java Key Stores (JKS) files so a secret is created including them. Remember to use a truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. For instance, if you have 3 brokers you need to copy here the following files: + +- kafka.truststore.jks +- kafka-0.keystore.jks +- kafka-1.keystore.jks +- kafka-2.keystore.jks + +Find more info in [this section](https://github.com/bitnami/charts/tree/master/bitnami/kafka#enable-security-for-kafka-and-zookeeper) of the README.md file. diff --git a/scripts/helmcharts/databases/charts/kafka/kafka.yaml b/scripts/helmcharts/databases/charts/kafka/kafka.yaml new file mode 100644 index 000000000..acd718957 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/kafka.yaml @@ -0,0 +1,521 @@ +--- +# Source: kafka/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-scripts + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm +data: + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"kafka-"}" + export KAFKA_CFG_BROKER_ID="$ID" + + exec /entrypoint.sh /run.sh +--- +# Source: 
kafka/charts/zookeeper/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-zookeeper-headless + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + + - name: tcp-client + port: 2181 + targetPort: client + + + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper +--- +# Source: kafka/charts/zookeeper/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-zookeeper + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper +spec: + type: ClusterIP + ports: + + - name: tcp-client + port: 2181 + targetPort: client + + + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper +--- +# Source: kafka/templates/kafka-metrics-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-metrics + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + annotations: + + prometheus.io/path: /metrics + prometheus.io/port: '9308' + prometheus.io/scrape: "true" +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 9308 + protocol: TCP + targetPort: metrics + nodePort: null + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: 
kafka + app.kubernetes.io/component: metrics +--- +# Source: kafka/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: 9092 + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: 9093 + protocol: TCP + targetPort: kafka-internal + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + type: ClusterIP + ports: + - name: tcp-client + port: 9092 + protocol: TCP + targetPort: kafka-client + nodePort: null + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/kafka-metrics-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-exporter + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: metrics + template: + metadata: + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + spec: + containers: + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.2.0-debian-10-r220 + 
imagePullPolicy: "IfNotPresent" + command: + - /bin/bash + - -ec + - | + read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" + kafka_exporter \ + --kafka.server=kafka-0.kafka-headless.db.svc.cluster.local:9092 \ + --kafka.server=kafka-1.kafka-headless.db.svc.cluster.local:9092 \ + --web.listen-address=:9308 + ports: + - name: metrics + containerPort: 9308 + resources: + limits: {} + requests: {} +--- +# Source: kafka/charts/zookeeper/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka-zookeeper + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper + role: zookeeper +spec: + serviceName: kafka-zookeeper-headless + replicas: 1 + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper + template: + metadata: + name: kafka-zookeeper + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper + spec: + + serviceAccountName: default + securityContext: + fsGroup: 1001 + containers: + - name: zookeeper + image: docker.io/bitnami/zookeeper:3.6.2-debian-10-r10 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + command: + - bash + - -ec + - | + # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname + HOSTNAME=`hostname -s` + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + ORD=${BASH_REMATCH[2]} + export ZOO_SERVER_ID=$((ORD+1)) + else + echo "Failed to get index from hostname $HOSTNAME" + exit 1 + fi + exec /entrypoint.sh /run.sh + resources: + requests: + cpu: 250m + memory: 256Mi + env: + - name: ZOO_DATA_LOG_DIR + value: "" + - name:
ZOO_PORT_NUMBER + value: "2181" + - name: ZOO_TICK_TIME + value: "2000" + - name: ZOO_INIT_LIMIT + value: "10" + - name: ZOO_SYNC_LIMIT + value: "5" + - name: ZOO_MAX_CLIENT_CNXNS + value: "60" + - name: ZOO_4LW_COMMANDS_WHITELIST + value: "srvr, mntr, ruok" + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: "no" + - name: ZOO_AUTOPURGE_INTERVAL + value: "0" + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: "3" + - name: ZOO_MAX_SESSION_TIMEOUT + value: "40000" + - name: ZOO_SERVERS + value: kafka-zookeeper-0.kafka-zookeeper-headless.db.svc.cluster.local:2888:3888 + - name: ZOO_ENABLE_AUTH + value: "no" + - name: ZOO_HEAP_SIZE + value: "1024" + - name: ZOO_LOG_LEVEL + value: "ERROR" + - name: ALLOW_ANONYMOUS_LOGIN + value: "yes" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + ports: + + - name: client + containerPort: 2181 + + + - name: follower + containerPort: 2888 + - name: election + containerPort: 3888 + livenessProbe: + exec: + command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok'] + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok'] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + volumes: + volumeClaimTemplates: + - metadata: + name: data + annotations: + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" +--- +# Source: kafka/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + podManagementPolicy: Parallel + replicas: 2 + selector: + 
matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka + serviceName: kafka-headless + updateStrategy: + type: "RollingUpdate" + template: + metadata: + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka + spec: + securityContext: + fsGroup: 1001 + runAsUser: 1001 + serviceAccountName: kafka + containers: + - name: kafka + image: docker.io/bitnami/kafka:2.6.0-debian-10-r30 + imagePullPolicy: "IfNotPresent" + command: + - /scripts/setup.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + value: "kafka-zookeeper" + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: "INTERNAL" + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + value: "INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT" + - name: KAFKA_CFG_LISTENERS + value: "INTERNAL://:9093,CLIENT://:9092" + - name: KAFKA_CFG_ADVERTISED_LISTENERS + value: "INTERNAL://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9093,CLIENT://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9092" + - name: ALLOW_PLAINTEXT_LISTENER + value: "yes" + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: "false" + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: "true" + - name: KAFKA_HEAP_OPTS + value: "-Xmx1024m -Xms1024m" + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: "10000" + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: "1000" + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: "1073741824" + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS + value: "300000" + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: "168" + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: "1000012" + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: "1073741824" + - name: KAFKA_CFG_LOG_DIRS + 
value: "/bitnami/kafka/data" + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: "1" + - name: KAFKA_CFG_NUM_IO_THREADS + value: "8" + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: "3" + - name: KAFKA_CFG_NUM_PARTITIONS + value: "1" + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: "1" + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: "102400" + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: "104857600" + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: "102400" + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: "6000" + ports: + - name: kafka-client + containerPort: 9092 + - name: kafka-internal + containerPort: 9093 + livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: 3 + periodSeconds: 10 + successThreshold: 1 + readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 5 + timeoutSeconds: 5 + failureThreshold: 6 + periodSeconds: 10 + successThreshold: 1 + resources: + limits: {} + requests: {} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + volumes: + - name: scripts + configMap: + name: kafka-scripts + defaultMode: 0755 + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" diff --git a/scripts/helmcharts/databases/charts/kafka/requirements.lock b/scripts/helmcharts/databases/charts/kafka/requirements.lock new file mode 100755 index 000000000..115d0b229 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 5.21.9 +digest:
sha256:2f3c43ce02e3966648b8c89be121fe39537f62ea1d161ad908f51ddc90e4243e +generated: "2020-09-29T07:43:56.483358254Z" diff --git a/scripts/helmcharts/databases/charts/kafka/requirements.yaml b/scripts/helmcharts/databases/charts/kafka/requirements.yaml new file mode 100755 index 000000000..533875258 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: zookeeper + version: 5.x.x + repository: https://charts.bitnami.com/bitnami + condition: zookeeper.enabled diff --git a/scripts/helmcharts/databases/charts/kafka/templates/NOTES.txt b/scripts/helmcharts/databases/charts/kafka/templates/NOTES.txt new file mode 100755 index 000000000..0347c21c4 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/NOTES.txt @@ -0,0 +1,181 @@ +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "kafka.fullname" . -}} +{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}} +{{- $servicePort := int .Values.service.port -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}} +{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }} + +############################################################################### +### ERROR: You enabled external access to Kafka brokers without specifying ### +### the array of load balancer IPs for Kafka brokers. ### +############################################################################### + +This deployment will be incomplete until you configure the array of load balancer +IPs for Kafka brokers. To complete your deployment follow the steps below: + +1. 
Wait for the load balancer IPs (it may take a few minutes for them to be available): + + kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w + +2. Obtain the load balancer IPs and upgrade your chart: + + {{- range $i, $e := until $replicaCount }} + LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" + {{- end }} + +3. Upgrade you chart: + + helm upgrade {{ .Release.Name }} bitnami/{{ .Chart.Name }} \ + --set replicaCount={{ $replicaCount }} \ + --set externalAccess.enabled=true \ + {{- range $i, $e := until $replicaCount }} + --set externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \ + {{- end }} + --set externalAccess.service.type=LoadBalancer + +{{- else }} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq .Values.auth.clientProtocol "plaintext") }} +--------------------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not configuring the authentication + you have most likely exposed the Kafka service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also configure the Kafka authentication. 
+ +--------------------------------------------------------------------------------------------- +{{- end }} + +** Please be patient while the chart is being deployed ** + +Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster: + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }} + +Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster: + +{{- $brokerList := list }} +{{- range $e, $i := until $replicaCount }} +{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }} +{{- end }} +{{ join "\n" $brokerList | nindent 4 }} + + +{{- if (include "kafka.client.saslAuthentication" .) }} + +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files by executing these commands: + + - kafka_jaas.conf: + +cat > kafka_jaas.conf < client.properties <<>(Value) + name: kafka_controller_$1_$2_$4 + labels: + broker_id: "$3" + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<>(Count) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.server<>(Value) + name: kafka_server_$1_$2_$4 + labels: + client_id: "$3" + - pattern : kafka.network<>(Value) + name: kafka_network_$1_$2_$4 + labels: + network_processor: $3 + - pattern : kafka.network<>(Count) + name: kafka_network_$1_$2_$4 + labels: + request: $3 + - pattern: kafka.server<>(Count|OneMinuteRate) + name: kafka_server_$1_$2_$4 + labels: + topic: $3 + - pattern: kafka.server<>(Value) + name: kafka_server_$1_$2_$3_$4 + - pattern: kafka.server<>(Count|Value|OneMinuteRate) + name: kafka_server_$1_total_$2_$3 + - pattern: kafka.server<>(queue-size) + name: 
kafka_server_$1_$2 + - pattern: java.lang<(.+)>(\w+) + name: java_lang_$1_$4_$3_$2 + - pattern: java.lang<>(\w+) + name: java_lang_$1_$3_$2 + - pattern : java.lang + - pattern: kafka.log<>Value + name: kafka_log_$1_$2 + labels: + topic: $3 + partition: $4 +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml b/scripts/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml new file mode 100755 index 000000000..83edd8422 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml @@ -0,0 +1,45 @@ +{{- if .Values.metrics.jmx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-jmx-metrics + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.jmx.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.jmx.service.type }} + {{- if eq .Values.metrics.jmx.service.type "LoadBalancer" }} + {{- if .Values.metrics.jmx.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.jmx.service.loadBalancerIP }} + {{- end }} + {{- if .Values.metrics.jmx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.jmx.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.metrics.jmx.service.type "ClusterIP") .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ 
.Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.port }} + protocol: TCP + targetPort: metrics + {{- if and (or (eq .Values.metrics.jmx.service.type "NodePort") (eq .Values.metrics.jmx.service.type "LoadBalancer")) (not (empty .Values.metrics.jmx.service.nodePort)) }} + nodePort: {{ .Values.metrics.jmx.service.nodePort }} + {{- else if eq .Values.metrics.jmx.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml new file mode 100755 index 000000000..c547fbb39 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml @@ -0,0 +1,87 @@ +{{- if .Values.metrics.kafka.enabled }} +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "kafka.fullname" . -}} +{{- $servicePort := int .Values.service.port -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kafka.fullname" . }}-exporter + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + template: + metadata: + labels: {{- include "kafka.labels" . 
| nindent 8 }} + app.kubernetes.io/component: metrics + spec: +{{- include "kafka.imagePullSecrets" . | indent 6 }} + containers: + - name: kafka-exporter + image: {{ include "kafka.metrics.kafka.image" . }} + imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" + kafka_exporter \ + {{- range $i, $e := until $replicaCount }} + --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + --sasl.enabled \ + --sasl.username="$SASL_USERNAME" \ + --sasl.password="${sasl_passwords[0]}" \ + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + --tls.enabled \ + {{- if .Values.metrics.kafka.certificatesSecret }} + --tls.ca-file="/opt/bitnami/kafka-exporter/certs/ca-file" \ + --tls.cert-file="/opt/bitnami/kafka-exporter/certs/cert-file" \ + --tls.key-file="/opt/bitnami/kafka-exporter/certs/key-file" \ + {{- end }} + {{- end }} + {{- range $key, $value := .Values.metrics.kafka.extraFlags }} + --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ + {{- end }} + --web.listen-address=:9308 + {{- if (include "kafka.client.saslAuthentication" .) }} + env: + - name: SASL_USERNAME + value: {{ index .Values.auth.jaas.clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + ports: + - name: metrics + containerPort: 9308 + {{- if .Values.metrics.kafka.resources }} + resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} + {{- end }} + {{- if and (include "kafka.tlsEncryption" .) 
.Values.metrics.kafka.certificatesSecret }} + volumeMounts: + - name: kafka-exporter-certificates + mountPath: /opt/bitnami/kafka-exporter/certs/ + readOnly: true + volumes: + - name: kafka-exporter-certificates + secret: + secretName: {{ .Values.metrics.kafka.certificatesSecret }} + defaultMode: 0440 + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml new file mode 100755 index 000000000..54a4ccb0b --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml @@ -0,0 +1,45 @@ +{{- if .Values.metrics.kafka.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-metrics + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.kafka.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.kafka.service.type }} + {{- if eq .Values.metrics.kafka.service.type "LoadBalancer" }} + {{- if .Values.metrics.kafka.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.kafka.service.loadBalancerIP }} + {{- end }} + {{- if .Values.metrics.kafka.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.kafka.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.metrics.kafka.service.type "ClusterIP") 
.Values.metrics.kafka.service.clusterIP }} + clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.kafka.service.port }} + protocol: TCP + targetPort: metrics + {{- if and (or (eq .Values.metrics.kafka.service.type "NodePort") (eq .Values.metrics.kafka.service.type "LoadBalancer")) (not (empty .Values.metrics.kafka.service.nodePort)) }} + nodePort: {{ .Values.metrics.kafka.service.nodePort }} + {{- else if eq .Values.metrics.kafka.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml b/scripts/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml new file mode 100755 index 000000000..0a34d50dd --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml @@ -0,0 +1,16 @@ +{{- if (include "kafka.log4j.createConfigMap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka.log4j.configMapName" . }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + log4j.properties: |- + {{ .Values.log4j | nindent 4 }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml b/scripts/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..cf515becb --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "kafka.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/role.yaml b/scripts/helmcharts/databases/charts/kafka/templates/role.yaml new file mode 100755 index 000000000..943c5bf3c --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/role.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/rolebinding.yaml b/scripts/helmcharts/databases/charts/kafka/templates/rolebinding.yaml new file mode 100755 index 000000000..78f940f85 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "kafka.fullname" . 
}} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml b/scripts/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml new file mode 100755 index 000000000..705545a61 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml @@ -0,0 +1,118 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kafka.fullname" . }}-scripts + labels: {{- include "kafka.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "kafka.fullname" . 
}} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $interBrokerPort := .Values.service.internalPort }} + {{- $clientPort := .Values.service.port }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + # Auxiliar functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + k8s_svc_node_port() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local index=${3:-0} + local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")" + echo "$node_port" + } + k8s_svc_node_port "{{ $releaseNamespace }}" "$SVC_NAME" | 
tee "$SHARED_FILE" + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + export KAFKA_CFG_BROKER_ID="$ID" + + {{- if .Values.externalAccess.enabled }} + # Configure external ip and port + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_IP="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_IP=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.port }} + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + {{- if .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_IP={{ .Values.externalAccess.service.domain }} + {{- else }} + export EXTERNAL_ACCESS_IP=$(curl -s https://ipinfo.io/ip) + {{- end }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_PORT=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + {{- end }} + + # Configure Kafka advertised listeners + {{- if .Values.advertisedListeners }} + export KAFKA_CFG_ADVERTISED_LISTENERS={{ .Values.advertisedListeners }} + {{- else }} + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_IP}:${EXTERNAL_ACCESS_PORT}" + {{- end }} + {{- end }} + + {{- if (include "kafka.tlsEncryption" .) 
}} + if [[ -f "/certs/kafka.truststore.jks" ]] && [[ -f "/certs/kafka-${ID}.keystore.jks" ]]; then + mkdir -p /opt/bitnami/kafka/config/certs + cp "/certs/kafka.truststore.jks" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + cp "/certs/kafka-${ID}.keystore.jks" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled." + exit 1 + fi + {{- end }} + + exec /entrypoint.sh /run.sh diff --git a/scripts/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml b/scripts/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml new file mode 100755 index 000000000..790790b3f --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml new file mode 100755 index 000000000..250bb5306 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kafka.fullname" . 
}}-jmx-metrics + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + endpoints: + - port: http-metrics + path: "/" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml b/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml new file mode 100755 index 000000000..951bf7c41 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kafka.fullname" . }}-metrics + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/statefulset.yaml b/scripts/helmcharts/databases/charts/kafka/templates/statefulset.yaml new file mode 100755 index 000000000..e9b5ce8f9 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/statefulset.yaml @@ -0,0 +1,435 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- $fullname := include "kafka.fullname" . 
}} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $interBrokerPort := .Values.service.internalPort }} +{{- $clientPort := .Values.service.port }} +{{- $interBrokerProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.interBrokerProtocol ) -}} +{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: Parallel + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + serviceName: {{ template "kafka.fullname" . }}-headless + updateStrategy: + type: {{ .Values.updateStrategy | quote }} + {{- if (eq "OnDelete" .Values.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.rollingUpdatePartition }} + {{- end }} + template: + metadata: + labels: {{- include "kafka.labels" . 
| nindent 8 }} + app.kubernetes.io/component: kafka + {{- if .Values.podLabels }} + {{- include "kafka.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "kafka.createConfigmap" .) (include "kafka.createJaasSecret" .) .Values.externalAccess.enabled (include "kafka.metrics.jmx.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "kafka.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createJaasSecret" .) }} + checksum/secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.metrics.jmx.createConfigmap" .) }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "kafka.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: +{{- include "kafka.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.affinity }} + affinity: {{- include "kafka.tplValue" ( dict "value" .Values.affinity "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "kafka.tplValue" ( dict "value" .Values.nodeSelector "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "kafka.tplValue" ( dict "value" .Values.tolerations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext }} + securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.create }} + serviceAccountName: {{ template "kafka.serviceAccountName" . }} + {{- end }} + {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "kafka.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/kafka + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/kafka" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: auto-discovery + image: {{ include "kafka.externalAccess.autoDiscovery.image" . 
}} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: {{- include "kafka.tplValue" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- if .Values.args }} + args: {{- include "kafka.tplValue" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + {{- if .Values.zookeeper.enabled }} + value: {{ include "kafka.zookeeper.fullname" . 
| quote }} + {{- else }} + value: {{ join "," .Values.externalZookeeper.servers | quote }} + {{- end }} + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: {{ .Values.interBrokerListenerName | quote }} + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + {{- if .Values.listenerSecurityProtocolMap }} + value: {{ .Values.listenerSecurityProtocolMap | quote }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $clientProtocol }}" + {{- else }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" + {{- end }} + {{- if or ($clientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.jaas.zookeeperUser }} + - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS + value: {{ include "kafka.auth.saslMechanisms" ( dict "type" .Values.auth.saslMechanisms ) }} + - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL + value: {{ upper .Values.auth.saslInterBrokerMechanism | quote }} + {{- end }} + - name: KAFKA_CFG_LISTENERS + {{- if .Values.listeners }} + value: {{ .Values.listeners }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092,EXTERNAL://:9094" + {{- else }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092" + {{- end }} + {{- if .Values.externalAccess.enabled }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + {{- else }} + - name: KAFKA_CFG_ADVERTISED_LISTENERS + {{- if .Values.advertisedListeners }} + value: {{ .Values.advertisedListeners }} + {{- else }} + value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }}" + {{- end }} + {{- end }} + - name: ALLOW_PLAINTEXT_LISTENER + value: {{ ternary "yes" "no" (or 
.Values.auth.enabled .Values.allowPlaintextListener) | quote }} + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_OPTS + value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" + {{- if (include "kafka.client.saslAuthentication" .) }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.auth.jaas.clientUsers | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + {{- if .Values.auth.jaas.zookeeperUser }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: "SASL" + - name: KAFKA_ZOOKEEPER_USER + value: {{ .Values.auth.jaas.zookeeperUser | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: zookeeper-password + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.auth.jaas.interBrokerUser | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: inter-broker-password + {{- end }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) 
}} + - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM + value: {{ .Values.auth.tlsEndpointIdentificationAlgorithm | quote }} + {{- if .Values.auth.jksPassword }} + - name: KAFKA_CERTIFICATE_PASSWORD + value: {{ .Values.auth.jksPassword | quote }} + {{- end }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: JMX_PORT + value: "5555" + {{- end }} + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: {{ .Values.deleteTopicEnable | quote }} + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: {{ .Values.autoCreateTopicsEnable | quote }} + - name: KAFKA_HEAP_OPTS + value: {{ .Values.heapOpts | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: {{ .Values.logFlushIntervalMessages | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: {{ .Values.logFlushIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: {{ .Values.logRetentionBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS + value: {{ .Values.logRetentionCheckIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: {{ .Values.logRetentionHours | quote }} + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: {{ .Values.maxMessageBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: {{ .Values.logSegmentBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_DIRS + value: {{ .Values.logsDirs | quote }} + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: {{ .Values.defaultReplicationFactor | quote }} + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: {{ .Values.offsetsTopicReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: {{ .Values.transactionStateLogReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: {{ .Values.transactionStateLogMinIsr | quote }} + - name: KAFKA_CFG_NUM_IO_THREADS + value: {{ .Values.numIoThreads | quote }} + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: {{ 
.Values.numNetworkThreads | quote }} + - name: KAFKA_CFG_NUM_PARTITIONS + value: {{ .Values.numPartitions | quote }} + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: {{ .Values.numRecoveryThreadsPerDataDir | quote }} + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: {{ .Values.socketReceiveBufferBytes | quote }} + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: {{ .Values.socketSendBufferBytes | quote }} + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: {{ .Values.zookeeperConnectionTimeoutMs | quote }} + {{- if .Values.extraEnvVars }} + {{ include "kafka.tplValue" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: kafka-client + containerPort: 9092 + - name: kafka-internal + containerPort: {{ $interBrokerPort }} + {{- if .Values.externalAccess.enabled }} + - name: kafka-external + containerPort: 9094 + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customlivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + successThreshold: {{ 
.Values.readinessProbe.successThreshold }} +          {{- else if .Values.customReadinessProbe }} +          readinessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} +          {{- end }} +          {{- if .Values.resources }} +          resources: {{- toYaml .Values.resources | nindent 12 }} +          {{- end }} +          volumeMounts: +            - name: data +              mountPath: /bitnami/kafka +            {{- if or .Values.config .Values.existingConfigmap }} +            - name: kafka-config +              mountPath: /bitnami/kafka/config/server.properties +              subPath: server.properties +            {{- end }} +            {{- if or .Values.log4j .Values.existingLog4jConfigMap }} +            - name: log4j-config +              mountPath: /bitnami/kafka/config/log4j.properties +              subPath: log4j.properties +            {{- end }} +            - name: scripts +              mountPath: /scripts/setup.sh +              subPath: setup.sh +            {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} +            - name: shared +              mountPath: /shared +            {{- end }} +            {{- if (include "kafka.tlsEncryption" .) }} +            - name: kafka-certificates +              mountPath: /certs +              readOnly: true +            {{- end }} +            {{- if .Values.extraVolumeMounts }} +            {{- toYaml .Values.extraVolumeMounts | nindent 12 }} +            {{- end }} +        {{- if .Values.metrics.jmx.enabled }} +        - name: jmx-exporter +          image: {{ template "kafka.metrics.jmx.image" .
}} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + command: + - java + - -XX:+UnlockExperimentalVMOptions + - -XX:+UseCGroupMemoryLimitForHeap + - -XX:MaxRAMFraction=1 + - -XshowSettings:vm + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + ports: + - name: metrics + containerPort: 5556 + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.sidecars }} + {{- include "kafka.tplValue" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + configMap: + name: {{ include "kafka.configmapName" . }} + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + - name: scripts + configMap: + name: {{ include "kafka.fullname" . }}-scripts + defaultMode: 0755 + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + emptyDir: {} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: kafka-certificates + secret: + secretName: {{ include "kafka.jksSecretName" . }} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} +{{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) 
}} +{{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "kafka.tplValue" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "kafka.storageClass" . | nindent 8 }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml b/scripts/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml new file mode 100755 index 000000000..eefe0046d --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml @@ -0,0 +1,52 @@ +{{- if .Values.externalAccess.enabled }} +{{- $fullName := include "kafka.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" $ }}-{{ $i }}-external + labels: {{- include "kafka.labels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if $root.Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations }} + annotations: + {{- if $root.Values.externalAccess.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ 
$root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-kafka + port: {{ $root.Values.externalAccess.service.port }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: kafka-external + selector: {{- include "kafka.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/kafka/templates/svc-headless.yaml b/scripts/helmcharts/databases/charts/kafka/templates/svc-headless.yaml new file mode 100755 index 000000000..e7c2e5e6e --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/svc-headless.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-headless + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: {{ .Values.service.port }} + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: {{ .Values.service.internalPort }} + protocol: TCP + targetPort: kafka-internal + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/scripts/helmcharts/databases/charts/kafka/templates/svc.yaml b/scripts/helmcharts/databases/charts/kafka/templates/svc.yaml new file mode 100755 index 000000000..189cb9ffd --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/templates/svc.yaml @@ -0,0 +1,49 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-client + port: {{ .Values.service.port }} + protocol: TCP + targetPort: kafka-client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if and .Values.externalAccess.enabled (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }} + - name: tcp-external + port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: kafka-external + {{- if (not (empty .Values.service.nodePorts.external)) }} + nodePort: {{ .Values.service.nodePorts.external }} + {{- end }} + {{- end }} + selector: {{- include "kafka.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/scripts/helmcharts/databases/charts/kafka/values-production.yaml b/scripts/helmcharts/databases/charts/kafka/values-production.yaml new file mode 100755 index 000000000..af6f43dba --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/values-production.yaml @@ -0,0 +1,931 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.6.0-debian-10-r30 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## String to partially override kafka.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override kafka.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kafka Configuration +## Specify content for server.properties +## The server.properties is 
auto-generated based on other parameters when this paremeter is not specified +## +## Example: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +# config: + +## ConfigMap with Kafka Configuration +## NOTE: This will override config +## +# existingConfigmap: + +## Kafka Log4J Configuration +## An optional log4j.properties file to overwrite the default of the Kafka brokers. +## See an example log4j.properties at: +## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +# log4j: + +## Kafka Log4j ConfigMap +## The name of an existing ConfigMap containing a log4j.properties file. +## NOTE: this will override log4j. +## +# existingLog4jConfigMap: + +## Kafka's Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m + +## Switch to enable topic deletion or not. +## +deleteTopicEnable: false + +## Switch to enable auto creation of topics. +## Enabling auto creation of topics not recommended for production or similar environments. +## +autoCreateTopicsEnable: false + +## The number of messages to accept before forcing a flush of data to disk. +## +logFlushIntervalMessages: 10000 + +## The maximum amount of time a message can sit in a log before we force a flush. 
+## +logFlushIntervalMs: 1000 + +## A size-based retention policy for logs. +## +logRetentionBytes: _1073741824 + +## The interval at which log segments are checked to see if they can be deleted. +## +logRetentionCheckIntervalMs: 300000 + +## The minimum age of a log file to be eligible for deletion due to age. +## +logRetentionHours: 168 + +## The maximum size of a log segment file. When this size is reached a new log segment will be created. +## +logSegmentBytes: _1073741824 + +## A comma separated list of directories under which to store log files. +## +logsDirs: /bitnami/kafka/data + +## The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 + +## Default replication factors for automatically created topics +## +defaultReplicationFactor: 3 + +## The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 3 + +## The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 3 + +## Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 3 + +## The number of threads doing disk I/O. +## +numIoThreads: 8 + +## The number of threads handling network requests. +## +numNetworkThreads: 3 + +## The default number of log partitions per topic. +## +numPartitions: 1 + +## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +## +numRecoveryThreadsPerDataDir: 1 + +## The receive buffer (SO_RCVBUF) used by the socket server. +## +socketReceiveBufferBytes: 102400 + +## The maximum size of a request that the socket server will accept (protection against OOM). +## +socketRequestMaxBytes: _104857600 + +## The send buffer (SO_SNDBUF) used by the socket server. +## +socketSendBufferBytes: 102400 + +## Timeout in ms for connecting to zookeeper. +## +zookeeperConnectionTimeoutMs: 6000 + +## Command and args for running the container. 
Use array form +## +command: + - /scripts/setup.sh +args: + +## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## Example: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Examples: +# extraVolumes: +# - name: kafka-jaas +# secret: +# secretName: kafka-jaas +# extraVolumeMounts: +# - name: kafka-jaas +# mountPath: /bitnami/kafka/config/kafka_jaas.conf +# subPath: kafka_jaas.conf +extraVolumes: [] +extraVolumeMounts: [] + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Authentication parameteres +## https://github.com/bitnami/bitnami-docker-kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## + clientProtocol: sasl + interBrokerProtocol: sasl + + ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls + ## + saslMechanisms: plain,scram-sha-256,scram-sha-512 + ## SASL mechanism for inter broker communication + ## + saslInterBrokerMechanism: plain + + ## Name of the existing secret containing the truststore and + ## one keystore per Kafka broker you have in the Kafka cluster. + ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. + ## Create this secret following the steps below: + ## 1) Generate your trustore and keystore files. 
Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. + ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... + ## Alternatively, you can put your JKS files under the files/jks directory + ## + # jksSecret: + + ## Password to access the JKS files when they are password-protected. + ## + # jksPassword: + + ## The endpoint identification algorithm used by clients to validate server host name. + ## Disable server host name verification by setting it to an empty string + ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + tlsEndpointIdentificationAlgorithm: https + + ## JAAS configuration for SASL authentication + ## MANDATORY when method is 'sasl', or 'sasl_tls' + ## + jaas: + ## Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + + ## Kafka client passwords + ## + ## clientPasswords: + ## - password1 + ## - password2 + ## + clientPasswords: [] + + ## Kafka inter broker communication user + ## + interBrokerUser: admin + + ## Kafka inter broker communication password + ## + interBrokerPassword: "" + + ## Kafka Zookeeper user + ## + zookeeperUser: zookeeperUser + + ## Kafka Zookeeper password + ## + zookeeperPassword: zookeeperPassword + + ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. 
+ ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-password=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + # existingSecret: + +## The address(es) the socket server listens on. +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] + +## The address(es) (hostname:port) the brokers will advertise to producers and consumers. +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] + +## The listener->protocol mapping +## When it's nil, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +# listenerSecurityProtocolMap: + +## Allow to use the PLAINTEXT listener. +## +allowPlaintextListener: false + +## Name of listener used for communication between brokers. +## +interBrokerListenerName: INTERNAL + +## Number of Kafka brokers to deploy +## +replicaCount: 3 + +## StrategyType, can be set to RollingUpdate or OnDelete by default. +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Pod annotations. 
Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## Kafka containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 1Gi + requests: {} + # cpu: 250m + # memory: 256Mi + +## Kafka containers' liveness and readiness probes. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 10 + timeoutSeconds: 5 + # failureThreshold: 3 + # periodSeconds: 10 + # successThreshold: 1 +readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + # periodSeconds: 10 + # successThreshold: 1 + +## Pod Disruption Budget configuration +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + # minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: 1 + +## Add sidecars to the pod. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## Kafka port for client connections + ## + port: 9092 + ## Kafka port for inter-broker connections + ## + internalPort: 9093 + ## Kafka port for external connections + ## + externalPort: 9094 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + external: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## External Access to Kafka brokers configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.17.12-debian-10-r3 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## A new service per broker will be created + ## + service: + ## Service type. Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 9094 + ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. 
Evaluated as a template + ## + annotations: {} + +## Persistence paramaters +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + # storageClass: "-" + ## PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## PVC size + ## + size: 8Gi + ## PVC annotations + ## + annotations: {} + +## Init Container paramaters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + enabled: false + ## Bitnami Minideb image + ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following +  #   lines, adjust them as necessary, and remove the curly braces after 'resources:'. +  limits: {} +  #   cpu: 100m +  #   memory: 128Mi +  requests: {} +  #   cpu: 100m +  #   memory: 128Mi + +## Kafka pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: +  ## Specifies whether a ServiceAccount should be created +  ## +  create: true +  ## The name of the ServiceAccount to use. +  ## If not set and create is true, a name is generated using the kafka.fullname template +  ## +  # name: + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: +  ## Specifies whether RBAC rules should be created +  ## binding Kafka ServiceAccount to a role +  ## that allows Kafka pods querying the K8s API +  ## +  create: false + +## Prometheus Exporters / Metrics +## +metrics: +  ## Prometheus Kafka Exporter: exposes complementary metrics to JMX Exporter +  ## +  kafka: +    enabled: true + +    ## Bitnami Kafka exporter image +    ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ +    ## +    image: +      registry: docker.io +      repository: bitnami/kafka-exporter +      tag: 1.2.0-debian-10-r220 +      ## Specify a imagePullPolicy +      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' +      ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images +      ## +      pullPolicy: IfNotPresent +      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) +      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ +      ## Example: +      ## pullSecrets: +      ##   - myRegistryKeySecretName +      ## +      pullSecrets: [] + +    ## Extra flags to be passed to Kafka exporter +    ## Example: +    ## extraFlags: +    ##   tls.insecure-skip-tls-verify: "" +    ##   web.telemetry-path: "/metrics" +    ## +    extraFlags: {} + +    ## Name of the existing secret containing the optional certificate and key
files + ## for Kafka Exporter client authentication + ## + # certificatesSecret: + + ## Prometheus Kafka Exporter's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## Kafka Exporter Service type + ## + type: ClusterIP + ## Kafka Exporter Prometheus port + ## + port: 9308 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the Kafka Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## + jmx: + enabled: true + + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.14.0-debian-10-r15 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Prometheus JMX Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## JMX Exporter Service type + ## + type: ClusterIP + ## JMX Exporter Prometheus port + ## + port: 5556 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the JMX Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/path: "/" + + ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted + ## values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + + ## Prometheus JMX exporter configuration + ## Specify content for jmx-kafka-prometheus.yml. 
Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + + ## ConfigMap with Prometheus JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + # existingConfigmap: + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + +## +## Zookeeper chart configuration +## +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + enabled: true + auth: + ## Enable Zookeeper auth + ## + enabled: true + ## User that will use Zookeeper clients to auth + ## + clientUser: zookeeperUser + ## Password that will use Zookeeper clients to auth + ## + clientPassword: zookeeperPassword + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: zookeeperUser + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. 
Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: zookeeperPassword + metrics: + enabled: true + +## This value is only used when zookeeper.enabled is set to false +## +externalZookeeper: + ## Server or list of external zookeeper servers to use. + ## + servers: [] diff --git a/scripts/helmcharts/databases/charts/kafka/values.yaml b/scripts/helmcharts/databases/charts/kafka/values.yaml new file mode 100755 index 000000000..154d71bd5 --- /dev/null +++ b/scripts/helmcharts/databases/charts/kafka/values.yaml @@ -0,0 +1,934 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.6.0-debian-10-r30 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## String to partially override kafka.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override kafka.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels 
to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kafka Configuration +## Specify content for server.properties +## The server.properties is auto-generated based on other parameters when this paremeter is not specified +## +## Example: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +# config: + +## ConfigMap with Kafka Configuration +## NOTE: This will override config +## +# existingConfigmap: + +## Kafka Log4J Configuration +## An optional log4j.properties file to overwrite the default of the Kafka brokers. +## See an example log4j.properties at: +## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +# log4j: + +## Kafka Log4j ConfigMap +## The name of an existing ConfigMap containing a log4j.properties file. +## NOTE: this will override log4j. +## +# existingLog4jConfigMap: + +## Kafka's Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m + +## Switch to enable topic deletion or not. +## +deleteTopicEnable: false + +## Switch to enable auto creation of topics. +## Enabling auto creation of topics not recommended for production or similar environments. 
+## +autoCreateTopicsEnable: true + +## The number of messages to accept before forcing a flush of data to disk. +## +logFlushIntervalMessages: 10000 + +## The maximum amount of time a message can sit in a log before we force a flush. +## +logFlushIntervalMs: 1000 + +## A size-based retention policy for logs. +## +logRetentionBytes: _1073741824 + +## The interval at which log segments are checked to see if they can be deleted. +## +logRetentionCheckIntervalMs: 300000 + +## The minimum age of a log file to be eligible for deletion due to age. +## +logRetentionHours: 168 + +## The maximum size of a log segment file. When this size is reached a new log segment will be created. +## +logSegmentBytes: _1073741824 + +## A comma separated list of directories under which to store log files. +## +logsDirs: /bitnami/kafka/data + +## The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 + +## Default replication factors for automatically created topics +## +defaultReplicationFactor: 1 + +## The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 1 + +## The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 1 + +## Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 1 + +## The number of threads doing disk I/O. +## +numIoThreads: 8 + +## The number of threads handling network requests. +## +numNetworkThreads: 3 + +## The default number of log partitions per topic. +## +numPartitions: 1 + +## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +## +numRecoveryThreadsPerDataDir: 1 + +## The receive buffer (SO_RCVBUF) used by the socket server. +## +socketReceiveBufferBytes: 102400 + +## The maximum size of a request that the socket server will accept (protection against OOM). +## +socketRequestMaxBytes: _104857600 + +## The send buffer (SO_SNDBUF) used by the socket server. 
+## +socketSendBufferBytes: 102400 + +## Timeout in ms for connecting to zookeeper. +## +zookeeperConnectionTimeoutMs: 6000 + +## Command and args for running the container. Use array form +## +command: + - /scripts/setup.sh +args: + +## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## Example: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] + +## extraVolumes and extraVolumeMounts allow you to mount other volumes +## Examples: +# extraVolumes: +# - name: kafka-jaas +# secret: +# secretName: kafka-jaas +# extraVolumeMounts: +# - name: kafka-jaas +# mountPath: /bitnami/kafka/config/kafka_jaas.conf +# subPath: kafka_jaas.conf +extraVolumes: [] +extraVolumeMounts: [] + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Authentication parameters +## https://github.com/bitnami/bitnami-docker-kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## + clientProtocol: plaintext + interBrokerProtocol: plaintext + + ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls + ## + saslMechanisms: plain,scram-sha-256,scram-sha-512 + ## SASL mechanism for inter broker communication + ## + saslInterBrokerMechanism: plain + + ## Name of the existing secret containing the truststore and + ## one keystore per Kafka broker you have in the Kafka cluster. 
+ ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. + ## Create this secret following the steps below: + ## 1) Generate your trustore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. + ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... + ## Alternatively, you can put your JKS files under the files/jks directory + ## + # jksSecret: + + ## Password to access the JKS files when they are password-protected. + ## + # jksPassword: + + ## The endpoint identification algorithm used by clients to validate server host name. + ## Disable server host name verification by setting it to an empty string + ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + tlsEndpointIdentificationAlgorithm: https + + ## JAAS configuration for SASL authentication + ## MANDATORY when method is 'sasl', or 'sasl_tls' + ## + jaas: + ## Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + + ## Kafka client passwords. This is mandatory if more than one user is specified in clientUsers. 
+ ## + ## clientPasswords: + ## - password1 + ## - password2" + ## + clientPasswords: [] + + ## Kafka inter broker communication user + ## + interBrokerUser: admin + + ## Kafka inter broker communication password + ## + interBrokerPassword: "" + + ## Kafka Zookeeper user + ## + # zookeeperUser: + + ## Kafka Zookeeper password + ## + # zookeeperPassword: + + ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + # existingSecret: + +## The address(es) the socket server listens on. +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] + +## The address(es) (hostname:port) the brokers will advertise to producers and consumers. +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] + +## The listener->protocol mapping +## When it's nil, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +# listenerSecurityProtocolMap: + +## Allow to use the PLAINTEXT listener. +## +allowPlaintextListener: true + +## Name of listener used for communication between brokers. +## +interBrokerListenerName: INTERNAL + +## Number of Kafka brokers to deploy +## +replicaCount: 2 + +## StrategyType, can be set to RollingUpdate or OnDelete by default. 
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## Kafka containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 1Gi + requests: {} + # cpu: 250m + # memory: 256Mi + +## Kafka containers' liveness and readiness probes. Evaluated as a template. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + # failureThreshold: 3 + # periodSeconds: 10 + # successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + # periodSeconds: 10 + # successThreshold: 1 + +## Custom liveness/readiness probes that will override the default ones +## +customLivenessProbe: {} +customReadinessProbe: {} + +## Pod Disruption Budget configuration +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + # minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: 1 + +## Add sidecars to the pod. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## Kafka port for client connections + ## + port: 9092 + ## Kafka port for inter-broker connections + ## + internalPort: 9093 + ## Kafka port for external connections + ## + externalPort: 9094 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + external: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## External Access to Kafka brokers configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.17.12-debian-10-r3 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## A new service per broker will be created + ## + service: + ## Service type. Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 9094 + ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. 
Evaluated as a template + ## + annotations: {} + +## Persistence paramaters +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. + ## + # storageClass: "-" + ## PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## PVC size + ## + size: 8Gi + ## PVC annotations + ## + annotations: {} + +## Init Container paramaters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + enabled: false + ## Bitnami Minideb image + ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## Kafka pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fluentd.fullname template + ## + # name: + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## binding Kafka ServiceAccount to a role + ## that allows Kafka pods querying the K8s API + ## + create: false + +## Prometheus Exporters / Metrics +## +metrics: + ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter + ## + kafka: + enabled: false + + ## Bitnami Kafka exporter image + ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.2.0-debian-10-r220 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Extra flags to be passed to Kafka exporter + ## Example: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" + ## + extraFlags: {} + + ## Name of the existing secret containing the optional certificate and key 
files + ## for Kafka Exporter client authentication + ## + # certificatesSecret: + + ## Prometheus Kafka Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## Kafka Exporter Service type + ## + type: ClusterIP + ## Kafka Exporter Prometheus port + ## + port: 9308 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the Kafka Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus JMX Exporter: exposes the majority of Kafka's metrics + ## + jmx: + enabled: false + + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.14.0-debian-10-r15 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Prometheus JMX Exporter's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## JMX Exporter Service type + ## + type: ClusterIP + ## JMX Exporter Prometheus port + ## + port: 5556 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the JMX Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/path: "/" + + ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted + ## values will be exposed via JMX Exporter. They must also be exposed via Rules. To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + + ## Prometheus JMX exporter configuration + ## Specify content for jmx-kafka-prometheus.yml. 
Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + + ## ConfigMap with Prometheus JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + # existingConfigmap: + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + +## +## Zookeeper chart configuration +## +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + enabled: true + auth: + ## Enable Zookeeper auth + ## + enabled: false + ## User that will use Zookeeper clients to auth + ## + # clientUser: + ## Password that will use Zookeeper clients to auth + ## + # clientPassword: + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + # serverUsers: + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. 
Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + # serverPasswords: + +## This value is only used when zookeeper.enabled is set to false +## +externalZookeeper: + ## Server or list of external zookeeper servers to use. + ## + servers: [] diff --git a/scripts/helmcharts/databases/charts/minio/.helmignore b/scripts/helmcharts/databases/charts/minio/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helmcharts/databases/charts/minio/Chart.yaml b/scripts/helmcharts/databases/charts/minio/Chart.yaml new file mode 100755 index 000000000..395e3ac91 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 2020.10.9 +description: MinIO is an object storage server, compatible with Amazon S3 cloud storage + service, mainly used for storing unstructured data (such as photos, videos, log + files, etc.) 
+engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/minio +icon: https://bitnami.com/assets/stacks/minio/img/minio-stack-220x234.png +keywords: +- minio +- storage +- object-storage +- s3 +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: minio +sources: +- https://github.com/bitnami/bitnami-docker-minio +- https://min.io +version: 3.7.14 diff --git a/scripts/helmcharts/databases/charts/minio/README.md b/scripts/helmcharts/databases/charts/minio/README.md new file mode 100755 index 000000000..4a4c45c65 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/README.md @@ -0,0 +1,259 @@ +# MinIO + +[MinIO](https://min.io) is an object storage server, compatible with Amazon S3 cloud storage service, mainly used for storing unstructured data (such as photos, videos, log files, etc.) + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/minio +``` + +## Introduction + +This chart bootstraps a [MinIO](https://github.com/bitnami/bitnami-docker-minio) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. 
+ +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure +- ReadWriteMany volumes for deployment scaling + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/minio +``` + +These commands deploy MinIO on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the MinIO chart and their default values. + +| Parameter | Description | Default | +|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.minio.existingSecret` | Name of existing secret to use for MinIO credentials (overrides `existingSecret`) | `nil` | +| `global.minio.accessKey` | MinIO Access Key (overrides `accessKey.password`) | `nil` | +| `global.minio.secretKey` | MinIO Secret Key (overrides `secretKey.password`) | `nil` | +| `image.registry` | MinIO image registry | `docker.io` | +| `image.repository` | MinIO image name | `bitnami/minio` | +| 
`image.tag` | MinIO image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `nameOverride` | String to partially override minio.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override minio.fullname template with a string | `nil` | +| `schedulerName` | Specifies the schedulerName, if it's nil uses kube-scheduler | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | If serviceAccount.create is enabled, what should the serviceAccount name be - otherwise defaults to the fullname | `nil` | +| `clusterDomain` | Kubernetes cluster domain | `cluster.local` | +| `clientImage.registry` | MinIO Client image registry | `docker.io` | +| `clientImage.repository` | MinIO Client image name | `bitnami/minio-client` | +| `clientImage.tag` | MinIO Client image tag | `{TAG_NAME}` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources` | Init container resource requests/limit | `nil` | +| `mode` | MinIO server mode (`standalone` or `distributed`) | `standalone` | +| `statefulset.replicaCount` | Number of pods (only for Minio 
distributed mode). Should be 4 <= x <= 32 | `4` | +| `statefulset.updateStrategy` | Statefulset update strategy policy | `RollingUpdate` | +| `statefulset.podManagementpolicy` | Statefulset pods management policy | `Parallel` | +| `deployment.updateStrategy` | Deployment update strategy policy | `Recreate` | +| `existingSecret` | Existing secret with MinIO credentials | `nil` | +| `useCredentialsFile` | Have the secret mounted as a file instead of env vars | `false` | +| `forceNewKeys` | Force admin credentials (access and secret key) to be reconfigured every time they change in the secrets | `false` | +| `accessKey.password` | MinIO Access Key. Ignored if existing secret is provided. | _random 10 character alphanumeric string_ | +| `accessKey.forcePassword` | Force users to specify an Access Key | `false` | +| `secretKey.password` | MinIO Secret Key. Ignored if existing secret is provided. | _random 40 character alphanumeric string_ | +| `secretKey.forcePassword` | Force users to specify an Secret Key | `false` | +| `defaultBuckets` | Comma, semi-colon or space separated list of buckets to create (only in standalone mode) | `nil` | +| `disableWebUI` | Disable MinIO Web UI | `false` | +| `extraEnv` | Any extra environment variables you would like to pass to the pods | `{}` | +| `command` | Command for the minio container | `{}` | +| `resources` | Minio containers' resources | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `affinity` | Map of node/pod affinities | `{}` (The value is evaluated as a template) | +| `nodeSelector` | Node labels for pod assignment | `{}` (The value is evaluated as a template) | +| `tolerations` | Tolerations for pod assignment | `[]` (The value is evaluated as a template) | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `livenessProbe.enabled` | Enable/disable the 
Liveness probe | `true` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `60` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.enabled` | Enable/disable the Readiness probe | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| `1` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `persistence.mountPath` | Path to mount the volume at | `/data` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.annotations` | Persistent Volume annotations | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (only in "standalone" mode) | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | MinIO service port | `9000` | +| `service.nodePort` | Port to bind to for NodePort service type | `nil` | +| `service.loadBalancerIP` | Static IP Address to use for LoadBalancer service type | `nil` | +| `service.annotations` | Kubernetes service annotations | `{}` | +| `ingress.enabled` | Enable/disable ingress | `false` | +| `ingress.certManager` | Add annotations for cert-manager | `false` | +| `ingress.annotations` | Ingress annotations | `[]` | +| `ingress.labels` | Ingress additional labels | `{}` | +| `ingress.hosts[0].name` | Hostname to your MinIO installation | `minio.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsHosts` | Array of TLS hosts for ingress record (defaults to `ingress.hosts[0].name` if `nil`) | `nil` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `minio.local-tls` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `prometheusAuthType` | Authentication mode for Prometheus (`jwt` or `public`) 
| `public` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set accessKey.password=minio-access-key \ + --set secretKey.password=minio-secret-key \ + bitnami/minio +``` + +The above command sets the MinIO Server access key and secret key to `minio-access-key` and `minio-secret-key`, respectively. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/minio +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- MinIO server mode: +```diff +- mode: standalone ++ mode: distributed +``` + +- Disable MinIO Web UI: +```diff +- disableWebUI: false ++ disableWebUI: true +``` + +- Annotations to be added to pods: +```diff +- podAnnotations: {} ++ podAnnotations: ++ prometheus.io/scrape: "true" ++ prometheus.io/path: "/minio/prometheus/metrics" ++ prometheus.io/port: "9000" +``` + +- Pod resources: +```diff +- resources: {} ++ resources: ++ requests: ++ memory: 256Mi ++ cpu: 250m +``` + +- Enable NetworkPolicy: +```diff +- networkPolicy.enabled: false ++ networkPolicy.enabled: true +``` + +- Don't require client label for connections: +```diff +- networkPolicy.allowExternal: true ++ networkPolicy.allowExternal: false +``` + +- Change Prometheus authentication: +```diff +- prometheusAuthType: public ++ prometheusAuthType: jwt +``` + +### Distributed mode + +You can start the MinIO chart in distributed mode with the following parameter: `mode=distributed` + +This chart sets Minio server in distributed mode with 4 nodes by default. You can change the number of nodes setting the `statefulset.replicaCount` parameter, for example to `statefulset.replicaCount=8` + +> Note: that the number of replicas must even, greater than 4 and lower than 32 + +### Prometheus exporter + +MinIO exports Prometheus metrics at `/minio/prometheus/metrics`. To allow Prometheus collecting your MinIO metrics, modify the `values.yaml` adding the corresponding annotations: + +```diff +- podAnnotations: {} ++ podAnnotations: ++ prometheus.io/scrape: "true" ++ prometheus.io/path: "/minio/prometheus/metrics" ++ prometheus.io/port: "9000" +``` + +> Find more information about MinIO metrics at https://docs.min.io/docs/how-to-monitor-minio-using-prometheus.html + +## Persistence + +The [Bitnami MinIO](https://github.com/bitnami/bitnami-docker-minio) image stores data at the `/data` path of the container. 
+ +The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning. + +### Adjust permissions of persistent volume mountpoint + +As the image run as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`. diff --git a/scripts/helmcharts/databases/charts/minio/ci/values-production.yaml b/scripts/helmcharts/databases/charts/minio/ci/values-production.yaml new file mode 100755 index 000000000..d5e966334 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/ci/values-production.yaml @@ -0,0 +1,27 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct + +volumePermissions: + enabled: true + +mode: distributed + +useCredentialsFile: true + +disableWebUI: false + +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/minio/prometheus/metric" + prometheus.io/port: "9000" + +resources: + requests: + cpu: 250m + memory: 256Mi + +ingress: + enabled: true + +networkPolicy: + enabled: true diff --git a/scripts/helmcharts/databases/charts/minio/templates/NOTES.txt b/scripts/helmcharts/databases/charts/minio/templates/NOTES.txt new file mode 100755 index 000000000..e7492fbe7 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/NOTES.txt @@ -0,0 +1,71 @@ +** Please be patient while the chart is being deployed ** + +MinIO can be accessed via port {{ .Values.service.port }} on the following DNS 
name from within your cluster: + + {{ include "minio.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To get your credentials run: + + export MINIO_ACCESS_KEY=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "minio.fullname" . }} -o jsonpath="{.data.access-key}" | base64 --decode) + export MINIO_SECRET_KEY=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "minio.fullname" . }} -o jsonpath="{.data.secret-key}" | base64 --decode) + +To connect to your MinIO server using a client: + +- Run a MinIO Client pod and append the desired command (e.g. 'admin info server'): + + kubectl run --namespace {{ .Release.Namespace }} {{ include "minio.fullname" . }}-client \ + --rm --tty -i --restart='Never' \ + --env MINIO_SERVER_ACCESS_KEY=$MINIO_ACCESS_KEY \ + --env MINIO_SERVER_SECRET_KEY=$MINIO_SECRET_KEY \ + --env MINIO_SERVER_HOST={{ include "minio.fullname" . }} \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "minio.name" . }}-client=true" \ + {{- end }} + --image {{ template "minio.clientImage" . }} -- admin info server minio + +{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + + NOTE: Since NetworkPolicy is enabled, only pods with label + "{{ template "minio.fullname" . }}-client=true" will be able to connect to MinIO. +{{- end }} +{{- if not .Values.disableWebUI }} + +To access the MinIO web UI: + +- Get the MinIO URL: + +{{- if .Values.ingress.enabled }} + + You should be able to access your new MinIO web UI through + + {{- range .Values.ingress.hosts }} + {{ if .tls }}https{{ else }}http{{ end }}://{{ .name }}/minio/ + {{- end }} +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "minio.fullname" . 
}}' + + {{- $port:=.Values.service.port | toString }} + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "minio.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "MinIO web URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.service.port }}{{ end }}/minio" + +{{- else if contains "ClusterIP" .Values.service.type }} + + echo "MinIO web URL: http://127.0.0.1:9000/minio" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "minio.fullname" . }} 9000:{{ .Values.service.port }} + +{{- else if contains "NodePort" .Values.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "minio.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "MinIO web URL: http://$NODE_IP:$NODE_PORT/minio" + +{{- end }} +{{- else }} + + WARN: MinIO Web UI is disabled. +{{- end }} + +{{ include "minio.validateValues" . }} +{{ include "minio.checkRollingTags" . }} diff --git a/scripts/helmcharts/databases/charts/minio/templates/_helpers.tpl b/scripts/helmcharts/databases/charts/minio/templates/_helpers.tpl new file mode 100755 index 000000000..dd67e53e6 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/_helpers.tpl @@ -0,0 +1,265 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "minio.labels" -}} +app.kubernetes.io/name: {{ include "minio.name" . }} +helm.sh/chart: {{ include "minio.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "minio.matchLabels" -}} +app.kubernetes.io/name: {{ include "minio.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Return the proper MinIO image name +*/}} +{{- define "minio.image" -}} +{{- $registryName := coalesce .Values.global.imageRegistry .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper MinIO Client image name +*/}} +{{- define "minio.clientImage" -}} +{{- $registryName := coalesce .Values.global.imageRegistry .Values.clientImage.registry -}} +{{- $repositoryName := .Values.clientImage.repository -}} +{{- $tag := .Values.clientImage.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{- $imagePullSecrets := coalesce .Values.global.imagePullSecrets .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets -}} +{{- if $imagePullSecrets }} +imagePullSecrets: +{{- range $imagePullSecrets }} + - name: {{ . }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return MinIO accessKey +*/}} +{{- define "minio.accessKey" -}} +{{- $accessKey := coalesce .Values.global.minio.accessKey .Values.accessKey.password -}} +{{- if $accessKey }} + {{- $accessKey -}} +{{- else if (not .Values.accessKey.forcePassword) }} + {{- randAlphaNum 10 -}} +{{- else -}} + {{ required "An Access Key is required!" .Values.accessKey.password }} +{{- end -}} +{{- end -}} + +{{/* +Return MinIO secretKey +*/}} +{{- define "minio.secretKey" -}} +{{- $secretKey := coalesce .Values.global.minio.secretKey .Values.secretKey.password -}} +{{- if $secretKey }} + {{- $secretKey -}} +{{- else if (not .Values.secretKey.forcePassword) }} + {{- randAlphaNum 40 -}} +{{- else -}} + {{ required "A Secret Key is required!" .Values.secretKey.password }} +{{- end -}} +{{- end -}} + +{{/* +Get the credentials secret. 
+*/}} +{{- define "minio.secretName" -}} +{{- if .Values.global.minio.existingSecret }} + {{- printf "%s" .Values.global.minio.existingSecret -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" .Values.existingSecret -}} +{{- else -}} + {{- printf "%s" (include "minio.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "minio.createSecret" -}} +{{- if .Values.global.minio.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "minio.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "minio.validateValues.mode" .) -}} +{{- $messages := append $messages (include "minio.validateValues.replicaCount" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of MinIO - must provide a valid mode ("distributed" or "standalone") */}} +{{- define "minio.validateValues.mode" -}} +{{- if and (ne .Values.mode "distributed") (ne .Values.mode "standalone") -}} +minio: mode + Invalid mode selected. Valid values are "distributed" and + "standalone". Please set a valid mode (--set mode="xxxx") +{{- end -}} +{{- end -}} + +{{/* Validate values of MinIO - number of replicas must be even, greater than 4 and lower than 32 */}} +{{- define "minio.validateValues.replicaCount" -}} +{{- $replicaCount := int .Values.statefulset.replicaCount }} +{{- if and (eq .Values.mode "distributed") (or (eq (mod $replicaCount 2) 1) (lt $replicaCount 4) (gt $replicaCount 32)) -}} +minio: replicaCount + Number of replicas must be even, greater than 4 and lower than 32!! 
+ Please set a valid number of replicas (--set statefulset.replicaCount=X) +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "minio.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.clientImage.repository) (not (.Values.clientImage.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.clientImage.repository }}:{{ .Values.clientImage.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "minio.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "minio.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "minio.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "minio.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Returns the proper service account name depending if an explicit service account name is set +in the values file. 
If the name is not set it will default to either minio.fullname if serviceAccount.create +is true or default otherwise. +*/}} +{{- define "minio.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{ default (include "minio.fullname" .) .Values.serviceAccount.name }} + {{- else -}} + {{ default "default" .Values.serviceAccount.name }} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/scripts/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml b/scripts/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml new file mode 100755 index 000000000..23a7232a8 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml @@ -0,0 +1,160 @@ +{{- if eq .Values.mode "standalone" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +spec: + {{- if .Values.deployment.updateStrategy }} + strategy: {{ toYaml .Values.deployment.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "minio.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "minio.labels" . | nindent 8 }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "minio.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} +{{- include "minio.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.affinity }} + affinity: {{- include "minio.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "minio.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "minio.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "minio.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MINIO_FORCE_NEW_KEYS + value: {{ ternary "yes" "no" .Values.forceNewKeys | quote }} + {{- if .Values.useCredentialsFile }} + - name: MINIO_ACCESS_KEY_FILE + value: "/opt/bitnami/minio/secrets/access-key" + {{- else }} + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}} + key: access-key + {{- end }} + {{- if .Values.useCredentialsFile }} + - name: MINIO_SECRET_KEY_FILE + value: "/opt/bitnami/minio/secrets/secret-key" + {{- else }} + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: secret-key + {{- end }} + {{- if .Values.defaultBuckets }} + - name: MINIO_DEFAULT_BUCKETS + value: {{ .Values.defaultBuckets }} + {{- end }} + - name: MINIO_BROWSER + value: {{ ternary "off" "on" .Values.disableWebUI | quote }} + {{- if .Values.prometheusAuthType }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: {{ .Values.prometheusAuthType }} + {{- end }} + {{- if .Values.extraEnv }} + {{- toYaml .Values.extraEnv | nindent 12 }} + {{- end }} + {{ if .Values.command }} + command: {{- toYaml .Values.command | nindent 12 }} + {{- end }} + ports: + - name: minio + containerPort: 9000 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + httpGet: + path: /minio/health/live + port: minio + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + tcpSocket: + port: minio + {{- end }} + {{- if .Values.resources }} + resources: {{ toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.useCredentialsFile }} + - name: minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + {{- end }} + - name: "data" + 
mountPath: {{ .Values.persistence.mountPath }} + volumes: + {{- if .Values.useCredentialsFile }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ include "minio.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} diff --git a/scripts/helmcharts/databases/charts/minio/templates/ingress.yaml b/scripts/helmcharts/databases/charts/minio/templates/ingress.yaml new file mode 100755 index 000000000..486feb5ac --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/ingress.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.ingress.enabled (not .Values.disableWebUI ) -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . 
| nindent 4 }} +{{- range $key, $value := .Values.ingress.labels }} + {{ $key }}: {{ $value }} +{{- end }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ include "minio.fullname" $ }} + servicePort: minio + {{- end }} + tls: + {{- range .Values.ingress.hosts }} + {{- if .tls }} + - hosts: + {{- if .tlsHosts }} + {{- range $host := .tlsHosts }} + - {{ $host }} + {{- end }} + {{- else }} + - {{ .name }} + {{- end }} + secretName: {{ .tlsSecret }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/minio/templates/networkpolicy.yaml b/scripts/helmcharts/databases/charts/minio/templates/networkpolicy.yaml new file mode 100755 index 000000000..cffc5be13 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/networkpolicy.yaml @@ -0,0 +1,23 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "minio.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + ingress: + # Allow inbound connections + - ports: + - port: 9000 + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "minio.fullname" . 
}}-client: "true" + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/minio/templates/pvc-standalone.yaml b/scripts/helmcharts/databases/charts/minio/templates/pvc-standalone.yaml new file mode 100755 index 000000000..8e4be7da4 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/pvc-standalone.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.mode "standalone") }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "minio.storageClass" . }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/minio/templates/secrets.yaml b/scripts/helmcharts/databases/charts/minio/templates/secrets.yaml new file mode 100755 index 000000000..1a813eeda --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/secrets.yaml @@ -0,0 +1,12 @@ +{{- if (include "minio.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +type: Opaque +data: + access-key: {{ include "minio.accessKey" . | b64enc | quote }} + secret-key: {{ include "minio.secretKey" . | b64enc | quote }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/minio/templates/service.yaml b/scripts/helmcharts/databases/charts/minio/templates/service.yaml new file mode 100755 index 000000000..7a6c380de --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "minio.fullname" . 
}} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} + {{- if .Values.service.annotations }} + annotations: {{- include "minio.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: minio + port: {{ .Values.service.port }} + targetPort: minio + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "minio.matchLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/databases/charts/minio/templates/serviceaccount.yaml b/scripts/helmcharts/databases/charts/minio/templates/serviceaccount.yaml new file mode 100755 index 000000000..76efe62f0 --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "minio.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +secrets: + - name: {{ include "minio.fullname" . 
}} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/minio/templates/statefulset.yaml b/scripts/helmcharts/databases/charts/minio/templates/statefulset.yaml new file mode 100755 index 000000000..7add4501f --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/templates/statefulset.yaml @@ -0,0 +1,181 @@ +{{- if eq .Values.mode "distributed" }} +{{- $replicaCount := int .Values.statefulset.replicaCount }} +{{- if and (eq (mod $replicaCount 2) 0) (gt $replicaCount 3) (lt $replicaCount 33) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +spec: + selector: + matchLabels: {{- include "minio.matchLabels" . | nindent 6 }} + serviceName: {{ include "minio.fullname" . }}-headless + replicas: {{ .Values.statefulset.replicaCount }} + podManagementPolicy: {{ .Values.statefulset.podManagementPolicy }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if (eq "Recreate" .Values.statefulset.updateStrategy) }} + rollingUpdate: null + {{- end }} + template: + metadata: + labels: {{- include "minio.labels" . | nindent 8 }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "minio.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} +{{- include "minio.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.affinity }} + affinity: {{- include "minio.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "minio.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "minio.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "minio.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MINIO_DISTRIBUTED_MODE_ENABLED + value: "yes" + - name: MINIO_SKIP_CLIENT + value: "yes" + - name: MINIO_DISTRIBUTED_NODES + {{- $minioFullname := include "minio.fullname" . 
}}
+          {{- $minioHeadlessServiceName := printf "%s-%s" $minioFullname "headless" | trunc 63 }}
+          {{- $releaseNamespace := .Release.Namespace }}
+            value: {{range $i, $e := until $replicaCount }}{{ $minioFullname }}-{{ $e }}.{{ $minioHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $.Values.clusterDomain }},{{ end }}
+          - name: MINIO_FORCE_NEW_KEYS
+            value: {{ ternary "yes" "no" .Values.forceNewKeys | quote }}
+          {{- if .Values.useCredentialsFile }}
+          - name: MINIO_ACCESS_KEY_FILE
+            value: "/opt/bitnami/minio/secrets/access-key"
+          {{- else }}
+          - name: MINIO_ACCESS_KEY
+            valueFrom:
+              secretKeyRef:
+                name: {{ include "minio.secretName" . }}
+                key: access-key
+          {{- end }}
+          {{- if .Values.useCredentialsFile }}
+          - name: MINIO_SECRET_KEY_FILE
+            value: "/opt/bitnami/minio/secrets/secret-key"
+          {{- else }}
+          - name: MINIO_SECRET_KEY
+            valueFrom:
+              secretKeyRef:
+                name: {{ include "minio.secretName" . }}
+                key: secret-key
+          {{- end }}
+          - name: MINIO_BROWSER
+            value: {{ ternary "off" "on" .Values.disableWebUI | quote }}
+          {{- if .Values.prometheusAuthType }}
+          - name: MINIO_PROMETHEUS_AUTH_TYPE
+            value: {{ .Values.prometheusAuthType }}
+          {{- end }}
+          {{- if .Values.extraEnv }}
+          {{- toYaml .Values.extraEnv | nindent 12 }}
+          {{- end }}
+          ports:
+            - name: minio
+              containerPort: 9000
+          {{- if .Values.livenessProbe.enabled }}
+          livenessProbe:
+            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.livenessProbe.successThreshold }}
+            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+            httpGet:
+              path: /minio/health/live
+              port: minio
+          {{- end }}
+          {{- if .Values.readinessProbe.enabled }}
+          readinessProbe:
+            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+            successThreshold: {{ .Values.readinessProbe.successThreshold }}
+            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+            tcpSocket:
+              port: minio
+          {{- end }}
+          {{- if .Values.resources }}
+          resources: {{ toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            {{- if .Values.useCredentialsFile }}
+            - name: minio-credentials
+              mountPath: /opt/bitnami/minio/secrets/
+            {{- end }}
+            - name: data
+              mountPath: {{ .Values.persistence.mountPath }}
+      volumes:
+        {{- if .Values.useCredentialsFile }}
+        - name: minio-credentials
+          secret:
+            secretName: {{ include "minio.secretName" . }}
+        {{- end }}
+{{- if not .Values.persistence.enabled }}
+        - name: data
+          emptyDir: {}
+{{- else }}
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+        labels: {{- include "minio.matchLabels" . | nindent 10 }}
+        {{- if .Values.persistence.annotations }}
+        annotations: {{- include "minio.tplValue" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
+        {{- end }}
+      spec:
+        accessModes: {{ toYaml .Values.persistence.accessModes | nindent 10 }}
+        {{ include "minio.storageClass" . }}
+        resources:
+          requests:
+            storage: {{ .Values.persistence.size | quote }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/scripts/helmcharts/databases/charts/minio/templates/svc-headless.yaml b/scripts/helmcharts/databases/charts/minio/templates/svc-headless.yaml
new file mode 100755
index 000000000..41a4cf507
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/minio/templates/svc-headless.yaml
@@ -0,0 +1,16 @@
+{{- if eq .Values.mode "distributed" }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "minio.fullname" . }}-headless
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "minio.labels" . | nindent 4 }}
+spec:
+  type: ClusterIP
+  clusterIP: None
+  ports:
+    - name: minio
+      port: {{ .Values.service.port }}
+      targetPort: minio
+  selector: {{- include "minio.matchLabels" .
| nindent 4 }}
+{{- end }}
diff --git a/scripts/helmcharts/databases/charts/minio/templates/tls-secrets.yaml b/scripts/helmcharts/databases/charts/minio/templates/tls-secrets.yaml
new file mode 100755
index 000000000..68b31d324
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/minio/templates/tls-secrets.yaml
@@ -0,0 +1,14 @@
+{{- if .Values.ingress.enabled }}
+{{- range .Values.ingress.secrets }}
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ .name }}
+  namespace: {{ $.Release.Namespace }}
+  labels: {{- include "minio.labels" $ | nindent 4 }}
+type: kubernetes.io/tls
+data:
+  tls.crt: {{ .certificate | b64enc }}
+  tls.key: {{ .key | b64enc }}
+{{- end }}
+{{- end }}
diff --git a/scripts/helmcharts/databases/charts/minio/values-production.yaml b/scripts/helmcharts/databases/charts/minio/values-production.yaml
new file mode 100755
index 000000000..d4d639732
--- /dev/null
+++ b/scripts/helmcharts/databases/charts/minio/values-production.yaml
@@ -0,0 +1,389 @@
+## Global Docker image parameters
+## Please, note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  minio: {}
+# minio:
+#   existingSecret: ""
+#   accessKey: ""
+#   secretKey: ""
+# imageRegistry: myRegistryName
+# imagePullSecrets:
+#   - myRegistryKeySecretName
+# storageClass: myStorageClass
+
+## Bitnami MinIO image version
+## ref: https://hub.docker.com/r/bitnami/minio/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/minio
+  tag: 2020.10.9-debian-10-r6
+  ## Specify a imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override minio.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override minio.fullname template +## +# fullnameOverride: + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +## Cluster domain +## +clusterDomain: cluster.local + +## Bitnami MinIO Client image version +## ref: https://hub.docker.com/r/bitnami/minio-client/tags/ +## +clientImage: + registry: docker.io + repository: bitnami/minio-client + tag: 2020.10.3-debian-10-r9 + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## MinIO server mode. Allowed values: standalone or distributed. +## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +mode: distributed + +## MinIO deployment parameters +## Only when mode is 'standalone' +## +deployment: + ## Set to Recreate if you use persistent volume that cannot be mounted by more than one pods to makesure the pods is destroyed first. + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## Example: + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: Recreate + +## MinIO statefulset parameters +## Only when mode is 'distributed' +## +statefulset: + ## Update strategy, can be set to RollingUpdate or OnDelete by default. + ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + updateStrategy: RollingUpdate + + ## StatefulSet controller supports relax its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel
+  ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+  ##
+  podManagementPolicy: Parallel
+
+  ## Number of replicas. It must be an even number, greater than or equal to 4 and lower than 33
+  ##
+  replicaCount: 4
+
+## Use existing secret (ignores accessKey, and secretKey passwords)
+##
+# existingSecret:
+
+## Mount MinIO secret as a file instead of passing environment variable
+##
+useCredentialsFile: false
+
+## Force reconfiguring new keys whenever the credentials change
+##
+forceNewKeys: false
+
+## MinIO credentials
+##
+accessKey:
+  ## MinIO Access Key
+  ## ref: https://github.com/bitnami/bitnami-docker-minio/#setting-up-minio-in-distributed-mode
+  ##
+  password:
+  ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+  ## If it is not forced, a random password will be generated.
+  ##
+  forcePassword: false
+secretKey:
+  ## MinIO Secret Key
+  ## ref: https://github.com/bitnami/bitnami-docker-minio/#setting-up-minio-in-distributed-mode
+  ##
+  password:
+  ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+  ## If it is not forced, a random password will be generated.
+ ## + forcePassword: false + +## Comma, semi-colon or space separated list of buckets to create at initialization (only in standalone mode) +## +# defaultBuckets: "my-bucket, my-second-bucket" + +## Disable MinIO Web UI +## ref: https://github.com/minio/minio/tree/master/docs/config/#browser +## +disableWebUI: true + +## Define custom environment variables to pass to the image here +## +extraEnv: {} + +## Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/minio/prometheus/metric" + prometheus.io/port: "9000" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## MinIO containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 256Mi + requests: + cpu: 250m + memory: 256Mi + +## MinIO containers' liveness and readiness probes +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + + ## Enable persistence using an existing PVC (only in standalone mode) + ## + # existingClaim: + + ## Data volume mount path + ## + mountPath: /data + + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + + ## Persistent Volume size + ## + size: 8Gi + + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Persistent Volume Claim annotations + ## + annotations: {} + +## MinIO Service properties +## +service: + ## MinIO Service type + ## + type: ClusterIP + + ## MinIO Service port + ## + port: 9000 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + + ## loadBalancerIP for the PrestaShop Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + # loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## MinIO web browser. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + # annotations: + # kubernetes.io/ingress.class: nginx + + ## Ingress additional labels done as key:value pairs + labels: {} + + ## The list of hostnames to be covered with this ingress record. 
+ ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: minio.local + path: / + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## Optionally specify the TLS hosts for the ingress record + ## Useful when the Ingress controller supports www-redirection + ## If not specified, the above host name will be used + # tlsHosts: + # - www.minio.local + # - minio.local + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: minio.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: minio.local-tls + # key: + # certificate: + +## NetworkPolicy parameters +## +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port MinIO is listening + ## on. When true, MinIO will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + +## MinIO supports two authentication modes for Prometheus either jwt or public, by default MinIO runs in jwt mode. +## To allow public access without authentication for prometheus metrics set environment as follows. 
+prometheusAuthType: jwt diff --git a/scripts/helmcharts/databases/charts/minio/values.yaml b/scripts/helmcharts/databases/charts/minio/values.yaml new file mode 100755 index 000000000..8aee06beb --- /dev/null +++ b/scripts/helmcharts/databases/charts/minio/values.yaml @@ -0,0 +1,391 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + minio: {} +# accessKey: "minios3AccessKeyS3cr3t" +# secretKey: "m1n10s3CretK3yPassw0rd" +# minio: {} +# existingSecret: "" +# accessKey: "" +# secretKey: "" +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami MinIO image version +## ref: https://hub.docker.com/r/bitnami/minio/tags/ +## +image: + registry: docker.io + repository: bitnami/minio + tag: 2020.10.9-debian-10-r6 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override minio.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override minio.fullname template +## +# fullnameOverride: + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +## Cluster domain +## +clusterDomain: cluster.local + +## Bitnami MinIO Client image version +## ref: https://hub.docker.com/r/bitnami/minio-client/tags/ +## +clientImage: + registry: docker.io + repository: bitnami/minio-client + tag: 2020.10.3-debian-10-r9 + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container's resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## MinIO server mode. Allowed values: standalone or distributed. +## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide +## +mode: standalone + +## MinIO deployment parameters +## Only when mode is 'standalone' +## +deployment: + ## Set to Recreate if you use a persistent volume that cannot be mounted by more than one pod, to make sure the pod is destroyed first. + ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy + ## Example: + ## updateStrategy: + ## type: RollingUpdate + ## rollingUpdate: + ## maxSurge: 25% + ## maxUnavailable: 25% + ## + updateStrategy: + type: Recreate + +## MinIO statefulset parameters +## Only when mode is 'distributed' +## +statefulset: + ## Update strategy, can be set to RollingUpdate or OnDelete by default. + ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + updateStrategy: RollingUpdate + + ## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. 
There are two valid pod management policies: OrderedReady and Parallel + ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy + ## + podManagementPolicy: Parallel + + ## Number of replicas, it must be even and greater than 4 + ## + replicaCount: 4 + +## Use existing secret (ignores accessKey, and secretKey passwords) +## +# existingSecret: + +## Mount MinIO secret as a file instead of passing environment variable +## +useCredentialsFile: false + +## Force reconfiguring new keys whenever the credentials change +## +forceNewKeys: false + +## MinIO credentials +## +accessKey: + ## MinIO Access Key + ## ref: https://github.com/bitnami/bitnami-docker-minio/#setting-up-minio-in-distributed-mode + ## + password: + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not forced, a random password will be generated. + ## + forcePassword: false +secretKey: + ## MinIO Secret Key + ## ref: https://github.com/bitnami/bitnami-docker-minio/#setting-up-minio-in-distributed-mode + ## + password: + ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly. + ## If it is not forced, a random password will be generated. 
+ ## + forcePassword: false + +## Comma, semi-colon or space separated list of buckets to create at initialization (only in standalone mode) +## +# defaultBuckets: "my-bucket, my-second-bucket" + +## Disable MinIO Web UI +## ref: https://github.com/minio/minio/tree/master/docs/config/#browser +## +disableWebUI: false + +## Define custom environment variables to pass to the image here +## +extraEnv: {} + +## Define a custom command for the minio container +command: {} + +## Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## MinIO containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + +## MinIO containers' liveness and readiness probes +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + + ## Enable persistence using an existing PVC (only in standalone mode) + ## + # existingClaim: + + ## Data volume mount path + ## + mountPath: /data + + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + + ## Persistent Volume size + ## + size: 50Gi + + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Persistent Volume Claim annotations + ## + annotations: {} + +## MinIO Service properties +## +service: + ## MinIO Service type + ## + type: ClusterIP + + ## MinIO Service port + ## + port: 9000 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + + ## loadBalancerIP for the PrestaShop Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + # loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## MinIO web browser. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + # annotations: + # kubernetes.io/ingress.class: nginx + + ## Ingress additional labels done as key:value pairs + labels: {} + + ## The list of hostnames to be covered with this ingress record. 
+ ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: minio.local + path: / + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## Optionally specify the TLS hosts for the ingress record + ## Useful when the Ingress controller supports www-redirection + ## If not specified, the above host name will be used + # tlsHosts: + # - www.minio.local + # - minio.local + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: minio.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: minio.local-tls + # key: + # certificate: + +## NetworkPolicy parameters +## +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port MinIO is listening + ## on. When true, MinIO will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## MinIO supports two authentication modes for Prometheus either jwt or public, by default MinIO runs in jwt mode. +## To allow public access without authentication for prometheus metrics set environment as follows. 
+prometheusAuthType: public diff --git a/scripts/helmcharts/databases/charts/postgresql/.helmignore b/scripts/helmcharts/databases/charts/postgresql/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helmcharts/databases/charts/postgresql/Chart.yaml b/scripts/helmcharts/databases/charts/postgresql/Chart.yaml new file mode 100755 index 000000000..3ac2d3605 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.9.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. 
+engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/postgresql +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +- https://www.postgresql.org/ +version: 9.8.2 diff --git a/scripts/helmcharts/databases/charts/postgresql/README.md b/scripts/helmcharts/databases/charts/postgresql/README.md new file mode 100755 index 000000000..8cdb2ca13 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/README.md @@ -0,0 +1,707 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). 
+ +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components but PVC's associated with the chart and deletes the release. + +To delete the PVC's associated with `my-release`: + +```console +$ kubectl delete pvc -l release=my-release +``` + +> **Note**: Deleting the PVC's will delete postgresql data as well. Please be cautious before doing it. + +## Parameters + +The following tables lists the configurable parameters of the PostgreSQL chart and their default values. 
+ +| Parameter | Description | Default | +|-----------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------------| +| `global.imageRegistry` | Global Docker Image registry | `nil` | +| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` | +| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` | +| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` | +| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` | +| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` | +| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `image.registry` | PostgreSQL Image registry | `docker.io` | +| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` | +| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug values should 
be set | `false` | +| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.securityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` | +| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` | +| `ldap.enabled` | Enable LDAP support | `false` | +| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` | +| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` | +| `ldap.server` | IP address or name of the LDAP server. | `nil` | +| `ldap.port` | Port number on the LDAP server to connect to | `nil` | +| `ldap.scheme` | Set to `ldaps` to use LDAPS. 
| `nil` | +| `ldap.tls` | Set to `1` to use TLS encryption | `nil` | +| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` | +| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` | +| `ldap.search_attr` | Attribute to match against the user name in the search | `nil` | +| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` | +| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` | +| `ldap.bindDN` | DN of user to bind to LDAP | `nil` | +| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` | +| `replication.enabled` | Enable replication | `false` | +| `replication.user` | Replication user | `repl_user` | +| `replication.password` | Replication user password | `repl_password` | +| `replication.slaveReplicas` | Number of slave replicas | `1` | +| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` | +| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` | +| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` | +| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-password` which is the password for `postgresqlUsername` when it is different from `postgres`, `postgresql-postgres-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be used to authenticate on LDAP. The value is evaluated as a template. | `nil` | +| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case `postgres` is the admin username). 
| _random 10 character alphanumeric string_ | +| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` | +| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ | +| `postgresqlDatabase` | PostgreSQL database | `nil` | +| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) | +| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` | +| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` | +| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` | +| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` | +| `postgresqlConfiguration` | Runtime Config Parameters | `nil` | +| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` | +| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` | +| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` | +| `postgresqlMaxConnections` | Maximum total connections | `nil` | +| `postgresqlPostgresConnectionLimit` | Maximum total connections for the postgres user | `nil` | +| `postgresqlDbUserConnectionLimit` | Maximum total connections for the non-admin user | `nil` | +| `postgresqlTcpKeepalivesInterval` | TCP keepalives interval | `nil` | +| `postgresqlTcpKeepalivesIdle` | TCP keepalives idle | `nil` | +| `postgresqlTcpKeepalivesCount` | TCP keepalives count | `nil` | +| `postgresqlStatementTimeout` | Statement timeout | `nil` | +| `postgresqlPghbaRemoveFilters` | Comma-separated list of patterns to remove from the pg_hba.conf file | `nil` | +| `customLivenessProbe` | Override default liveness probe | `nil` | +| 
`customReadinessProbe` | Override default readiness probe | `nil` | +| `audit.logHostname` | Add client hostnames to the log file | `false` | +| `audit.logConnections` | Add client log-in operations to the log file | `false` | +| `audit.logDisconnections` | Add client log-outs operations to the log file | `false` | +| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `nil` | +| `audit.clientMinMessages` | Message log level to share with the user | `nil` | +| `audit.logLinePrefix` | Template string for the log line prefix | `nil` | +| `audit.logTimezone` | Timezone for the log timestamps | `nil` | +| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` | +| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` | +| `initdbScripts` | Dictionary of initdb scripts | `nil` | +| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` | +| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` | +| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` | +| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. 
| `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | PostgreSQL port | `5432` | +| `service.nodePort` | Kubernetes Service nodePort | `nil` | +| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) | +| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `service.loadBalancerSourceRanges` | Address that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) | +| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` | +| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` | +| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` | +| `persistence.enabled` | Enable persistence using PVC | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. | `nil` | +| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` | +| `persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` | +| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` | +| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}` | +| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` | +| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` | +| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` | +| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` | +| `master.anotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` | +| `master.labels` | Map of labels to add to the 
statefulset (postgresql master) | `{}` | +| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` | +| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` | +| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` | +| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` | +| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` | +| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` | +| `master.sidecars` | Add additional containers to the pod | `[]` | +| `master.service.type` | Allows using a different service type for Master | `nil` | +| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` | +| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` | +| `masterAsStandBy.enabled` | Whether to enable current cluster's Master as standby server of another cluster or not. | `false` | +| `masterAsStandBy.masterHost` | The Host of replication Master in the other cluster. | `nil` | +| `masterAsStandBy.masterPort ` | The Port of replication Master in the other cluster. | `nil` | +| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` | +| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` | +| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` | +| `slave.anotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` | +| `slave.resources` | CPU/Memory resource requests/limits override for slaves. Will fallback to `values.resources` if not defined. 
| `{}` | +| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` | +| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` | +| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` | +| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` | +| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` | +| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` | +| `slave.sidecars` | Add additional containers to the pod | `[]` | +| `slave.service.type` | Allows using a different service type for Slave | `nil` | +| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` | +| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` | +| `slave.persistence.enabled` | Whether to enable slave replicas persistence | `true` | +| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` | +| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the pod | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable container security context | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container | `1001` | +| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` | +| `serviceAccount.name` | Name of existing service account | 
`nil` | +| `livenessProbe.enabled` | Would you like a livenessProbe to be enabled | `true` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 | +| `readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `tls.enabled` | Enable TLS traffic support | `false` | +| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` | +| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `""` | +| `tls.certKeyFilename` | Certificate key filename | `""` | +| `tls.certCAFilename` | CA Certificate filename. 
If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate. | `nil` | +| `tls.crlFilename` | File containing a Certificate Revocation List | `nil` | +| `metrics.enabled` | Start a prometheus exporter | `false` | +| `metrics.service.type` | Kubernetes Service type | `ClusterIP` | +| `service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` | +| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` | +| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` | +| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. 
| `[]` | +| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` | +| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` | +| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) | +| `metrics.customMetrics` | Additional custom metrics | `nil` | +| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) | +| `metrics.securityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `metrics.securityContext.enabled` | Enable security context for metrics | `false` | +| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` | +| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 | +| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 | +| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `metrics.readinessProbe.enabled` | would you like a readinessProbe to be enabled | `true` | +| `metrics.readinessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 5 | +| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 | +| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 | +| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
| 6 | +| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 | +| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` | +| `psp.create` | Create Pod Security Policy | `false` | +| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` | +| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template). | `nil` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \ + bitnami/postgresql +``` + +The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally it creates a database named `my-database`. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/postgresql +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
+ +- Enable replication: +```diff +- replication.enabled: false ++ replication.enabled: true +``` + +- Number of slaves replicas: +```diff +- replication.slaveReplicas: 1 ++ replication.slaveReplicas: 2 +``` + +- Set synchronous commit mode: +```diff +- replication.synchronousCommit: "off" ++ replication.synchronousCommit: "on" +``` + +- Number of replicas that will have synchronous replication: +```diff +- replication.numSynchronousReplicas: 0 ++ replication.numSynchronousReplicas: 1 +``` + +- Start a prometheus exporter: +```diff +- metrics.enabled: false ++ metrics.enabled: true +``` + +To horizontally scale this chart, you can use the `--replicas` flag to modify the number of nodes in your PostgreSQL deployment. Also you can use the `values-production.yaml` file or modify the parameters shown above. + +### Customizing Master and Slave services in a replicated configuration + +At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for both the master and slave types individually. This allows you to override the values in the top level service object so that the master and slave can be of different service types and with different clusterIPs / nodePorts. Also in the case you want the master and slave to be of type nodePort, you will need to set the nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top level service object. + +### Change PostgreSQL version + +To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters. + +### postgresql.conf / pg_hba.conf files as configMap + +This helm chart also supports to customize the whole configuration file. 
+ +Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as configMap to the containers and it will be used for configuring the PostgreSQL server. + +Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}. + +In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options. + +### Allow settings to be loaded from files other than the default `postgresql.conf` + +If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory. +Those files will be mounted as configMap to the containers adding/overwriting the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option. + +### Initialize a fresh instance + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap. + +Alternatively, you can specify custom scripts using the `initdbScripts` parameter as dict. + +In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. 
If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter. + +The allowed extensions are `.sh`, `.sql` and `.sql.gz`. + +### Securing traffic using TLS + +TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart: + +- `tls.enabled`: Enable TLS support. Defaults to `false` +- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. + +For example: + +* First, create the secret with the certificates files: + + ```console + kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt + ``` + +* Then, use the following parameters: + + ```console + volumePermissions.enabled=true + tls.enabled=true + tls.certificatesSecret="certificates-tls-secret" + tls.certFilename="cert.crt" + tls.certKeyFilename="cert.key" + ``` + + > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an on-going [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected. + +### Sidecars + +If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. 
+ +```yaml +# For the PostgreSQL master +master: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +# For the PostgreSQL replicas +slave: + sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed and it is expected that the metrics are collected from inside the k8s cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). + +The exporter allows to create custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporters documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details. + +### Use of global variables + +In more complex scenarios, we may have the following tree of dependencies + +``` + +--------------+ + | | + +------------+ Chart 1 +-----------+ + | | | | + | --------+------+ | + | | | + | | | + | | | + | | | + v v v ++-------+------+ +--------+------+ +--------+------+ +| | | | | | +| PostgreSQL | | Sub-chart 1 | | Sub-chart 2 | +| | | | | | ++--------------+ +---------------+ +---------------+ +``` + +The three charts below depend on the parent chart Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. 
In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option for deploying could be to deploy Chart 1 with the following parameters: + +``` +postgresql.postgresqlPassword=testtest +subchart1.postgresql.postgresqlPassword=testtest +subchart2.postgresql.postgresqlPassword=testtest +postgresql.postgresqlDatabase=db1 +subchart1.postgresql.postgresqlDatabase=db1 +subchart2.postgresql.postgresqlDatabase=db1 +``` + +If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows: + +``` +global.postgresql.postgresqlPassword=testtest +global.postgresql.postgresqlDatabase=db1 +``` + +This way, the credentials will be available in all of the subcharts. + +## Persistence + +The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. +See the [Parameters](#parameters) section to configure the PVC or to disable persistence. + +If you already have data in it, you will fail to sync to standby nodes for all commits, details can refer to [code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to use those data, please convert them to sql and import after `helm install` finished. + +## NetworkPolicy + +To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`. + +For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. 
Note: this will enforce policy for _all_ pods in the namespace: + +```console +$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}" +``` + +With NetworkPolicy enabled, traffic will be limited to just port 5432. + +For more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL. +This label will be displayed in the output of a successful install. + +## Differences between Bitnami PostgreSQL image and [Docker Official](https://hub.docker.com/_/postgres) image + +- The Docker Official PostgreSQL image does not support replication. If you pass any replication environment variable, this would be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image. +- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and updates the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift. +- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,containerSecurityContext.enabled=false,shmVolume.chmod.enabled=false + +### Deploy chart using Docker Official PostgreSQL Image + +From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image. +Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. 
Basically, the PostgreSQL data dir cannot be the mount point directly, it has to be a subdirectory. + +``` +image.repository=postgres +image.tag=10.6 +postgresqlDataDir=/data/pgdata +persistence.mountPath=/data/ +``` + +## Upgrade + +It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart: + +```bash +$ helm upgrade my-release stable/postgresql \ + --set postgresqlPassword=[POSTGRESQL_PASSWORD] \ + --set replication.password=[REPLICATION_PASSWORD] +``` + +> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes. + +## 9.0.0 + +In this version the chart was adapted to follow the Helm label best practices, see [PR 3021](https://github.com/bitnami/charts/pull/3021). That means the backward compatibility is not guaranteed when upgrading the chart to this major version. + +As a workaround, you can delete the existing statefulset (using the `--cascade=false` flag pods are not deleted) before upgrading the chart. 
For example, this can be a valid workflow: + +- Deploy an old version (8.X.X) +```console +$ helm install postgresql bitnami/postgresql --version 8.10.14 +``` + +- Old version is up and running +```console +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 1 2020-08-04 13:39:54.783480286 +0000 UTC deployed postgresql-8.10.14 11.8.0 + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 76s +``` + +- The upgrade to the latest one (9.X.X) is going to fail +```console +$ helm upgrade postgresql bitnami/postgresql +Error: UPGRADE FAILED: cannot patch "postgresql-postgresql" with kind StatefulSet: StatefulSet.apps "postgresql-postgresql" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden +``` + +- Delete the statefulset +```console +$ kubectl delete statefulsets.apps --cascade=false postgresql-postgresql +statefulset.apps "postgresql-postgresql" deleted +``` + +- Now the upgrade works +```console +$ helm upgrade postgresql bitnami/postgresql +$ helm ls +NAME NAMESPACE REVISION UPDATED STATUS CHART APP VERSION +postgresql default 3 2020-08-04 13:42:08.020385884 +0000 UTC deployed postgresql-9.1.2 11.8.0 +``` + +- We can kill the existing pod and the new statefulset is going to create a new one: +```console +$ kubectl delete pod postgresql-postgresql-0 +pod "postgresql-postgresql-0" deleted + +$ kubectl get pods +NAME READY STATUS RESTARTS AGE +postgresql-postgresql-0 1/1 Running 0 19s +``` + +Please, note that without the `--cascade=false` both objects (statefulset and pod) are going to be removed and both objects will be deployed again with the `helm upgrade` command. + +## 8.0.0 + +Prefixes the port names with their protocols to comply with Istio conventions. + +If you depend on the port names in your setup, make sure to update them to reflect this change. + +## 7.1.0 + +Adds support for LDAP configuration. 
+ +## 7.0.0 + +Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly which has since been fixed to match the spec. + +In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in tune with the api's deprecated, resulting in compatibility breakage. + +This major version bump signifies this change. + +## 6.5.7 + +In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with Postgresql version 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies: + +- protobuf +- protobuf-c +- json-c +- geos +- proj + +## 5.0.0 + +In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main difference and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/). + +For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades, you can see some errors like the following one in the logs: + +```console +Welcome to the Bitnami postgresql container +Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql +Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues +Send us your feedback at containers@bitnami.com + +INFO ==> ** Starting PostgreSQL setup ** +NFO ==> Validating settings in POSTGRESQL_* env vars.. +INFO ==> Initializing PostgreSQL database... +INFO ==> postgresql.conf file not detected. Generating it... 
+INFO ==> pg_hba.conf file not detected. Generating it... +INFO ==> Deploying PostgreSQL with persisted data... +INFO ==> Configuring replication parameters +INFO ==> Loading custom scripts... +INFO ==> Enabling remote connections +INFO ==> Stopping PostgreSQL... +INFO ==> ** PostgreSQL setup finished! ** + +INFO ==> ** Starting PostgreSQL ** + [1] FATAL: database files are incompatible with server + [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3. +``` + +In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) from the official documentation. Basically, create a database dump in the old chart, move and restore it in the new one. + +### 4.0.0 + +This chart will use by default the Bitnami PostgreSQL container starting from version `10.7.0-r68`. This version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users from previous versions of the chart are advised to upgrade immediately. + +IMPORTANT: If you do not want to upgrade the chart version then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error + +``` +The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development +``` + +### 3.0.0 + +This releases make it possible to specify different nodeSelector, affinity and tolerations for master and slave pods. +It also fixes an issue with `postgresql.master.fullname` helper template not obeying fullnameOverride. 
+ +#### Breaking changes + +- `affinity` has been renamed to `master.affinity` and `slave.affinity`. +- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`. +- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`. + +### 2.0.0 + +In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the below steps: + + - Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart. You can find the instructions to obtain the password in the NOTES.txt, the service name can be obtained by running + +```console +$ kubectl get svc +``` + +- Install (not upgrade) the new version + +```console +$ helm repo update +$ helm install my-release bitnami/postgresql +``` + +- Connect to the new pod (you can obtain the name by running `kubectl get pods`): + +```console +$ kubectl exec -it NAME bash +``` + +- Once logged in, create a dump file from the previous database using `pg_dump`, for that we should connect to the previous postgresql chart: + +```console +$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql +``` + +After running the above command you should be prompted for a password, this password is the previous chart password (`OLD_PASSWORD`). +This operation could take some time depending on the database size. + +- Once you have the backup file, you can restore it with a command like the one below: + +```console +$ psql -U postgres DATABASE_NAME < /tmp/backup.sql +``` + +In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt). + +If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below. 
+ +```console +$ psql -U postgres +postgres=# drop database DATABASE_NAME; +postgres=# create database DATABASE_NAME; +postgres=# create user USER_NAME; +postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD'; +postgres=# grant all privileges on database DATABASE_NAME to USER_NAME; +postgres=# alter database DATABASE_NAME owner to USER_NAME; +``` diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/.helmignore b/scripts/helmcharts/databases/charts/postgresql/charts/common/.helmignore new file mode 100755 index 000000000..50af03172 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/Chart.yaml b/scripts/helmcharts/databases/charts/postgresql/charts/common/Chart.yaml new file mode 100755 index 000000000..5566cdc21 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 0.8.1 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/common +icon: https://bitnami.com/downloads/logos/bitnami-mark.png +keywords: +- common +- helper +- template +- function +- bitnami +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: common +sources: +- https://github.com/bitnami/charts +- http://www.bitnami.com/ +version: 0.8.1 diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/README.md b/scripts/helmcharts/databases/charts/postgresql/charts/common/README.md new file mode 100755 index 000000000..acdbe7bfa --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/README.md @@ -0,0 +1,286 @@ +# Bitnami Common Library Chart + +A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts. + +## TL;DR + +```yaml +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami +``` + +```bash +$ helm dependency update +``` + +```yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "common.names.fullname" . }} +data: + myvalue: "Hello World" +``` + +## Introduction + +This chart provides a common template helpers which can be used to develop new charts using [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ + +## Parameters + +The following table lists the helpers available in the library which are scoped in different sections. 
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------------|-----------------------------------------------------------------|------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|------------------------------------------------|-----------------------------------------------------------------|----------------------------| +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. 
| `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. | +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Labels + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|-----------------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|-----------------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. 
| `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | + +### Storage + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. | + +### TplValues + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value that should be rendered as a template, context is frequently the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. 
| `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|------------------------------------------------------------------------------------------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $` secret and field are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. 
| + +### Warnings + +| Helper identifier | Description | Expected Input | +|--------------------------------|-----------------------------------------------------------------|------------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. 
+ example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. 
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Notable changes + +N/A diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_affinities.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_affinities.tpl new file mode 100755 index 000000000..40f575cb6 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . 
-}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_capabilities.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100755 index 000000000..143bef2a4 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,33 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_errors.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_errors.tpl new file mode 100755 index 000000000..d6d3ec65a --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,20 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. 
+ +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: you must provide your current passwords when upgrade the release%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_images.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_images.tpl new file mode 100755 index 000000000..aafde9f3b --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image 
Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_labels.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100755 index 000000000..252066c7e --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_names.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_names.tpl new file mode 100755 index 000000000..adf2a74f4 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_secrets.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100755 index 000000000..8eee91d21 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. 
+ +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- $name = .name -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. 
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_storage.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100755 index 000000000..60e2a844f --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_tplvalues.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100755 index 000000000..2db166851 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_utils.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_utils.tpl new file mode 100755 index 000000000..74774a3ca --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,45 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_validations.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_validations.tpl new file mode 100755 index 000000000..05d1edbaf --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_validations.tpl @@ -0,0 +1,278 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. 
+ +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s=$%s' to the command.%s" .valueKey .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} + +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled mariadb. 
+ +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . 
-}} + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. 
+ +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_warnings.tpl b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100755 index 000000000..ae10fa41e --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/charts/common/values.yaml b/scripts/helmcharts/databases/charts/postgresql/charts/common/values.yaml new file mode 100755 index 000000000..9ecdc93f5 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/scripts/helmcharts/databases/charts/postgresql/ci/commonAnnotations.yaml b/scripts/helmcharts/databases/charts/postgresql/ci/commonAnnotations.yaml new file mode 100755 index 000000000..f6977823c --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,3 @@ +commonAnnotations: + helm.sh/hook: 'pre-install, pre-upgrade' + helm.sh/hook-weight: '-1' diff --git a/scripts/helmcharts/databases/charts/postgresql/ci/default-values.yaml b/scripts/helmcharts/databases/charts/postgresql/ci/default-values.yaml new file mode 100755 index 000000000..fc2ba605a --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/scripts/helmcharts/databases/charts/postgresql/ci/shmvolume-disabled-values.yaml b/scripts/helmcharts/databases/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100755 index 000000000..347d3b40a --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@ +shmVolume: + enabled: false diff --git a/scripts/helmcharts/databases/charts/postgresql/files/README.md b/scripts/helmcharts/databases/charts/postgresql/files/README.md new file mode 100755 index 000000000..1813a2fea --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/files/README.md @@ -0,0 +1 @@ +Copy here your postgresql.conf and/or pg_hba.conf files to use them as a config map. diff --git a/scripts/helmcharts/databases/charts/postgresql/files/conf.d/README.md b/scripts/helmcharts/databases/charts/postgresql/files/conf.d/README.md new file mode 100755 index 000000000..184c1875d --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@ +If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files. +These files will be injected as config maps and add/overwrite the default configuration using the `include_dir` directive that allows settings to be loaded from files other than the default `postgresql.conf`. + +More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file).
diff --git a/scripts/helmcharts/databases/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/scripts/helmcharts/databases/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100755 index 000000000..cba38091e --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@ +You can copy here your custom `.sh`, `.sql` or `.sql.gz` file so they are executed during the first boot of the image. + +More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository. \ No newline at end of file diff --git a/scripts/helmcharts/databases/charts/postgresql/postgresql.yaml b/scripts/helmcharts/databases/charts/postgresql/postgresql.yaml new file mode 100644 index 000000000..4e5e192d7 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/postgresql.yaml @@ -0,0 +1,185 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: db +--- +# Source: postgresql/templates/secrets.yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres-postgresql + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm +type: Opaque +data: + postgresql-password: "YXNheWVyUG9zdGdyZXM=" +--- +# Source: postgresql/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: postgres-postgresql-headless + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: 5432 + targetPort: tcp-postgresql + selector: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: postgres +--- +# Source: postgresql/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: postgres-postgresql + labels: + app.kubernetes.io/name: 
postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm + annotations: +spec: + type: ClusterIP + ports: + - name: tcp-postgresql + port: 5432 + targetPort: tcp-postgresql + selector: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: postgres + role: master +--- +# Source: postgresql/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres-postgresql + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm + annotations: +spec: + serviceName: postgres-postgresql-headless + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: postgres + role: master + template: + metadata: + name: postgres-postgresql + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm + role: master + spec: + securityContext: + fsGroup: 1001 + containers: + - name: postgres-postgresql + image: docker.io/bitnami/postgresql:11.9.0-debian-10-r48 + imagePullPolicy: "IfNotPresent" + resources: + requests: + cpu: 250m + memory: 256Mi + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "false" + - name: POSTGRESQL_PORT_NUMBER + value: "5432" + - name: POSTGRESQL_VOLUME_DIR + value: "/bitnami/postgresql" + - name: PGDATA + value: "/bitnami/postgresql/data" + - name: POSTGRES_USER + value: "postgres" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-postgresql + key: postgresql-password + - name: POSTGRESQL_ENABLE_LDAP + value: "no" + - name: POSTGRESQL_ENABLE_TLS + value: "no" + - name: POSTGRESQL_LOG_HOSTNAME + value: "false" + - name: POSTGRESQL_LOG_CONNECTIONS + value: "false" + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: "false" + - name: 
POSTGRESQL_PGAUDIT_LOG_CATALOG + value: "off" + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: "error" + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: "pgaudit" + ports: + - name: tcp-postgresql + containerPort: 5432 + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + - | + exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432 + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + volumeMounts: + - name: dshm + mountPath: /dev/shm + - name: data + mountPath: /bitnami/postgresql + subPath: + volumes: + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" diff --git a/scripts/helmcharts/databases/charts/postgresql/requirements.lock b/scripts/helmcharts/databases/charts/postgresql/requirements.lock new file mode 100755 index 000000000..b0b7b0673 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.8.1 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-10-06T07:04:23.948475694Z" diff --git a/scripts/helmcharts/databases/charts/postgresql/requirements.yaml b/scripts/helmcharts/databases/charts/postgresql/requirements.yaml new file mode 100755 index 000000000..2c28bfe14 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami 
diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/NOTES.txt b/scripts/helmcharts/databases/charts/postgresql/templates/NOTES.txt new file mode 100755 index 000000000..596e969ce --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,59 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . }} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . 
}} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . 
}} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- include "common.warnings.rollingTag" .Values.image -}} + +{{- $passwordValidationErrors := include "common.validations.values.postgresql.passwords" (dict "secret" (include "postgresql.fullname" .) "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordValidationErrors) "context" $) -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/_helpers.tpl b/scripts/helmcharts/databases/charts/postgresql/templates/_helpers.tpl new file mode 100755 index 000000000..b6a683ae9 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,488 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). 
+*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} 
+{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if we should use an existingSecret. 
+*/}} +{{- define "postgresql.useExistingSecret" -}} +{{- if or .Values.global.postgresql.existingSecret .Values.existingSecret -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (include "postgresql.useExistingSecret" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. +*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. 
+*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. 
+*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. 
+*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificate files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "postgresql.tlsCert" -}} +{{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "postgresql.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "postgresql.tlsCACert" -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}} +{{- end -}} + +{{/* +Return the path to the CRL file. 
+*/}} +{{- define "postgresql.tlsCRL" -}} +{{- if .Values.tls.crlFilename -}} +{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/configmap.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/configmap.yaml new file mode 100755 index 000000000..bc78771d4 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@ +{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- if (.Files.Glob "files/postgresql.conf") }} +{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }} +{{- else if .Values.postgresqlConfiguration }} + postgresql.conf: | +{{- range $key, $value := default dict .Values.postgresqlConfiguration }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- if (.Files.Glob "files/pg_hba.conf") }} +{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }} +{{- else if .Values.pgHbaConfiguration }} + pg_hba.conf: | +{{ .Values.pgHbaConfiguration | indent 4 }} +{{- end }} +{{ end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/extended-config-configmap.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100755 index 000000000..c6793802f --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@ +{{- if and (or (.Files.Glob 
"files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-extended-configuration + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: +{{- with .Files.Glob "files/conf.d/*.conf" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{ with .Values.postgresqlExtendedConf }} + override.conf: | +{{- range $key, $value := . }} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/extra-list.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/extra-list.yaml new file mode 100755 index 000000000..b28a03c1c --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/extra-list.yaml @@ -0,0 +1,5 @@ +{{- if .Values.extraDeploy }} +apiVersion: v1 +kind: List +items: {{- include "postgresql.tplValue" (dict "value" .Values.extraDeploy "context" $) | nindent 2 }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/initialization-configmap.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/initialization-configmap.yaml new file mode 100755 index 000000000..2652ce732 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/metrics-configmap.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/metrics-configmap.yaml new file mode 100755 index 000000000..6216eca84 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/metrics-svc.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/metrics-svc.yaml new file mode 100755 index 000000000..9181ac89a --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/networkpolicy.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/networkpolicy.yaml new file mode 100755 index 000000000..f2752af77 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . 
| nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/podsecuritypolicy.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100755 index 000000000..fb4c52f20 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/prometheusrule.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/prometheusrule.yaml new file mode 100755 index 000000000..0afd8f41f --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/role.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/role.yaml new file mode 100755 index 000000000..24148aa6b --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/rolebinding.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/rolebinding.yaml new file mode 100755 index 000000000..a105fb41b --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/secrets.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/secrets.yaml new file mode 100755 index 000000000..8d968864c --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . 
| b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/serviceaccount.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/serviceaccount.yaml new file mode 100755 index 000000000..1e2a1f2a7 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/servicemonitor.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/servicemonitor.yaml new file mode 100755 index 000000000..e118002a3 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/statefulset-slaves.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100755 index 000000000..d77142fa3 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,403 @@ +{{- if .Values.replication.enabled }} +{{- $slaveResources := coalesce .Values.slave.resources .Values.resources -}} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . 
}}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . 
}} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if $slaveResources }} + resources: {{- toYaml $slaveResources | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . 
| quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . 
}} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . }} + {{- end }} + {{- end }} + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.postgresqlMaxConnections }} + - name: POSTGRESQL_MAX_CONNECTIONS + value: {{ .Values.postgresqlMaxConnections | quote }} + {{- end }} + {{- if .Values.postgresqlPostgresConnectionLimit }} + - name: POSTGRESQL_POSTGRES_CONNECTION_LIMIT + value: {{ .Values.postgresqlPostgresConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlDbUserConnectionLimit }} + - name: POSTGRESQL_USERNAME_CONNECTION_LIMIT + value: {{ .Values.postgresqlDbUserConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesInterval }} + - name: POSTGRESQL_TCP_KEEPALIVES_INTERVAL + value: {{ .Values.postgresqlTcpKeepalivesInterval | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesIdle }} + - name: POSTGRESQL_TCP_KEEPALIVES_IDLE + value: {{ .Values.postgresqlTcpKeepalivesIdle | quote }} + {{- end }} + {{- if 
.Values.postgresqlStatementTimeout }} + - name: POSTGRESQL_STATEMENT_TIMEOUT + value: {{ .Values.postgresqlStatementTimeout | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeealivesCount }} + - name: POSTGRESQL_TCP_KEEPALIVES_COUNT + value: {{ .Values.postgresqlTcpKeealivesCount | quote }} + {{- end }} + {{- if .Values.postgresqlPghbaRemoveFilters }} + - name: POSTGRESQL_PGHBA_REMOVE_FILTERS + value: {{ .Values.postgresqlPghbaRemoveFilters | quote }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "postgresql.tplValue" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "postgresql.tplValue" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: 
postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "common.tplvalues.render" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . 
}} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if or (not .Values.persistence.enabled) (not .Values.slave.persistence.enabled) }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if and .Values.persistence.enabled .Values.slave.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/statefulset.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/statefulset.yaml new file mode 100755 index 000000000..10c1af166 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,580 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . 
| nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . 
) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) 
}} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ 
.Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.masterAsStandBy.enabled }} + - name: POSTGRES_MASTER_HOST + value: {{ .Values.masterAsStandBy.masterHost }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ .Values.masterAsStandBy.masterPort | quote }} + {{- end }} + {{- if or .Values.replication.enabled .Values.masterAsStandBy.enabled }} + - name: POSTGRES_REPLICATION_MODE + {{- if .Values.masterAsStandBy.enabled }} + value: "slave" + {{- else }} + value: "master" + {{- end }} + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . 
| quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) 
| quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end }} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote }} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.postgresqlMaxConnections }} + - name: POSTGRESQL_MAX_CONNECTIONS + value: {{ .Values.postgresqlMaxConnections | quote }} + {{- end }} + {{- if .Values.postgresqlPostgresConnectionLimit }} + - name: POSTGRESQL_POSTGRES_CONNECTION_LIMIT + value: {{ .Values.postgresqlPostgresConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlDbUserConnectionLimit }} + - name: POSTGRESQL_USERNAME_CONNECTION_LIMIT + value: {{ .Values.postgresqlDbUserConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesInterval }} + - name: POSTGRESQL_TCP_KEEPALIVES_INTERVAL + value: {{ .Values.postgresqlTcpKeepalivesInterval | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesIdle }} + - name: POSTGRESQL_TCP_KEEPALIVES_IDLE + value: {{ .Values.postgresqlTcpKeepalivesIdle | quote }} + {{- end }} + {{- if .Values.postgresqlStatementTimeout }} + - name: POSTGRESQL_STATEMENT_TIMEOUT + value: {{ .Values.postgresqlStatementTimeout | quote }} + {{- end }} 
+ {{- if .Values.postgresqlTcpKeealivesCount }} + - name: POSTGRESQL_TCP_KEEPALIVES_COUNT + value: {{ .Values.postgresqlTcpKeealivesCount | quote }} + {{- end }} + {{- if .Values.postgresqlPghbaRemoveFilters }} + - name: POSTGRESQL_PGHBA_REMOVE_FILTERS + value: {{ .Values.postgresqlPghbaRemoveFilters | quote }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "postgresql.tplValue" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . 
| nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "postgresql.tplValue" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} 
+{{- if .Values.master.sidecars }} +{{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: {{- omit .Values.metrics.securityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) }} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . 
}} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + 
name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . 
$ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/svc-headless.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/svc-headless.yaml new file mode 100755 index 000000000..fb8c838d2 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/svc-read.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/svc-read.yaml new file mode 100755 index 000000000..5ee051c45 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "common.tplvalues.render" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: slave +{{- end }} diff --git a/scripts/helmcharts/databases/charts/postgresql/templates/svc.yaml b/scripts/helmcharts/databases/charts/postgresql/templates/svc.yaml new file mode 100755 index 000000000..3dbfaa12d --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "common.tplvalues.render" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} + role: master diff --git a/scripts/helmcharts/databases/charts/postgresql/values-production.yaml b/scripts/helmcharts/databases/charts/postgresql/values-production.yaml new file mode 100755 index 000000000..3e144c131 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/values-production.yaml @@ -0,0 +1,711 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r48 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chown the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +## +rbac: + create: false + +replication: + enabled: true + user: repl_user + password: repl_password + slaveReplicas: 2 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + ## + synchronousCommit: "on" + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + ## + numSynchronousReplicas: 1 + ## Replication Cluster application name. 
Useful for defining multiple replication policies + ## + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret +## + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## Configure current cluster's master server to be the standby server in other cluster. +## This will allow cross cluster replication and provide cross cluster high availability. +## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. +## +masterAsStandBy: + enabled: false + # masterHost: + # masterPort: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
+ +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Optional duration in seconds the pod needs to terminate gracefully. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: "" + server: "" + port: "" + prefix: "" + suffix: "" + baseDN: "" + bindDN: "" + bind_password: + search_attr: "" + search_filter: "" + scheme: "" + tls: false + +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## +audit: + ## Log client hostnames + ## + logHostname: false + ## Log connections to the server + ## + logConnections: false + ## Log disconnections + ## + logDisconnections: false + ## Operation to audit using pgAudit (default if not set) + ## + pgAuditLog: "" + ## Log catalog using pgAudit + ## + pgAuditLogCatalog: "off" + ## Log level for clients + ## + clientMinMessages: error + ## Template for log line prefix (default if not set) + ## + logLinePrefix: "" + ## Log timezone + ## + logTimezone: "" + +## Shared preload libraries +## +postgresqlSharedPreloadLibraries: "pgaudit" + +## Maximum total connections +## +postgresqlMaxConnections: + +## Maximum connections for the postgres user +## +postgresqlPostgresConnectionLimit: + +## Maximum connections for the created user +## +postgresqlDbUserConnectionLimit: + +## TCP keepalives interval +## +postgresqlTcpKeepalivesInterval: + +## TCP keepalives idle +## +postgresqlTcpKeepalivesIdle: + +## TCP keepalives count +## +postgresqlTcpKeepalivesCount: + +## Statement timeout +## 
+postgresqlStatementTimeout: + +## Remove pg_hba.conf lines with the following comma-separated patterns +## (cannot be used with custom pg_hba.conf) +## +postgresqlPghbaRemoveFilters: + +## PostgreSQL service configuration +## +service: + ## PosgresSQL service type + ## + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ignored if `volumePermissions.enabled` is `false` + ## + chmod: + enabled: true + +## PostgreSQL data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart + ## + # existingClaim: + + ## The path the volume will be mounted at, useful when using different + ## PostgreSQL images. + ## + mountPath: /bitnami/postgresql + + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Additional PostgreSQL
Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: "" + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + extraInitContainers: [] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + + # Override the resource configuration for slave + resources: {} + # requests: + # memory: 256Mi + # cpu: 250m 
+ +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. 
+ ## + ## Example: + ## explicitNamespacesSelector: + ## matchLabels: + ## role: frontend + ## matchExpressions: + ## - {key: role, operator: In, values: [frontend]} + ## + explicitNamespacesSelector: {} + +## Configure extra options for liveness and readiness probes +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) +## +livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Custom Liveness probe +## +customLivenessProbe: {} + +## Custom Readiness probe +## +customReadinessProbe: {} + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to use the server's TLS cipher preferences rather than the client's. + preferServerCiphers: true + # + # Name of the Secret that contains the certificates + certificatesSecret: "" + # + # Certificate filename + certFilename: "" + # + # Certificate Key filename + certKeyFilename: "" + # + # CA Certificate filename + # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate + # ref: https://www.postgresql.org/docs/9.6/auth-methods.html + certCAFilename: + # + # File containing a Certificate Revocation List + crlFilename: + +## Configure metrics exporter +## +metrics: + enabled: true + # resources: {} + service: + type: ClusterIP + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9187" + loadBalancerIP: + serviceMonitor: + enabled: false + additionalLabels: {} + # namespace: monitoring + # interval: 30s + # scrapeTimeout: 10s + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref:
https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + ## + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r242 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod 
Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 diff --git a/scripts/helmcharts/databases/charts/postgresql/values.schema.json b/scripts/helmcharts/databases/charts/postgresql/values.schema.json new file mode 100755 index 000000000..7b5e2efc3 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/values.schema.json @@ -0,0 +1,103 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "postgresqlUsername": { + "type": "string", + "title": "Admin user", + "form": true + }, + "postgresqlPassword": { + "type": "string", + "title": "Password", + "form": true + }, + "persistence": { + "type": "object", + "properties": { + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi" + } + } + }, + "resources": { + "type": "object", + "title": "Required Resources", + "description": "Configure resource requests", + "form": true, + "properties": { + "requests": { + "type": "object", + "properties": { + "memory": { + "type": "string", + "form": true, + "render": "slider", + "title": "Memory Request", + "sliderMin": 10, + "sliderMax": 2048, + "sliderUnit": "Mi" + }, + "cpu": { + "type": "string", + "form": true, + "render": "slider", + "title": "CPU Request", + "sliderMin": 10, + "sliderMax": 2000, + "sliderUnit": "m" + } + } + } + } + }, + 
"replication": { + "type": "object", + "form": true, + "title": "Replication Details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Enable Replication", + "form": true + }, + "slaveReplicas": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "replication/enabled" + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/scripts/helmcharts/databases/charts/postgresql/values.yaml b/scripts/helmcharts/databases/charts/postgresql/values.yaml new file mode 100755 index 000000000..c97200904 --- /dev/null +++ b/scripts/helmcharts/databases/charts/postgresql/values.yaml @@ -0,0 +1,722 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r48 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". +## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +## +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + ## + synchronousCommit: 'off' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + ## + numSynchronousReplicas: 0 + ## Replication Cluster application name. 
Useful for defining multiple replication policies + ## + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: asayerPostgres + +## PostgreSQL password using existing secret +## existingSecret: secret +## + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## Configure current cluster's master server to be the standby server in other cluster. +## This will allow cross cluster replication and provide cross cluster high availability. +## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. +## +masterAsStandBy: + enabled: false + # masterHost: + # masterPort: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
+ +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## +audit: + ## Log client hostnames + ## + logHostname: false + ## Log connections to the server + ## + logConnections: false + ## Log disconnections + ## + logDisconnections: false + ## Operation to audit using pgAudit (default if not set) + ## + pgAuditLog: "" + ## Log catalog using pgAudit + ## + pgAuditLogCatalog: "off" + ## Log level for clients + ## + clientMinMessages: error + ## Template for log line prefix (default if not set) + ## + logLinePrefix: "" + ## Log timezone + ## + logTimezone: "" + +## Shared preload libraries +## +postgresqlSharedPreloadLibraries: "pgaudit" + +## Maximum total connections +## +postgresqlMaxConnections: + +## Maximum connections for the postgres user +## +postgresqlPostgresConnectionLimit: + +## Maximum connections for the created user +## +postgresqlDbUserConnectionLimit: + +## TCP keepalives interval +## +postgresqlTcpKeepalivesInterval: + +## TCP keepalives idle +## +postgresqlTcpKeepalivesIdle: + +## TCP keepalives count +## +postgresqlTcpKeepalivesCount: + +## Statement timeout +## +postgresqlStatementTimeout: + +## Remove pg_hba.conf lines with the following comma-separated patterns +## (cannot be used with custom pg_hba.conf) +## +postgresqlPghbaRemoveFilters: + +## Optional duration in seconds the pod needs to terminate gracefully. 
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods +## +# terminationGracePeriodSeconds: 30 + +## LDAP configuration +## +ldap: + enabled: false + url: '' + server: '' + port: '' + prefix: '' + suffix: '' + baseDN: '' + bindDN: '' + bind_password: + search_attr: '' + search_filter: '' + scheme: '' + tls: false + +## PostgreSQL service configuration +## +service: + ## PosgresSQL service type + ## + type: ClusterIP + # clusterIP: None + port: 5432 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. Evaluated as a template. + ## + annotations: {} + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources. Evaluated as a template. + ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## + # loadBalancerSourceRanges: + # - 10.10.10.0/24 + +## Start master and slave(s) pod(s) without limitations on shm memory. +## By default docker and containerd (and possibly other container runtimes) +## limit `/dev/shm` to `64M` (see e.g. the +## [docker issue](https://github.com/docker-library/postgres/issues/416) and the +## [containerd issue](https://github.com/containerd/containerd/issues/3654), +## which could be not enough if PostgreSQL uses parallel workers heavily. +## +shmVolume: + ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove + ## this limitation. + ## + enabled: true + ## Set to `true` to `chmod 777 /dev/shm` on a initContainer. 
+ ## This option is ignored if `volumePermissions.enabled` is `false`
+ ##
+ chmod:
+ enabled: true
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName: 
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+## set, choosing the default provisioner. (gp2 on AWS, standard on
+## GKE, AWS & OpenStack)
+##
+persistence:
+ enabled: true
+ ## A manually managed Persistent Volume and Claim
+ ## If defined, PVC must be created manually before volume will be bound
+ ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
+ ##
+ # existingClaim:
+
+ ## The path the volume will be mounted at, useful when using different
+ ## PostgreSQL images.
+ ##
+ mountPath: /bitnami/postgresql
+
+ ## The subdirectory of the volume to mount to, useful in dev environments
+ ## and one PV for multiple services.
+ ##
+ subPath: ''
+
+ # storageClass: "-"
+ accessModes:
+ - ReadWriteOnce
+ size: 8Gi
+ annotations: {}
+
+## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+ type: RollingUpdate
+
+##
+## PostgreSQL Master parameters
+##
+master:
+ ## Node, affinity, tolerations, and priorityclass settings for pod assignment
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+ ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+ ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
+ ##
+ nodeSelector: {}
+ affinity: {}
+ tolerations: []
+ labels: {}
+ annotations: {}
+ podLabels: {}
+ podAnnotations: {}
+ priorityClassName: ''
+ ## Extra init containers
+ 
## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + + # 
Override the resource configuration for slave + resources: {} + # requests: + # memory: 256Mi + # cpu: 250m + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port PostgreSQL is listening + ## on. When true, PostgreSQL will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + + ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace + ## and that match other criteria, the ones that have the good label, can reach the DB. + ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this + ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added. 
+
+ ##
+ ## Example:
+ ## explicitNamespacesSelector:
+ ## matchLabels:
+ ## role: frontend
+ ## matchExpressions:
+ ## - {key: role, operator: In, values: [frontend]}
+ ##
+ explicitNamespacesSelector: {}
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+##
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+
+## Custom Liveness probe
+##
+customLivenessProbe: {}
+
+## Custom Readiness probe
+##
+customReadinessProbe: {}
+
+##
+## TLS configuration
+##
+tls:
+ # Enable TLS traffic
+ enabled: false
+ #
+ # Whether to use the server's TLS cipher preferences rather than the client's.
+ preferServerCiphers: true
+ #
+ # Name of the Secret that contains the certificates
+ certificatesSecret: ''
+ #
+ # Certificate filename
+ certFilename: ''
+ #
+ # Certificate Key filename
+ certKeyFilename: ''
+ #
+ # CA Certificate filename
+ # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting them a certificate
+ # ref: https://www.postgresql.org/docs/9.6/auth-methods.html
+ certCAFilename:
+ #
+ # File containing a Certificate Revocation List
+ crlFilename:
+
+## Configure metrics exporter
+##
+metrics:
+ enabled: false
+ # resources: {}
+ service:
+ type: ClusterIP
+ annotations:
+ prometheus.io/scrape: 'true'
+ prometheus.io/port: '9187'
+ loadBalancerIP:
+ serviceMonitor:
+ enabled: false
+ additionalLabels: {}
+ # namespace: monitoring
+ # interval: 30s
+ # scrapeTimeout: 10s
+ ## Custom PrometheusRule to be defined
+ ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+ ## ref: 
https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: '' + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + ## rules: + ## - alert: HugeReplicationLag + ## expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1 + ## for: 1m + ## labels: + ## severity: critical + ## annotations: + ## description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s). + ## summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s). + ## + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r242 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## 
Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Array with extra yaml to deploy with the chart. Evaluated as a template +## +extraDeploy: [] diff --git a/scripts/helmcharts/databases/charts/redis/.helmignore b/scripts/helmcharts/databases/charts/redis/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helmcharts/databases/charts/redis/Chart.lock b/scripts/helmcharts/databases/charts/redis/Chart.lock new file mode 100644 index 000000000..69cc5b92f --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/Chart.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 1.4.2 +digest: sha256:4e3ec38e0e27e9fc1defb2a13f67a0aa12374bf0b15f06a6c13b1b46df6bffeb +generated: "2021-04-05T11:40:59.141264592Z" diff --git a/scripts/helmcharts/databases/charts/redis/Chart.yaml b/scripts/helmcharts/databases/charts/redis/Chart.yaml new file mode 100644 index 000000000..dd36c43e0 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/Chart.yaml @@ -0,0 +1,29 @@ +annotations: + category: Database +apiVersion: v2 +appVersion: 6.0.12 +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + tags: + - bitnami-common + version: 1.x.x +description: Open source, advanced key-value store. It is often referred to as a data + structure server since keys can contain strings, hashes, lists, sets and sorted + sets. 
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis +icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png +keywords: +- redis +- keyvalue +- database +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: redis +sources: +- https://github.com/bitnami/bitnami-docker-redis +- http://redis.io/ +version: 12.10.1 diff --git a/scripts/helmcharts/databases/charts/redis/README.md b/scripts/helmcharts/databases/charts/redis/README.md new file mode 100644 index 000000000..de0a04e18 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/README.md @@ -0,0 +1,738 @@ +# RedisTM Chart packaged by Bitnami + +[RedisTM](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs. + +Disclaimer: REDIS® is a registered trademark of Redis Labs Ltd.Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. + +## TL;DR + +```bash +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/redis +``` + +## Introduction + +This chart bootstraps a [RedisTM](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +### Choose between RedisTM Helm Chart and RedisTM Cluster Helm Chart + +You can choose any of the two RedisTM Helm charts for deploying a RedisTM cluster. 
+While [RedisTM Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using RedisTM Sentinel, the [RedisTM Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a RedisTM Cluster topology with sharding. +The main features of each chart are the following: + +| RedisTM | RedisTM Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![RedisTM Topology](img/redis-topology.png) | ![RedisTM Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys RedisTM on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RedisTM chart and their default values. 
+ +| Parameter | Description | Default | +|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | RedisTM password (overrides `password`) | `nil` | +| `image.registry` | RedisTM Image registry | `docker.io` | +| `image.repository` | RedisTM Image name | `bitnami/redis` | +| `image.tag` | RedisTM Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | RedisTM password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common RedisTM node configuration (this value is evaluated as a template) | See values.yaml | +| `clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `extraVolumes` | Array of extra 
volumes to be added to master & slave nodes (evaluated as a template) | `[]` | +| `extraVolumeMounts` | Array of extra volume mounts to be added to master & slave nodes (evaluated as a template) | `[]` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. 
| `nil` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | RedisTM exporter image registry | `docker.io` | +| `metrics.image.repository` | RedisTM exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | RedisTM exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. 
Value is evaluated as a template | `[]` | +| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` | +| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} | +| `metrics.service.labels` | Additional labels for the metrics service | {} | +| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` | +| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` | +| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` | +| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis | +| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` | +| `metrics.redisTargetHost` | way to specify an alternative redis hostname, if you set a local endpoint in hostAliases to match specific redis server certificate CN/SAN for example. 
+ | `localhost` | +| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` | +| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` | +| `master.hostAliases` | Add deployment host aliases | `[]` | +| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` | +| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` | +| `master.persistence.storageClass` | Storage class of backing PVC | `generic` | +| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` | +| `master.persistence.size` | Size of data volume | `8Gi` | +| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` | +| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` | +| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` | +| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` | +| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` | +| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete | +| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` | +| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` | +| `master.podLabels` | Additional labels for RedisTM master pod | {} | +| `master.podAnnotations` | Additional annotations for RedisTM master pod | {} | +| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMappassed to the pod of the master's stateful set set | `[]` | +| `master.extraEnvVarsSecret` | 
Additional Environment Variables Secret passed to the master's stateful set | `[]` | +| `master.extraVolumes` | Array of extra volumes to be added to master pod (evaluated as a template) | `[]` | +| `master.extraVolumeMounts` | Array of extra volume mounts to be added to master pod (evaluated as a template) | `[]` | +| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` | +| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` | +| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` | +| `redisPort` | RedisTM port (in both master and slaves) | `6379` | +| `tls.enabled` | Enable TLS support for replication traffic | `false` | +| `tls.authClients` | Require clients to authenticate or not | `true` | +| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` | +| `tls.certFilename` | Certificate filename | `nil` | +| `tls.certKeyFilename` | Certificate key filename | `nil` | +| `tls.certCAFilename` | CA Certificate filename | `nil` | +| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` | +| `master.command` | RedisTM master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` | +| `master.preExecCmds` | Text to inset into the startup script immediately prior to `master.command`. 
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `master.configmap` | Additional RedisTM configuration for the master nodes (this value is evaluated as a template) | `nil` | +| `master.disableCommands` | Array of RedisTM commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` | +| `master.extraFlags` | RedisTM master additional command line flags | [] | +| `master.nodeSelector` | RedisTM master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} | +| `master.tolerations` | Toleration labels for RedisTM master pod assignment | [] | +| `master.affinity` | Affinity settings for RedisTM master pod assignment | {} | +| `master.schedulerName` | Name of an alternate scheduler | `nil` | +| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` | +| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `master.service.port` | Kubernetes Service port (redis master) | `6379` | +| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` | +| `master.service.annotations` | annotations for redis master service | {} | +| `master.service.labels` | Additional labels for redis master service | {} | +| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` | +| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` | +| `master.resources` | RedisTM master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` | +| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` | +| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` | +| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master 
pod) | `5` | +| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` | +| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` | +| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` | +| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` | +| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` | +| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `master.shareProcessNamespace` | RedisTM Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` | +| `master.priorityClassName` | RedisTM Master pod priorityClassName | `nil` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `"10"` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources ` | Init container volume-permissions CPU/Memory resource requests/limits | {} | +| `volumePermissions.securityContext.*` | Security context of the init container | `{}` | +| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 | +| `slave.hostAliases` | Add deployment host aliases | `[]` | +| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` | +| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` | +| `slave.service.annotations` | annotations for redis slave service | {} | +| `slave.service.labels` | Additional labels for redis slave service | {} | +| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` | +| `slave.service.loadBalancerIP` | LoadBalancerIP if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if RedisTM slave service type is `LoadBalancer` | `nil` | +| `slave.command` | RedisTM slave entrypoint string. The command `redis-server` is executed if this is not provided. 
Note this is prepended with `exec` | `/run.sh` | +| `slave.preExecCmds` | Text to inset into the startup script immediately prior to `slave.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional RedisTM configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of RedisTM commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | RedisTM slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
+| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set | `[]` | +| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the slave's stateful set | `[]` | +| `slave.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` |
+| `sentinel.failoverTimeout` | Timeout for performing an election failover | `18000` | +| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` | +| `sentinel.port` | RedisTM Sentinel port | `26379` | +| `sentinel.cleanDelaySeconds` | Delay seconds before issuing the cleaning in the next node | `5` |
External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for RedisTM read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for RedisTM sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for RedisTM read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for RedisTM sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if RedisTM sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
+| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set | `[]` | +| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the sentinel node stateful set | `[]` | +| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` | +| `sentinel.extraVolumes` | Array of extra volumes to be added to sentinel node (evaluated as a template) | `[]` | +| `sentinel.extraVolumeMounts` | Array of extra volume mounts to be added to sentinel node (evaluated as a template) | `[]` | +| `sentinel.preExecCmds` | Text to insert into the startup script
immediately prior to `sentinel.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `sentinel.metrics.enabled` | Start a side-car prometheus sentinel exporter | `false` | +| `sentinel.metrics.image.registry` | Redis Sentinel exporter image registry | `docker.io` | +| `sentinel.metrics.image.repository` | Redis Sentinel exporter image name | `bitnami/redis-sentinel-exporter` | +| `sentinel.metrics.image.tag` | Redis Sentinel exporter image tag | `{TAG_NAME}` | +| `sentinel.metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `sentinel.metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` | +| `sentinel.metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/leominov/redis_sentinel_exporter#configuration) | `{}` | +| `sentinel.metrics.resources` | Exporter resource requests/limit | `{}` | +| `sentinel.metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `sentinel.metrics.enabled` to be `true`) | `false` | +| `sentinel.metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `sentinel.metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `sentinel.metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `sentinel.metrics.service.type` | Kubernetes Service type (redis sentinel metrics) | `ClusterIP` | +| `sentinel.metrics.service.port` | Kubernetes service port (redis sentinel metrics) | `9355` | +| `sentinel.metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.metrics.service.annotations` | Annotations for the services to monitor (redis sentinel metrics service) | {} | +| 
`sentinel.metrics.service.labels` | Additional labels for the Sentinel metrics service | {} | +| `sentinel.metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` | +| `sentinel.metrics.priorityClassName` | Sentinel metrics exporter pod priorityClassName | `nil` | +| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` | +| `sysctlImage.command` | sysctlImage command to execute | [] | +| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` | +| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/bitnami-shell` | +| `sysctlImage.tag` | sysctlImage Init container tag | `"10"` | +| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` | +| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` | +| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} | +| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```bash +$ helm install my-release \ + --set password=secretpassword \ + bitnami/redis +``` + +The above command sets the RedisTM server password to `secretpassword`. + +> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. 
Using chart defaults causes pod failure for the RedisTM pod as it attempts to write to the `/bitnami` directory.
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the RedisTM Sentinel cluster and query the current master using the command below (using redis-cli or similar):
No defaults. +- `tls.certFilename`: Certificate filename. No defaults. +- `tls.certKeyFilename`: Certificate key filename. No defaults. +- `tls.certCAFilename`: CA Certificate filename. No defaults. + +For example: + +First, create the secret with the certificates files: + +```console +kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem +``` + +Then, use the following parameters: + +```console +tls.enabled="true" +tls.certificatesSecret="certificates-tls-secret" +tls.certFilename="cert.pem" +tls.certKeyFilename="cert.key" +tls.certCAFilename="ca.pem" +``` + +### Metrics + +The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar as the described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint. + +If you have enabled TLS by specifying `tls.enabled=true` you also need to specify TLS option to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example: + +You can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification or providing the following values under `metrics.extraArgs` for TLS client authentication: + +```console +tls-client-key-file +tls-client-cert-file +tls-ca-cert-file +``` + +### Host Kernel Settings + +RedisTM may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages. 
+Note that this will not disable transparent huge pages.
+- Now that the PVCs were created, stop the release and copy the `dump.rdb` onto the persisted data by using a helper pod.
This label will be displayed in the output +after a successful install. + +With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set: + +``` +networkPolicy: + enabled: true + ingressNSMatchLabels: + redis: external + ingressNSPodMatchLabels: + redis-client: true +``` + +## Troubleshooting + +Find more information about how to deal with common errors related to Bitnami’s Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues). + +## Upgrading an existing Release to a new major version + +A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an +incompatible breaking change needing manual actions. + +### To 11.0.0 + +When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then a restore the data in this new version. + +### To 10.0.0 + +For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable to account for the following cases: + +- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced. +- Where redis clients need to be updated to support sentinel authentication. + +If using a master/slave topology, or with `usePassword: false`, no action is required. + +### To 8.0.18 + +For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`. 
This introduces many changes including metrics names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details. + +### To 7.0.0 + +This version causes a change in the RedisTM Master StatefulSet definition, so the command helm upgrade would not work out of the box. As an alternative, one of the following could be done: + +- Recommended: Create a clone of the RedisTM Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC. + + ``` + helm install my-release bitnami/redis --set persistence.existingClaim= + ``` + +- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the RedisTM Master StatefulSet. As a consequence, the following commands can be done to upgrade the release + + ``` + helm delete --purge + helm install bitnami/redis + ``` + +Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters. + +Some values have changed as well: + +- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves) +- `master.securityContext` and `slave.securityContext` have been changed to `securityContext`(same values for both master and slaves) + +By default, the upgrade will not change the cluster topology. 
In case you want to use RedisTM Sentinel, you must explicitly set `sentinel.enabled` to `true`. + +### To 6.0.0 + +Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true`. + +### To 5.0.0 + +The default image in this release may be switched out for any image containing the `redis-server` +and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command` +must be specified. + +#### Breaking changes + +- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`. +- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values. +- `master.persistence.path` now defaults to `/data`. + +### 4.0.0 + +This version removes the `chart` label from the `spec.selector.matchLabels` +which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently +added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726. + +It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set. + +Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable. 
+ +In order to upgrade, delete the RedisTM StatefulSet before upgrading: + +```bash +kubectl delete statefulsets.apps --cascade=false my-release-redis-master +``` + +And edit the RedisTM slave (and metrics if enabled) deployment: + +```bash +kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]' +``` + +## Upgrading + +### To 12.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. 
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+ +This version also allows enabling RedisTM Sentinel containers inside of the RedisTM Pods (feature disabled by default). In case the master crashes, a new RedisTM node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query first the Sentinel cluster. Find more information [in this section](#master-slave-with-sentinel). diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/.helmignore b/scripts/helmcharts/databases/charts/redis/charts/common/.helmignore new file mode 100644 index 000000000..50af03172 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/.helmignore @@ -0,0 +1,22 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/Chart.yaml b/scripts/helmcharts/databases/charts/redis/charts/common/Chart.yaml new file mode 100644 index 000000000..bcc3808d0 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/Chart.yaml @@ -0,0 +1,23 @@ +annotations: + category: Infrastructure +apiVersion: v2 +appVersion: 1.4.2 +description: A Library Helm Chart for grouping common logic between bitnami charts. + This chart is not deployable by itself. 
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. 
| `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
| +| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` | + +### Ingress + +| Helper identifier | Description | Expected Input | +|--------------------------|----------------------------------------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences | + +### Labels + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|-------------------| +| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context | +| `common.labels.matchLabels` | Return the proper Docker Image Registry Secret Names | `.` Chart context | + +### Names + +| Helper identifier | Description | Expected Input | +|-------------------------|------------------------------------------------------------|-------------------| +| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context | +| `common.names.fullname` | Create a default fully qualified app name. 
| `.` Chart context | +| `common.names.chart` | Chart name plus version | `.` Chart context | + +### Secrets + +| Helper identifier | Description | Expected Input | +|---------------------------|--------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. | +| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. | +| `common.passwords.manage` | Generate secret password or retrieve one if already created. | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, length, strong and chartNAme fields are optional. | +| `common.secrets.exists` | Returns whether a previous generated secret already exists. | `dict "secret" "secret-name" "context" $` | + +### Storage + +| Helper identifier | Description | Expected Input | +|-------------------------------|---------------------------------------|---------------------------------------------------------------------------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. 
| + +### TplValues + +| Helper identifier | Description | Expected Input | +|---------------------------|----------------------------------------|----------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.tplvalues.render` | Renders a value that contains template | `dict "value" .Values.path.to.the.Value "context" $`, value is the value should rendered as template, context frequently is the chart context `$` or `.` | + +### Utils + +| Helper identifier | Description | Expected Input | +|--------------------------------|------------------------------------------------------------------------------------------|------------------------------------------------------------------------| +| `common.utils.fieldToEnvVar` | Build environment variable name given a field. | `dict "field" "my-password"` | +| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` | +| `common.utils.getValueFromKey` | Gets a value from `.Values` object given its key path | `dict "key" "path.to.key" "context" $` | +| `common.utils.getKeyFromList` | Returns first `.Values` key with a defined value or first of the list if all non-defined | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` | + +### Validations + +| Helper identifier | Description | Expected Input | +|--------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------------| +| `common.validations.values.single.empty` | Validate a value must not be empty. 
| `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate a how to get instruction. See [ValidateValue](#validatevalue) | +| `common.validations.values.multiple.empty` | Validate a multiple values must not be empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) | +| `common.validations.values.mariadb.passwords` | This helper will ensure required password for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mariadb chart and the helper. | +| `common.validations.values.postgresql.passwords` | This helper will ensure required password for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use postgresql chart and the helper. | +| `common.validations.values.redis.passwords` | This helper will ensure required password for RedisTM are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. 
| +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. 
+ example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . }} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... 
+ +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. 
[Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. +- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_affinities.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..493a6dc7e --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . 
}} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ 
.context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_capabilities.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..4dde56a38 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,95 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. 
+*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end }} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "networking.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for RBAC resources. +*/}} +{{- define "common.capabilities.rbac.apiVersion" -}} +{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "rbac.authorization.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "rbac.authorization.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for CRDs. +*/}} +{{- define "common.capabilities.crd.apiVersion" -}} +{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apiextensions.k8s.io/v1beta1" -}} +{{- else -}} +{{- print "apiextensions.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Returns true if the used Helm version is 3.3+. +A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure. +This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in a "interface not found" error. 
+**To be removed when the catalog's minimun Helm version is 3.3** +*/}} +{{- define "common.capabilities.supportsHelmVersion" -}} +{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_errors.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_errors.tpl new file mode 100644 index 000000000..a79cc2e32 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_errors.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Through error when upgrading using empty passwords values that must not be empty. + +Usage: +{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}} +{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}} +{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }} + +Required password params: + - validationErrors - String - Required. List of validation strings to be return, if it is empty it won't throw error. + - context - Context - Required. Parent context. +*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}} + {{- $errorString = print $errorString "\n Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims." 
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_images.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_images.tpl new file mode 100644 index 000000000..60f04fd6e --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_images.tpl @@ -0,0 +1,47 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . 
}} + {{- end }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_ingress.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..622ef50e3 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,42 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if typeIs "int" .servicePort }} + number: {{ .servicePort }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . 
}} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_labels.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_names.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_names.tpl new file mode 100644 index 000000000..adf2a74f4 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. 
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_secrets.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..60b84a701 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) 
-}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. + - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. 
+*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 
'Secret' resource where the password is stored. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_storage.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_tplvalues.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. 
+Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_utils.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..ea083a249 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/_warnings.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_cassandra.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..8679ddffb --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . -}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. 
Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_mariadb.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..bb5ed7253 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. 
Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_mongodb.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..7d5ecbccb --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB(R) required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB(R) values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB(R) is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false +*/}} +{{- define "common.mongodb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mongodb. + +Usage: +{{ include "common.mongodb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mongodb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mongodb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB(R) is used as subchart or not. Default: false +*/}} +{{- define "common.mongodb.values.key.auth" -}} + {{- if .subchart -}} + mongodb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDB(R) is used as subchart or not.
Default: false +*/}} +{{- define "common.mongodb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mongodb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_postgresql.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_postgresql.tpl new file mode 100644 index 000000000..992bcd390 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_postgresql.tpl @@ -0,0 +1,131 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate PostgreSQL required passwords are not empty. + +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . 
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. 
+ +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_redis.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..3e2a47c03 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis(TM) required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . 
-}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis(TM) is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_validations.tpl b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. 
Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/charts/common/values.yaml b/scripts/helmcharts/databases/charts/redis/charts/common/values.yaml new file mode 100644 index 000000000..9ecdc93f5 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. +exampleValue: common-chart diff --git a/scripts/helmcharts/databases/charts/redis/ci/default-values.yaml b/scripts/helmcharts/databases/charts/redis/ci/default-values.yaml new file mode 100644 index 000000000..fc2ba605a --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. 
diff --git a/scripts/helmcharts/databases/charts/redis/ci/extra-flags-values.yaml b/scripts/helmcharts/databases/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 000000000..71132f76e --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/scripts/helmcharts/databases/charts/redis/ci/production-sentinel-values.yaml b/scripts/helmcharts/databases/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 000000000..009a3718a --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +sentinel: + enabled: true + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 60000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + service: + ## Redis(TM) Sentinel Service type + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespacess. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + name: + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. 
It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Use password authentication +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +usePasswordFile: false + +## Persist data to a persistent volume (Redis(TM) Master) +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security 
reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: 
https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + service: + ## Redis(TM) Master Service type + type: ClusterIP + port: 6379 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: {} + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + service: + ## Redis(TM) Slave Service type + type: ClusterIP + ## Redis(TM) port + port: 6379 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + port: 6379 + ## Can be used to specify command line arguments, for example: + ## + command: "/run.sh" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + extraFlags: [] + ## List of Redis(TM) commands to disable + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, 
e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + # priorityClassName: {} + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + statefulset: + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + +## Prometheus Exporter / Metrics +## +metrics: + enabled: true + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.5.3-debian-10-r14 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + selector: + prometheus: kube-prometheus + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current postgresql service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . 
}}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/scripts/helmcharts/databases/charts/redis/img/redis-cluster-topology.png b/scripts/helmcharts/databases/charts/redis/img/redis-cluster-topology.png new file mode 100644 index 000000000..f0a02a9f8 Binary files /dev/null and b/scripts/helmcharts/databases/charts/redis/img/redis-cluster-topology.png differ diff --git a/scripts/helmcharts/databases/charts/redis/img/redis-topology.png b/scripts/helmcharts/databases/charts/redis/img/redis-topology.png new file mode 100644 index 000000000..3f5280feb Binary files /dev/null and b/scripts/helmcharts/databases/charts/redis/img/redis-topology.png differ diff --git a/scripts/helmcharts/databases/charts/redis/templates/NOTES.txt 
b/scripts/helmcharts/databases/charts/redis/templates/NOTES.txt new file mode 100644 index 000000000..5c27951d1 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/NOTES.txt @@ -0,0 +1,136 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis(TM) service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As alternative, you can also switch to "usePassword=true" + providing a valid password on "password" parameter. + +------------------------------------------------------------------------------- +{{- end }} +{{- end }} +{{- end }} + +{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}} + +------------------------------------------------------------------------------- + WARNING + + Using redis sentinel without a cluster is not supported. A single pod with + standalone redis has been deployed. + + To deploy redis sentinel, please use the values "cluster.enabled=true" and + "sentinel.enabled=true". + +------------------------------------------------------------------------------- +{{- end }} + +{{- if .Values.cluster.enabled }} +{{- if .Values.sentinel.enabled }} +Redis(TM) can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read only operations + +For read/write operations, first access the Redis(TM) Sentinel cluster, which is available in port {{ .Values.sentinel.service.sentinelPort }} using the same domain name above. + +{{- else }} +Redis(TM) can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations +{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations +{{- end }} + +{{- else }} +Redis(TM) can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster: + +{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +{{- end }} + +{{ if .Values.usePassword }} +To get your password run: + + export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode) +{{- end }} + +To connect to your Redis(TM) server: + +1. Run a Redis(TM) pod that you can use as a client: + +{{- if .Values.tls.enabled }} + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity + + Copy your TLS certificates to the pod: + + kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert + kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key + kubectl cp --namespace {{ .Release.Namespace }} /path/to/CA.cert {{ template "redis.fullname" . 
}}-client:/tmp/CA.cert + + Use the following command to attach to the pod: + + kubectl exec --tty -i {{ template "redis.fullname" . }}-client \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --namespace {{ .Release.Namespace }} -- bash +{{- else }} + kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \ + {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }} + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash +{{- end }} + +2. Connect using the Redis(TM) CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . 
}}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . 
}}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} + +{{- include "redis.validateValues" . }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/_helpers.tpl b/scripts/helmcharts/databases/charts/redis/templates/_helpers.tpl new file mode 100644 index 000000000..e76b9ce9d --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/_helpers.tpl @@ -0,0 +1,445 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. 
+*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the sentinel metrics image) +*/}} +{{- define "sentinel.metrics.image" -}} +{{- $registryName := .Values.sentinel.metrics.image.registry -}} +{{- $repositoryName := .Values.sentinel.metrics.image.repository -}} +{{- $tag := .Values.sentinel.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "redis.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. +*/}} +{{- define "redis.tlsCert" -}} +{{- required "Certificate filename is required when TLS in enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the cert key file. +*/}} +{{- define "redis.tlsCertKey" -}} +{{- required "Certificate Key filename is required when TLS in enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the CA cert file. +*/}} +{{- define "redis.tlsCACert" -}} +{{- required "Certificate CA filename is required when TLS in enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}} +{{- end -}} + +{{/* +Return the path to the DH params file. +*/}} +{{- define "redis.tlsDHParams" -}} +{{- if .Values.tls.dhParamsFilename -}} +{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}} +{{- end -}} +{{- end -}} + +{{/* +Create the name of the service account to use +*/}} +{{- define "redis.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "redis.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "redis.secretName" -}} +{{- if .Values.existingSecret -}} +{{- printf "%s" .Values.existingSecret -}} +{{- else -}} +{{- printf "%s" (include "redis.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password key to be retrieved from Redis(TM) secret. 
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis(TM) password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . 
}} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of Redis(TM) - spreadConstrainsts K8s version */}} +{{- define "redis.validateValues.spreadConstraints" -}} +{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}} +redis: spreadConstraints + Pod Topology Spread Constraints are only available on K8s >= 1.16 + Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. 
+Usage: +{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "redis.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/templates/configmap-scripts.yaml b/scripts/helmcharts/databases/charts/redis/templates/configmap-scripts.yaml new file mode 100644 index 000000000..6d7402721 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/configmap-scripts.yaml @@ -0,0 +1,430 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-scripts + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + start-node.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libos.sh + . /opt/bitnami/scripts/liblog.sh + . /opt/bitnami/scripts/libvalidations.sh + + not_exists_dns_entry() { + myip=$(hostname -i) + + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep "^${myip}" )" ]]; then + warn "$HEADLESS_SERVICE does not contain the IP of this pod: ${myip}" + return 1 + fi + info "$HEADLESS_SERVICE has my IP: ${myip}" + return 0 + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + # Waits for DNS to add this ip to the service DNS entry + retry_while not_exists_dns_entry + + export REDIS_REPLICATION_MODE="slave" + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + echo "I am master" + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + else + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + + # Immediately attempt to connect to the reported master. 
If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. The liveness check will then timeout waiting for the redis + # container to be ready and restart the it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. + exit 1 + fi + fi + + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + fi + + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + else + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + fi + + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + + start-sentinel.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libos.sh + . /opt/bitnami/scripts/libvalidations.sh + . 
/opt/bitnami/scripts/libfile.sh + + sentinel_conf_set() { + local -r key="${1:?missing key}" + local value="${2:-}" + + # Sanitize inputs + value="${value//\\/\\\\}" + value="${value//&/\\&}" + value="${value//\?/\\?}" + [[ "$value" = "" ]] && value="\"$value\"" + + replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false + } + sentinel_conf_add() { + echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf" + } + host_id() { + echo "$1" | openssl sha1 | awk '{print $2}' + } + not_exists_dns_entry() { + myip=$(hostname -i) + + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep "^${myip}" )" ]]; then + warn "$HEADLESS_SERVICE does not contain the IP of this pod: ${myip}" + return 1 + fi + info "$HEADLESS_SERVICE has my IP: ${myip}" + return 0 + } + + HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + REDIS_SERVICE="{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + + if [[ ! 
-f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then + cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.usePassword }} + printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- if .Values.sentinel.usePassword }} + printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + {{- end }} + {{- if .Values.sentinel.staticID }} + printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf + {{- end }} + fi + + export REDIS_REPLICATION_MODE="slave" + + # Waits for DNS to add this ip to the service DNS entry + retry_while not_exists_dns_entry + + if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i)")" ]]; then + export REDIS_REPLICATION_MODE="master" + fi + + # Clean sentineles from the current sentinel nodes + for node in $( getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i)" | cut -f 1 -d ' ' | uniq ); do + info "Cleaning sentinels in sentinel node: $node" + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $node -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel reset "*" + else + redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $node -p {{ .Values.sentinel.port }} sentinel reset "*" + fi + sleep {{ .Values.sentinel.cleanDelaySeconds }} + done + info "Sentinels clean up done" + + if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then + REDIS_MASTER_HOST="$(hostname -i)" + REDIS_MASTER_PORT_NUMBER="{{ .Values.redisPort }}" + else + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h 
$REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]} + REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]} + + # Immediately attempt to connect to the reported master. If it doesn't exist the connection attempt will either hang + # or fail with "port unreachable" and give no data. The liveness check will then timeout waiting for the sentinel + # container to be ready and restart the it. By then the new master will likely have been elected + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + + if [[ ! ($($sentinel_info_command)) ]]; then + # master doesn't actually exist, this probably means the remaining pods haven't elected a new one yet + # and are reporting the old one still. Once this happens the container will get stuck and never see the new + # master. We stop here to allow the container to not pass the liveness check and be restarted. 
+ exit 1 + fi + fi + sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}" + + add_replica() { + if [[ "$1" != "$REDIS_MASTER_HOST" ]]; then + sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $1 {{ .Values.redisPort }}" + fi + } + + {{- if .Values.sentinel.staticID }} + # remove generated known sentinels and replicas + tmp="$(sed -e '/^sentinel known-/d' -e '/^$/d' /opt/bitnami/redis-sentinel/etc/sentinel.conf)" + echo "$tmp" > /opt/bitnami/redis-sentinel/etc/sentinel.conf + + for node in $(seq 0 {{ .Values.cluster.slaveCount }}); do + NAME="{{ template "redis.fullname" . }}-node-$node" + IP="$(getent hosts "$NAME.$HEADLESS_SERVICE" | awk ' {print $1 }')" + if [[ "$NAME" != "$HOSTNAME" && -n "$IP" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $IP {{ .Values.sentinel.port }} $(host_id "$NAME")" + add_replica "$IP" + fi + done + add_replica "$(hostname -i)" + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} + prestop-sentinel.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libvalidations.sh + + REDIS_SERVICE="{{ include "redis.fullname" . 
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + if [[ -n "$REDIS_PASSWORD_FILE" ]]; then + password_aux=$(cat "$REDIS_PASSWORD_FILE") + export REDIS_PASSWORD="$password_aux" + fi + + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a ${REDIS_PASSWORD} {{- end }} -h ${REDIS_SERVICE} -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a ${REDIS_PASSWORD} {{- end }} -h ${REDIS_SERVICE} -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST="${REDIS_SENTINEL_INFO[0]}" + + if [[ "$REDIS_MASTER_HOST" == "$(hostname -i)" ]]; then + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + redis-cli {{- if .Values.usePassword }} -a "$REDIS_PASSWORD" {{- end }} -h "$REDIS_SERVICE" -p {{ .Values.sentinel.port }} --tls --cert "$REDIS_SENTINEL_TLS_CERT_FILE" --key "$REDIS_SENTINEL_TLS_KEY_FILE" --cacert "$REDIS_SENTINEL_TLS_CA_FILE" sentinel failover mymaster + else + redis-cli {{- if .Values.usePassword }} -a "$REDIS_PASSWORD" {{- end }} -h "$REDIS_SERVICE" -p {{ .Values.sentinel.port }} sentinel failover mymaster + fi + fi +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/templates/configmap.yaml b/scripts/helmcharts/databases/charts/redis/templates/configmap.yaml new file mode 100644 index 000000000..77bdc81e8 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . 
| nindent 4 }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/headless-svc.yaml b/scripts/helmcharts/databases/charts/redis/templates/headless-svc.yaml new file mode 100644 index 000000000..d758c0d23 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: tcp-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/health-configmap.yaml b/scripts/helmcharts/databases/charts/redis/templates/health-configmap.yaml new file mode 100644 index 000000000..1bb8e74d9 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . 
}} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/scripts/helmcharts/databases/charts/redis/templates/metrics-prometheus.yaml b/scripts/helmcharts/databases/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 000000000..ed53dc6e2 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,39 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/templates/metrics-sentinel-prometheus.yaml b/scripts/helmcharts/databases/charts/redis/templates/metrics-sentinel-prometheus.yaml new file mode 100644 index 000000000..43cc2aa3c --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/metrics-sentinel-prometheus.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.sentinel.enabled .Values.sentinel.metrics.enabled .Values.sentinel.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "redis.fullname" . }}-sentinel-metrics + {{- if .Values.sentinel.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.sentinel.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ include "redis.name" . }} + chart: {{ include "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.sentinel.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: sentinelmetrics + {{- if .Values.sentinel.metrics.serviceMonitor.interval }} + interval: {{ .Values.sentinel.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ include "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "sentinel-metrics" + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/templates/metrics-sentinel-svc.yaml b/scripts/helmcharts/databases/charts/redis/templates/metrics-sentinel-svc.yaml new file mode 100644 index 000000000..25f3770de --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/metrics-sentinel-svc.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.sentinel.enabled .Values.sentinel.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "redis.fullname" . }}-sentinel-metrics + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "redis.name" . }} + chart: {{ include "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "sentinel-metrics" + {{- if .Values.sentinel.metrics.service.labels -}} + {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.metrics.service.labels "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.sentinel.metrics.service.annotations }} + annotations: {{- toYaml .Values.sentinel.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.sentinel.metrics.service.type }} + {{- if eq .Values.sentinel.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.metrics.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.sentinel.metrics.service.type "LoadBalancer") .Values.sentinel.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.sentinel.metrics.service.port }} + targetPort: sentinelmetrics + protocol: TCP + name: sentinelmetrics + selector: + app: {{ include "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/metrics-svc.yaml b/scripts/helmcharts/databases/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 000000000..767a464e5 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/networkpolicy.yaml b/scripts/helmcharts/databases/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 000000000..0249bc0e6 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/pdb.yaml b/scripts/helmcharts/databases/charts/redis/templates/pdb.yaml new file mode 100644 index 000000000..b9dc54b36 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "redis.fullname" . 
}} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/prometheusrule.yaml b/scripts/helmcharts/databases/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 000000000..48ae017f6 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . | nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) 
$ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/psp.yaml b/scripts/helmcharts/databases/charts/redis/templates/psp.yaml new file mode 100644 index 000000000..eca04c134 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-master-statefulset.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 000000000..80dc112a5 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,394 @@ +{{- if or (not .Values.cluster.enabled) (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . 
}}-master + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.statefulset.labels }} + {{- toYaml .Values.master.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.master.statefulset.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. 
+ timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" 
.Values.master.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if .Values.usePassword }} + - name: REDIS_USER + value: default + {{- if (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.redisPort }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto "}} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ include "redis.tplValue" (dict "value" .Values.persistence.existingClaim "context" $) }} + {{- end }} + {{- if .Values.master.persistence.volumes }} + {{- toYaml .Values.master.persistence.volumes | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + - name: tmp + emptyDir: {} + {{- if .Values.master.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and 
.Values.master.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.master.persistence.volumes) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- if .Values.master.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.master.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . }} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-master-svc.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 000000000..8bd2f8c1c --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service 
+metadata: + name: {{ template "redis.fullname" . }}-master + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-node-statefulset.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 000000000..9ede64175 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,556 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . 
}}-node + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.sentinel.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ 
.Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + 
readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.dhParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/prestop-sentinel.sh + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: 
/opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.sentinel.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if .Values.usePassword }} + - name: REDIS_USER + value: default + {{- if (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.redisPort }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . 
}} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.metrics.enabled }} + - name: sentinel-metrics + image: {{ include "sentinel.metrics.image" . }} + imagePullPolicy: {{ .Values.sentinel.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - redis_sentinel_exporter{{- range $key, $value := .Values.sentinel.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + {{- if and .Values.sentinel.usePassword (and .Values.usePassword (not .Values.usePasswordFile)) }} + - name: SENTINEL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "redis.secretName" . }} + key: {{ include "redis.secretPasswordKey" . 
}} + {{- end }} + {{- if and .Values.sentinel.usePassword .Values.usePassword .Values.usePasswordFile }} + - name: SENTINEL_PASSWORD_FILE + value: /secrets/redis-password + {{- end }} + volumeMounts: + {{- if and .Values.sentinel.usePassword .Values.usePassword .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: sentinelmetrics + containerPort: 9355 + resources: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.resources "context" $) | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ 
template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + - name: tmp + emptyDir: {} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.sentinel.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-role.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-role.yaml new file mode 100644 index 000000000..080a7f960 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . 
}} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-rolebinding.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 000000000..835aa0361 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-serviceaccount.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 000000000..081691de6 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-slave-statefulset.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 000000000..778cac5cf --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,398 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . 
}} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . 
}} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName | quote }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - 
/health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.slave.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if .Values.usePassword }} + - name: REDIS_USER + value: default + {{- if (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.redisPort }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . 
}}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.usePasswordFile }}
+ - name: redis-password
+ mountPath: /secrets/
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ mountPath: /opt/bitnami/redis/certs
+ readOnly: true
+ {{- end }}
+ ports:
+ - name: metrics
+ containerPort: 9121
+ resources: {{- toYaml .Values.metrics.resources | nindent 12 }}
+ {{- end }}
+ {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }}
+ {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }}
+ initContainers:
+ {{- if $needsVolumePermissions }}
+ - name: volume-permissions
+ image: {{ template "redis.volumePermissions.image" . }}
+ imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }}
+ command:
+ - /bin/bash
+ - -ec
+ - |
+ {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
+ chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }}
+ {{- else }}
+ chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }}
+ {{- end }}
+ {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }}
+ securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }}
+ {{- else }}
+ securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }}
+ {{- end }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ volumeMounts:
+ - name: redis-data
+ mountPath: {{ .Values.slave.persistence.path }}
+ subPath: {{ .Values.slave.persistence.subPath }}
+ {{- end }}
+ {{- if .Values.sysctlImage.enabled }}
+ - name: init-sysctl
+ image: {{ template "redis.sysctl.image" . 
}}
+ imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }}
+ resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }}
+ {{- if .Values.sysctlImage.mountHostSys }}
+ volumeMounts:
+ - name: host-sys
+ mountPath: /host-sys
+ {{- end }}
+ command: {{- toYaml .Values.sysctlImage.command | nindent 12 }}
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ {{- end }}
+ {{- end }}
+ volumes:
+ - name: start-scripts
+ configMap:
+ name: {{ include "redis.fullname" . }}-scripts
+ defaultMode: 0755
+ - name: health
+ configMap:
+ name: {{ template "redis.fullname" . }}-health
+ defaultMode: 0755
+ {{- if .Values.usePasswordFile }}
+ - name: redis-password
+ secret:
+ secretName: {{ template "redis.secretName" . }}
+ items:
+ - key: {{ template "redis.secretPasswordKey" . }}
+ path: redis-password
+ {{- end }}
+ - name: config
+ configMap:
+ name: {{ template "redis.fullname" . }}
+ {{- if .Values.sysctlImage.mountHostSys }}
+ - name: host-sys
+ hostPath:
+ path: /sys
+ {{- end }}
+ - name: redis-tmp-conf
+ emptyDir: {}
+ {{- if .Values.slave.extraVolumes }}
+ {{- include "common.tplvalues.render" ( dict "value" .Values.slave.extraVolumes "context" $ ) | nindent 8 }}
+ {{- end }}
+ {{- if .Values.tls.enabled }}
+ - name: redis-certificates
+ secret:
+ secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS is enabled" .Values.tls.certificatesSecret }}
+ defaultMode: 256
+ {{- end }}
+ {{- if not .Values.slave.persistence.enabled }}
+ - name: redis-data
+ emptyDir: {}
+ {{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: redis-data
+ labels:
+ app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-slave-svc.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 000000000..a67ebb05e --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . 
}}-slave + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{ if eq .Values.slave.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.slave.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/redis-with-sentinel-svc.yaml b/scripts/helmcharts/databases/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 000000000..e1c9073a4 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: tcp-redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: tcp-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/scripts/helmcharts/databases/charts/redis/templates/secret.yaml b/scripts/helmcharts/databases/charts/redis/templates/secret.yaml new file mode 100644 index 000000000..197aa1890 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . 
| b64enc | quote }} +{{- end -}} diff --git a/scripts/helmcharts/databases/charts/redis/values.schema.json b/scripts/helmcharts/databases/charts/redis/values.schema.json new file mode 100644 index 000000000..3188d0c93 --- /dev/null +++ b/scripts/helmcharts/databases/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": 
"object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/scripts/helmcharts/databases/charts/redis/values.yaml b/scripts/helmcharts/databases/charts/redis/values.yaml new file mode 100644 index 000000000..64e908211 --- 
/dev/null +++ b/scripts/helmcharts/databases/charts/redis/values.yaml @@ -0,0 +1,1008 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.12-debian-10-r33 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +## +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +## +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + ## + usePassword: true + ## Bitnami Redis(TM) Sentintel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.12-debian-10-r24 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 20000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + + ## Delay seconds when cleaning nodes IPs + ## When starting it will clean the sentinels IP (RESET "*") in all the nodes + ## This is the delay time before sending the command to the next node + ## + cleanDelaySeconds: 5 + + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replicas + ## If disabled each sentinel will generate a random id at startup + ## If enabled, each replicas will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + ## + service: + ## Redis(TM) Sentinel Service type + ## + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + + ## Additional commands to run prior to starting Redis(TM) node with sentinel + ## + preExecCmds: "" + + ## An array to add extra env var to the sentinel node configurations + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + + ## Prometheus Exporter / Metrics for Redis Sentinel Exporter + ## + metrics: + enabled: false + + ## Bitnami Redis Sentinel Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel-exporter + tag: 1.7.1-debian-10-r105 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Sentinel metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ ## e.g: + ## limits: + ## cpu: 500m + ## memory: 1Gi + ## + limits: {} + requests: {} + + ## Extra arguments for Sentinel metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Enable this if you're using https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + service: + type: ClusterIP + + ## Metrics port + ## + port: 9355 + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, otherwise leave blank + ## + # loadBalancerIP: + annotations: {} + labels: {} + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). 
+ ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + ## + name: + ## Add annotations to service account + # annotations: + # iam.gke.io/gcp-service-account: "sa@project.iam.gserviceaccount.com" + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +## +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +## +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +## +usePasswordFile: false + +## Persist data to a persistent volume (Redis(TM) Master) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before 
volume will be bound + ## + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. + authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. 
+ ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + ## + service: + ## Redis(TM) Master Service type + ## + type: ClusterIP + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## 
Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + volumes: + # - name: volume_name + # emptyDir: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: null + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + ## + service: + ## Redis(TM) Slave Service type + ## + type: ClusterIP + ## Redis(TM) port + ## + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + ## + port: 6379 + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + ## + extraFlags: [] + ## List of Redis(TM) commands to disable + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for images other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". 
+ ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + priorityClassName: null + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate or onDelete by default. 
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.20.0-debian-10-r12 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. 
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + # A way to specify an alternative redis hostname, if you set a local endpoint in hostAliases for example + # Useful for certificate CN/SAN matching + redisTargetHost: "localhost" + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + relabelings: [] + + ## MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evalued as a template + ## + metricRelabelings: [] + # - sourceLabels: + # - "__name__" + # targetLabel: "__name__" + # action: 
replace + # regex: '(.*)' + # replacement: 'example_prefix_$1' + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just examples rules, please adapt them to your needs. + ## Make sure to constraint the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. 
+ rules: [] + + ## Metrics exporter pod priorityClassName + priorityClassName: null + service: + type: ClusterIP + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). 
+ ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/scripts/helmcharts/databases/templates/NOTES.txt b/scripts/helmcharts/databases/templates/NOTES.txt new file mode 100644 index 000000000..0a725d663 --- /dev/null +++ b/scripts/helmcharts/databases/templates/NOTES.txt @@ -0,0 +1,5 @@ +To get minio credentials run: + + echo "AccessKey: `kubectl get secret --namespace {{ .Release.Namespace }} minio -o jsonpath="{.data.access-key}" | base64 --decode`" + echo "SecretKey: `kubectl get secret --namespace {{ .Release.Namespace }} minio -o 
jsonpath="{.data.secret-key}" | base64 --decode`" + diff --git a/scripts/helmcharts/databases/templates/_helpers.tpl b/scripts/helmcharts/databases/templates/_helpers.tpl new file mode 100644 index 000000000..066c03a17 --- /dev/null +++ b/scripts/helmcharts/databases/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "databases.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "databases.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "databases.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "databases.labels" -}} +helm.sh/chart: {{ include "databases.chart" . }} +{{ include "databases.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "databases.selectorLabels" -}} +app.kubernetes.io/name: {{ include "databases.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "databases.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "databases.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/databases/values.yaml b/scripts/helmcharts/databases/values.yaml new file mode 100644 index 000000000..6c7f28960 --- /dev/null +++ b/scripts/helmcharts/databases/values.yaml @@ -0,0 +1,142 @@ +# Default values for databases. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. 
This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + + + +## Child charts +redis: + enabled: true + fullnameOverride: redis + usePassword: false + cluster: + enabled: false + redis: + resources: + limits: + cpu: 250m + memory: 2Gi + requests: + cpu: 100m + memory: 128Mi + +postgresql: + # postgresqlPassword: asayerPostgres + fullnameOverride: postgresql + image: + tag: 13.5.0-debian-10-r62 + resources: + limits: + cpu: 1 + memory: 2Gi + requests: + cpu: 250m + memory: 256Mi + +minio: + # global: + # minio: + # accessKey: "{{ minio_access_key }}" + # secretKey: "{{ minio_secret_key }}" + fullnameOverride: minio + resources: + limits: + cpu: 256m + memory: 512Mi + requests: + cpu: 100m + memory: 128Mi + +kafka: + fullnameOverride: kafka + enabled: false + + +# Enterprise dbs +clickhouse: + image: + tag: "21.9.4.35" + enabled: false + +postgreql: + enabled: true + diff --git a/scripts/helmcharts/init.sh b/scripts/helmcharts/init.sh new file mode 100644 index 000000000..2529254b0 --- /dev/null +++ b/scripts/helmcharts/init.sh @@ -0,0 +1,89 @@ +#/bin/bash + +# --- helper functions for logs --- +info() +{ + echo '[INFO] ' "$@" +} +warn() +{ + echo '[WARN] ' "$@" >&2 +} +fatal() +{ + echo '[ERROR] ' "$@" >&2 + exit 1 +} + +version="v1.4.0" +usr=`whoami` + +# Installing k3s +curl -sL https://get.k3s.io | sudo K3S_KUBECONFIG_MODE="644" INSTALL_K3S_VERSION='v1.19.5+k3s2' INSTALL_K3S_EXEC="--no-deploy=traefik" sh - +mkdir ~/.kube +sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config +chmod 0644 
~/.kube/config +sudo chown -R $usr ~/.kube/config + + +## installing kubectl +which kubectl &> /dev/null || { + info "kubectl not installed. Installing it..." + sudo curl -SsL https://dl.k8s.io/release/v1.20.0/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl ; sudo chmod +x /usr/local/bin/kubectl +} + +## installing stern +which stern &> /dev/null || { + info "stern not installed. installing..." + sudo curl -SsL https://github.com/derdanne/stern/releases/download/2.1.16/stern_linux_amd64 -o /usr/local/bin/stern ; sudo chmod +x /usr/local/bin/stern +} + +## installing k9s +which k9s &> /dev/null || { + info "k9s not installed. Installing it..." + sudo curl -SsL https://github.com/derailed/k9s/releases/download/v0.24.2/k9s_Linux_x86_64.tar.gz -o /tmp/k9s.tar.gz + cd /tmp + tar -xf k9s.tar.gz + sudo mv k9s /usr/local/bin/k9s + sudo chmod +x /usr/local/bin/k9s + cd - +} + +## installing helm +which helm &> /dev/null +if [[ $? -ne 0 ]]; then + info "helm not installed. Installing it..." + curl -ssl https://get.helm.sh/helm-v3.4.2-linux-amd64.tar.gz -o /tmp/helm.tar.gz + tar -xf /tmp/helm.tar.gz + chmod +x linux-amd64/helm + sudo cp linux-amd64/helm /usr/local/bin/helm + rm -rf linux-amd64/helm /tmp/helm.tar.gz +fi + +## Installing openssl +sudo apt update &> /dev/null +sudo apt install openssl -y &> /dev/null + +randomPass() { + openssl rand -hex 10 +} + +## Prepping the infra + +[[ -z $DOMAIN_NAME ]] && { +fatal 'DOMAIN_NAME variable is empty. 
Rerun the script `DOMAIN_NAME=openreplay.mycomp.org bash init.sh `' +} + +info "Creating dynamic passwords" +sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"$(randomPass)\"/g" vars.yaml +sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"$(randomPass)\"/g" vars.yaml +sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"$(randomPass)\"/g" vars.yaml +sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"$(randomPass)\"/g" vars.yaml +sed -i "s/domainName: \"\"/domainName: \"${DOMAIN_NAME}\"/g" vars.yaml + + +## Installing OpenReplay +info "Installing databases" +helm upgrade --install databases ./databases -n db --create-namespace --wait -f ./vars.yaml --atomic +info "Installing application" +helm upgrade --install openreplay ./openreplay -n app --create-namespace --wait -f ./vars.yaml --atomic diff --git a/scripts/helmcharts/openreplay-cli b/scripts/helmcharts/openreplay-cli new file mode 120000 index 000000000..d299dbb4f --- /dev/null +++ b/scripts/helmcharts/openreplay-cli @@ -0,0 +1 @@ +../helm/openreplay-cli \ No newline at end of file diff --git a/scripts/helmcharts/openreplay/.helmignore b/scripts/helmcharts/openreplay/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/Chart.yaml b/scripts/helmcharts/openreplay/Chart.yaml new file mode 100644 index 000000000..d7bfd0074 --- /dev/null +++ b/scripts/helmcharts/openreplay/Chart.yaml @@ -0,0 +1,25 @@ +apiVersion: v2 +name: openreplay +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +# Ref: https://github.com/helm/helm/issues/7858#issuecomment-608114589 +AppVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/alerts/.helmignore b/scripts/helmcharts/openreplay/charts/alerts/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/alerts/Chart.yaml b/scripts/helmcharts/openreplay/charts/alerts/Chart.yaml new file mode 100644 index 000000000..4cda7945b --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: alerts +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+AppVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/alerts/templates/NOTES.txt new file mode 100644 index 000000000..4aded5587 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alerts.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "alerts.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "alerts.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alerts.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/alerts/templates/_helpers.tpl new file mode 100644 index 000000000..35ad32196 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "alerts.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "alerts.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "alerts.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "alerts.labels" -}} +helm.sh/chart: {{ include "alerts.chart" . }} +{{ include "alerts.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "alerts.selectorLabels" -}} +app.kubernetes.io/name: {{ include "alerts.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "alerts.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "alerts.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml new file mode 100644 index 000000000..f88266e66 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/deployment.yaml @@ -0,0 +1,106 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "alerts.fullname" . }} + labels: + {{- include "alerts.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "alerts.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "alerts.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "alerts.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: version_number + value: '{{ .Chart.AppVersion }}' + - name: pg_host + value: '{{ .Values.global.postgresql.postgresqlHost }}' + - name: pg_port + value: "5432" + - name: pg_dbname + value: "{{ .Values.global.postgresql.postgresqlDatabase }}" + - name: pg_user + value: '{{ .Values.global.postgresql.postgresqlUser }}' + - name: pg_password + value: '{{ .Values.global.postgresql.postgresqlPassword }}' + - name: SITE_URL + value: 'https://{{ .Values.global.domainName }}' + - name: S3_HOST + {{- if eq .Values.global.s3.endpoint "http://minio.db.svc.cluster.local:9000" }} + value: 'https://{{ .Values.global.domainName }}' + {{- else }} + value: '{{ .Values.global.s3.endpoint }}' + {{- end }} + - name: S3_KEY + value: {{ .Values.global.s3.accessKey }} + - name: S3_SECRET + value: {{ .Values.global.s3.secretKey }} + - name: AWS_DEFAULT_REGION + value: '{{ .Values.global.s3.region }}' + - name: EMAIL_HOST + value: '{{ .Values.global.email.emailHost }}' + - name: EMAIL_PORT + value: '{{ .Values.global.email.emailPort }}' + - name: EMAIL_USER + value: '{{ .Values.global.email.emailUser }}' + - name: EMAIL_PASSWORD + value: '{{ .Values.global.email.emailPassword }}' + - name: EMAIL_USE_TLS + value: '{{ .Values.global.email.emailUseTls }}' + - name: EMAIL_USE_SSL + value: '{{ .Values.global.email.emailUseSsl }}' + - name: EMAIL_SSL_KEY + value: '{{ .Values.global.email.emailSslKey }}' + - name: EMAIL_SSL_CERT + value: '{{ .Values.global.email.emailSslCert }}' 
+ - name: EMAIL_FROM + value: '{{ .Values.global.email.emailFrom }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end }} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/alerts/templates/hpa.yaml new file mode 100644 index 000000000..b25fef03d --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "alerts.fullname" . }} + labels: + {{- include "alerts.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "alerts.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/alerts/templates/ingress.yaml new file mode 100644 index 000000000..497e2ec3c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "alerts.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "alerts.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/service.yaml b/scripts/helmcharts/openreplay/charts/alerts/templates/service.yaml new file mode 100644 index 000000000..a002cfd1b --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "alerts.fullname" . }} + labels: + {{- include "alerts.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "alerts.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/alerts/templates/serviceaccount.yaml new file mode 100644 index 000000000..044787236 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "alerts.serviceAccountName" . }} + labels: + {{- include "alerts.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/alerts/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/alerts/templates/tests/test-connection.yaml new file mode 100644 index 000000000..05b0f0496 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "alerts.fullname" . }}-test-connection" + labels: + {{- include "alerts.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "alerts.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/alerts/values.yaml b/scripts/helmcharts/openreplay/charts/alerts/values.yaml new file mode 100644 index 000000000..294434fd8 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/alerts/values.yaml @@ -0,0 +1,87 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/alerts + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "" + +imagePullSecrets: [] +nameOverride: "alerts" +fullnameOverride: "alerts" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + PYTHONUNBUFFERED: '0' + + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/assets/.helmignore b/scripts/helmcharts/openreplay/charts/assets/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/assets/Chart.yaml b/scripts/helmcharts/openreplay/charts/assets/Chart.yaml new file mode 100644 index 000000000..fe932b71c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: assets +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/assets/templates/NOTES.txt new file mode 100644 index 000000000..1758d5c64 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "assets.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "assets.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "assets.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "assets.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/assets/templates/_helpers.tpl new file mode 100644 index 000000000..6d684eed5 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "assets.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "assets.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "assets.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "assets.labels" -}} +helm.sh/chart: {{ include "assets.chart" . }} +{{ include "assets.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "assets.selectorLabels" -}} +app.kubernetes.io/name: {{ include "assets.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "assets.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "assets.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml new file mode 100644 index 000000000..1af5164ca --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/templates/deployment.yaml @@ -0,0 +1,84 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "assets.fullname" . }} + labels: + {{- include "assets.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "assets.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "assets.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "assets.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_ACCESS_KEY_ID + value: {{ .Values.global.s3.accessKey }} + - name: AWS_SECRET_ACCESS_KEY + value: {{ .Values.global.s3.secretKey }} + - name: S3_BUCKET_ASSETS + value: {{ .Values.global.s3.assetsBucket }} + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: AWS_ENDPOINT + value: '{{ .Values.global.s3.endpoint }}' + - name: AWS_REGION + value: '{{ .Values.global.s3.region }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + # Ref: https://stackoverflow.com/questions/53634583/go-template-split-string-by-delimiter + # We need https://bucketname.s3endpoint + - name: ASSETS_ORIGIN + value: {{ (split "://" .Values.global.s3.endpoint)._0 }}://{{.Values.global.s3.assetsBucket}}.{{ (split "://" .Values.global.s3.endpoint)._1 }} + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/assets/templates/hpa.yaml new file mode 100644 index 000000000..7b271a20e --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "assets.fullname" . }} + labels: + {{- include "assets.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "assets.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/assets/templates/ingress.yaml new file mode 100644 index 000000000..62c421a9a --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "assets.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "assets.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/service.yaml b/scripts/helmcharts/openreplay/charts/assets/templates/service.yaml new file mode 100644 index 000000000..5e613c67e --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "assets.fullname" . }} + labels: + {{- include "assets.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "assets.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/assets/templates/serviceaccount.yaml new file mode 100644 index 000000000..90c3f5319 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "assets.serviceAccountName" . }} + labels: + {{- include "assets.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/assets/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/assets/templates/tests/test-connection.yaml new file mode 100644 index 000000000..ce67efb1c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "assets.fullname" . }}-test-connection" + labels: + {{- include "assets.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "assets.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/assets/values.yaml b/scripts/helmcharts/openreplay/charts/assets/values.yaml new file mode 100644 index 000000000..875d0450a --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/assets/values.yaml @@ -0,0 +1,84 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/assets + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "assets" +fullnameOverride: "assets" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: {} + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/chalice/.helmignore b/scripts/helmcharts/openreplay/charts/chalice/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/chalice/Chart.yaml b/scripts/helmcharts/openreplay/charts/chalice/Chart.yaml new file mode 100644 index 000000000..42ec55dec --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: chalice +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/chalice/templates/NOTES.txt new file mode 100644 index 000000000..ecc5a589c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. 
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "chalice.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "chalice.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "chalice.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "chalice.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/chalice/templates/_helpers.tpl new file mode 100644 index 000000000..27c1ff9f3 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "chalice.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "chalice.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "chalice.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "chalice.labels" -}} +helm.sh/chart: {{ include "chalice.chart" . }} +{{ include "chalice.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "chalice.selectorLabels" -}} +app.kubernetes.io/name: {{ include "chalice.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "chalice.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "chalice.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml new file mode 100644 index 000000000..f3eb46d1d --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/deployment.yaml @@ -0,0 +1,116 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "chalice.fullname" . }} + labels: + {{- include "chalice.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "chalice.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "chalice.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "chalice.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: version_number + value: '{{ .Chart.AppVersion }}' + - name: pg_host + value: '{{ .Values.global.postgresql.postgresqlHost }}' + - name: pg_port + value: "5432" + - name: pg_dbname + value: "{{ .Values.global.postgresql.postgresqlDatabase }}" + - name: pg_user + value: '{{ .Values.global.postgresql.postgresqlUser }}' + - name: pg_password + value: '{{ .Values.global.postgresql.postgresqlPassword }}' + - name: SITE_URL + value: 'https://{{ .Values.global.domainName }}' + - name: S3_HOST + {{- if eq .Values.global.s3.endpoint "http://minio.db.svc.cluster.local:9000" }} + value: 'https://{{ .Values.global.domainName }}' + {{- else}} + value: '{{ .Values.global.s3.endpoint }}' + {{- end}} + - name: S3_KEY + value: {{ .Values.global.s3.accessKey }} + - name: S3_SECRET + value: {{ .Values.global.s3.secretKey }} + - name: AWS_DEFAULT_REGION + value: '{{ .Values.global.s3.region }}' + - name: sessions_region + value: '{{ .Values.global.s3.region }}' + - name: sessions_bucket + value: {{ .Values.global.s3.recordingsBucket }} + - name: sourcemaps_bucket + value: {{ .Values.global.s3.sourcemapsBucket }} + - name: js_cache_bucket + value: {{ .Values.global.s3.assetsBucket }} + - name: EMAIL_HOST + value: '{{ .Values.global.email.emailHost }}' + - name: EMAIL_PORT + value: '{{ .Values.global.email.emailPort }}' + - name: EMAIL_USER + value: '{{ .Values.global.email.emailUser }}' + - 
name: EMAIL_PASSWORD + value: '{{ .Values.global.email.emailPassword }}' + - name: EMAIL_USE_TLS + value: '{{ .Values.global.email.emailUseTls }}' + - name: EMAIL_USE_SSL + value: '{{ .Values.global.email.emailUseSsl }}' + - name: EMAIL_SSL_KEY + value: '{{ .Values.global.email.emailSslKey }}' + - name: EMAIL_SSL_CERT + value: '{{ .Values.global.email.emailSslCert }}' + - name: EMAIL_FROM + value: '{{ .Values.global.email.emailFrom }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/hpa.yaml new file mode 100644 index 000000000..dc1cb12c1 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "chalice.fullname" . }} + labels: + {{- include "chalice.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "chalice.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/ingress.yaml new file mode 100644 index 000000000..8f680e949 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "chalice.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "chalice.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/service.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/service.yaml new file mode 100644 index 000000000..02b01d1ab --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chalice.fullname" . }} + labels: + {{- include "chalice.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "chalice.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/serviceaccount.yaml new file mode 100644 index 000000000..9af2e3ac4 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "chalice.serviceAccountName" . }} + labels: + {{- include "chalice.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/chalice/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/chalice/templates/tests/test-connection.yaml new file mode 100644 index 000000000..4e76f6cd3 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "chalice.fullname" . }}-test-connection" + labels: + {{- include "chalice.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "chalice.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/chalice/values.yaml b/scripts/helmcharts/openreplay/charts/chalice/values.yaml new file mode 100644 index 000000000..5e76420e8 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/chalice/values.yaml @@ -0,0 +1,106 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/chalice + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "" + +imagePullSecrets: [] +nameOverride: "chalice" +fullnameOverride: "chalice" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 8000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + ch_host: clickhouse.db.svc.cluster.local + ch_port: 9000 + captcha_server: '' + captcha_key: '' + async_Token: '' + announcement_url: '' + jwt_secret: "SetARandomStringHere" + jwt_algorithm: HS512 + jwt_exp_delta_seconds: '2592000' + # Enable logging for python app + # Ref: https://stackoverflow.com/questions/43969743/logs-in-kubernetes-pod-not-showing-up + PYTHONUNBUFFERED: '0' + SAML2_MD_URL: '' + idp_entityId: '' + idp_sso_url: '' + idp_x509cert: '' + idp_sls_url: '' + idp_name: '' + idp_tenantKey: '' + assist_secret: '' + iceServers: '' + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/db/.helmignore b/scripts/helmcharts/openreplay/charts/db/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/db/Chart.yaml b/scripts/helmcharts/openreplay/charts/db/Chart.yaml new file mode 100644 index 000000000..af2db8c06 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: db +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. 
+# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/db/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/db/templates/NOTES.txt new file mode 100644 index 000000000..b066541cf --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "db.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. 
+ You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "db.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "db.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "db.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/db/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/db/templates/_helpers.tpl new file mode 100644 index 000000000..8df84e9e2 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "db.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "db.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "db.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "db.labels" -}} +helm.sh/chart: {{ include "db.chart" . }} +{{ include "db.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "db.selectorLabels" -}} +app.kubernetes.io/name: {{ include "db.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "db.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "db.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml new file mode 100644 index 000000000..fd879d92f --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/templates/deployment.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "db.fullname" . }} + labels: + {{- include "db.labels" . 
| nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "db.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "db.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "db.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + - name: POSTGRES_STRING + value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:{{ .Values.global.postgresql.postgresqlPassword }}@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} 
+ nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/db/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/db/templates/hpa.yaml new file mode 100644 index 000000000..8dbac030f --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "db.fullname" . }} + labels: + {{- include "db.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "db.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/db/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/db/templates/ingress.yaml new file mode 100644 index 000000000..1cbf3640e --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "db.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "db.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/db/templates/service.yaml b/scripts/helmcharts/openreplay/charts/db/templates/service.yaml new file mode 100644 index 000000000..e0fcac464 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "db.fullname" . }} + labels: + {{- include "db.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "db.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/db/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/db/templates/serviceaccount.yaml new file mode 100644 index 000000000..80decdb84 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "db.serviceAccountName" . }} + labels: + {{- include "db.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/db/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/db/templates/tests/test-connection.yaml new file mode 100644 index 000000000..9dd1bdac9 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "db.fullname" . }}-test-connection" + labels: + {{- include "db.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "db.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/db/values.yaml b/scripts/helmcharts/openreplay/charts/db/values.yaml new file mode 100644 index 000000000..70c181ce2 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/db/values.yaml @@ -0,0 +1,86 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/db + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "db" +fullnameOverride: "db" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + CLICKHOUSE_STRING: tcp://clickhouse.db.svc.cluster.local:9000/default + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/ender/.helmignore b/scripts/helmcharts/openreplay/charts/ender/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/ender/Chart.yaml b/scripts/helmcharts/openreplay/charts/ender/Chart.yaml new file mode 100644 index 000000000..92e286a7d --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: ender +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/ender/templates/NOTES.txt new file mode 100644 index 000000000..13aaa8365 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. 
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "ender.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "ender.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "ender.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "ender.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/ender/templates/_helpers.tpl new file mode 100644 index 000000000..1d0dc291c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "ender.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ender.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ender.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ender.labels" -}} +helm.sh/chart: {{ include "ender.chart" . }} +{{ include "ender.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ender.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ender.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ender.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "ender.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml new file mode 100644 index 000000000..bbdb35c00 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/templates/deployment.yaml @@ -0,0 +1,70 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ender.fullname" . }} + labels: + {{- include "ender.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "ender.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "ender.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "ender.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/ender/templates/hpa.yaml new file mode 100644 index 000000000..62138535e --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "ender.fullname" . }} + labels: + {{- include "ender.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ender.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/ender/templates/ingress.yaml new file mode 100644 index 000000000..7f6cc557b --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "ender.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "ender.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/service.yaml b/scripts/helmcharts/openreplay/charts/ender/templates/service.yaml new file mode 100644 index 000000000..be0b79e9e --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "ender.fullname" . }} + labels: + {{- include "ender.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "ender.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/ender/templates/serviceaccount.yaml new file mode 100644 index 000000000..f85c1d07b --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ender.serviceAccountName" . }} + labels: + {{- include "ender.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/ender/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/ender/templates/tests/test-connection.yaml new file mode 100644 index 000000000..8fa7e3b55 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "ender.fullname" . }}-test-connection" + labels: + {{- include "ender.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "ender.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/ender/values.yaml b/scripts/helmcharts/openreplay/charts/ender/values.yaml new file mode 100644 index 000000000..d5a7c52b1 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/ender/values.yaml @@ -0,0 +1,85 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/ender + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "" + +imagePullSecrets: [] +nameOverride: "ender" +fullnameOverride: "ender" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: {} + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/http/.helmignore b/scripts/helmcharts/openreplay/charts/http/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/http/Chart.yaml b/scripts/helmcharts/openreplay/charts/http/Chart.yaml new file mode 100644 index 000000000..91b9ce66f --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: http +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/http/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/http/templates/NOTES.txt new file mode 100644 index 000000000..f3b24def1 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "http.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "http.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "http.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "http.name" .
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/http/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/http/templates/_helpers.tpl new file mode 100644 index 000000000..c695edd7e --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "http.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "http.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "http.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "http.labels" -}} +helm.sh/chart: {{ include "http.chart" . }} +{{ include "http.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "http.selectorLabels" -}} +app.kubernetes.io/name: {{ include "http.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "http.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "http.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml new file mode 100644 index 000000000..d728b7d02 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/templates/deployment.yaml @@ -0,0 +1,80 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "http.fullname" . }} + labels: + {{- include "http.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "http.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "http.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "http.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_ACCESS_KEY_ID + value: {{ .Values.global.s3.accessKey }} + - name: AWS_SECRET_ACCESS_KEY + value: {{ .Values.global.s3.secretKey }} + - name: AWS_REGION + value: '{{ .Values.global.s3.region }}' + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + - name: POSTGRES_STRING + value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:{{ .Values.global.postgresql.postgresqlPassword }}@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' + - name: ASSETS_ORIGIN + value: {{ (split "://" .Values.global.s3.endpoint)._0 }}://{{.Values.global.s3.assetsBucket}}.{{ (split "://" .Values.global.s3.endpoint)._1 }} + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . 
| nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/http/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/http/templates/hpa.yaml new file mode 100644 index 000000000..8e88b2807 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "http.fullname" . }} + labels: + {{- include "http.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "http.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/http/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/http/templates/ingress.yaml new file mode 100644 index 000000000..51a20e2a6 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "http.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "http.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/http/templates/service.yaml b/scripts/helmcharts/openreplay/charts/http/templates/service.yaml new file mode 100644 index 000000000..f33db7394 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "http.fullname" . }} + labels: + {{- include "http.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "http.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/http/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/http/templates/serviceaccount.yaml new file mode 100644 index 000000000..7c55e5954 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "http.serviceAccountName" . }} + labels: + {{- include "http.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/http/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/http/templates/tests/test-connection.yaml new file mode 100644 index 000000000..0f29e5a06 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "http.fullname" . }}-test-connection" + labels: + {{- include "http.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "http.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/http/values.yaml b/scripts/helmcharts/openreplay/charts/http/values.yaml new file mode 100644 index 000000000..e841b4cac --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/http/values.yaml @@ -0,0 +1,89 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/http + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "http" +fullnameOverride: "http" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + TOKEN_SECRET: secret_token_string # TODO: generate on build + S3_BUCKET_IOS_IMAGES: sessions-mobile-assets + CACHE_ASSETS: false + HTTP_PORT: 80 + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/integrations/.helmignore b/scripts/helmcharts/openreplay/charts/integrations/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line.
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/integrations/Chart.yaml b/scripts/helmcharts/openreplay/charts/integrations/Chart.yaml new file mode 100644 index 000000000..a643daa69 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: integrations +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/integrations/templates/NOTES.txt new file mode 100644 index 000000000..b7af1aaf2 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/NOTES.txt @@ -0,0 +1,22 @@ +1.
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "integrations.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "integrations.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "integrations.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "integrations.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/integrations/templates/_helpers.tpl new file mode 100644 index 000000000..cdeef0f54 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "integrations.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "integrations.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "integrations.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "integrations.labels" -}} +helm.sh/chart: {{ include "integrations.chart" . }} +{{ include "integrations.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "integrations.selectorLabels" -}} +app.kubernetes.io/name: {{ include "integrations.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "integrations.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "integrations.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml new file mode 100644 index 000000000..942f8c0a5 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/deployment.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "integrations.fullname" . }} + labels: + {{- include "integrations.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "integrations.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "integrations.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "integrations.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + - name: POSTGRES_STRING + value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:{{ .Values.global.postgresql.postgresqlPassword }}@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/integrations/templates/hpa.yaml new file mode 100644 index 000000000..cb9cf17db --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "integrations.fullname" . }} + labels: + {{- include "integrations.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "integrations.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/integrations/templates/ingress.yaml new file mode 100644 index 000000000..236409677 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "integrations.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "integrations.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/service.yaml b/scripts/helmcharts/openreplay/charts/integrations/templates/service.yaml new file mode 100644 index 000000000..77f0bb3c7 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "integrations.fullname" . }} + labels: + {{- include "integrations.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "integrations.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/integrations/templates/serviceaccount.yaml new file mode 100644 index 000000000..0bb50f48d --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "integrations.serviceAccountName" . }} + labels: + {{- include "integrations.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/integrations/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/integrations/templates/tests/test-connection.yaml new file mode 100644 index 000000000..ad5e040b5 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "integrations.fullname" . }}-test-connection" + labels: + {{- include "integrations.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "integrations.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/integrations/values.yaml b/scripts/helmcharts/openreplay/charts/integrations/values.yaml new file mode 100644 index 000000000..e35b79308 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/integrations/values.yaml @@ -0,0 +1,86 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/integrations + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "integrations" +fullnameOverride: "integrations" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + TOKEN_SECRET: secret_token_string # TODO: generate on build + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/.helmignore b/scripts/helmcharts/openreplay/charts/nginx-ingress/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/Chart.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/Chart.yaml new file mode 100644 index 000000000..3c824500e --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: nginx-ingress +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. 
+appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/files/site.crt b/scripts/helmcharts/openreplay/charts/nginx-ingress/files/site.crt new file mode 120000 index 000000000..12e23824a --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/files/site.crt @@ -0,0 +1 @@ +../../../files/site.crt \ No newline at end of file diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/files/site.key b/scripts/helmcharts/openreplay/charts/nginx-ingress/files/site.key new file mode 120000 index 000000000..3805a27d1 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/files/site.key @@ -0,0 +1 @@ +../../../files/site.key \ No newline at end of file diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/NOTES.txt new file mode 100644 index 000000000..8125afe9c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nginx-ingress.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nginx-ingress.fullname" . 
}}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nginx-ingress.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "nginx-ingress.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/_helpers.tpl new file mode 100644 index 000000000..b0f2808f1 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "nginx-ingress.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "nginx-ingress.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "nginx-ingress.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "nginx-ingress.labels" -}} +helm.sh/chart: {{ include "nginx-ingress.chart" . }} +{{ include "nginx-ingress.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "nginx-ingress.selectorLabels" -}} +app.kubernetes.io/name: {{ include "nginx-ingress.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "nginx-ingress.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "nginx-ingress.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml new file mode 100644 index 000000000..05e9a303d --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml @@ -0,0 +1,160 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx + namespace: {{ .Release.Namespace }} +data: + location.list: |- + location ~* /general_stats { + deny all; + } + location /healthz { + return 200 'OK'; + } + location ~ ^/(mobs|sessions-assets|frontend|static|sourcemaps|ios-images)/ { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $http_host; + + proxy_connect_timeout 300; + # Default is HTTP/1, keepalive is only enabled in HTTP/1.1 + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + + proxy_pass http://minio.db.svc.cluster.local:9000; + } + + location /minio/ { + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_pass http://minio.db.svc.cluster.local:9000; + } + location /ingest/ { + rewrite ^/ingest/(.*) /$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header X-Forwarded-For $real_ip; + proxy_set_header X-Forwarded-Host $real_ip; + proxy_set_header X-Real-IP $real_ip; + proxy_set_header Host $host; + proxy_pass http://http-openreplay.app.svc.cluster.local; + proxy_read_timeout 300; + proxy_connect_timeout 120; + proxy_send_timeout 300; + } + location /grafana { + set $target http://monitoring-grafana.monitoring.svc.cluster.local; + rewrite ^/grafana/(.*) /$1 break; + 
proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_pass $target; + } + location /api/ { + rewrite ^/api/(.*) /$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $origin_proto; + proxy_pass http://chalice-openreplay.app.svc.cluster.local:8000; + } + location /assist/ { + rewrite ^/assist/(.*) /$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_pass http://utilities-openreplay.app.svc.cluster.local:9000; + } + location /assets/ { + rewrite ^/assets/(.*) /sessions-assets/$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_pass http://minio.db.svc.cluster.local:9000; + } + location / { + index /index.html; + rewrite ^((?!.(js|css|png|svg|jpg|woff|woff2)).)*$ /frontend/index.html break; + proxy_http_version 1.1; + proxy_set_header Connection ""; + include /etc/nginx/conf.d/compression.conf; + proxy_set_header Host $http_host; + proxy_pass http://minio.db.svc.cluster.local:9000/frontend/; + proxy_intercept_errors on; # see http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors + error_page 404 =200 /index.html; + } + compression.conf: |- + # Compression + gzip on; + gzip_comp_level 5; + gzip_min_length 256; # 256Bytes + gzip_proxied any; + gzip_vary on; + # Content types for compression + gzip_types + application/atom+xml + application/javascript + application/json + application/ld+json + application/manifest+json + application/rss+xml + application/vnd.geo+json + application/vnd.ms-fontobject + application/x-font-ttf + application/x-web-app-manifest+json + application/xhtml+xml + application/xml + 
font/opentype + image/bmp + image/svg+xml + image/x-icon + text/cache-manifest + text/css + text/plain + ; + + sites.conf: |- + # Ref: https://github.com/openresty/openresty/#resolvconf-parsing + resolver local=on; + # Need real ip address for flags in replay. + # Some LBs will forward real ips as x-forwarded-for + # So making that as priority + map $http_x_forwarded_for $real_ip { + ~^(\d+\.\d+\.\d+\.\d+) $1; + default $remote_addr; + } + map $http_upgrade $connection_upgrade { + default upgrade; + '' close; + } + map $http_x_forwarded_proto $origin_proto { + default $http_x_forwarded_proto; + '' $scheme; + } + server { + listen 80 default_server; + listen [::]:80 default_server; + # server_name _; + {{ .Values.customServerConfigs }} + include /etc/nginx/conf.d/location.list; + client_max_body_size 10M; + } + server { + listen 443 ssl; + ssl_certificate /etc/secrets/site.crt; + ssl_certificate_key /etc/secrets/site.key; + ssl_protocols TLSv1.2 TLSv1.3; + ssl_ciphers "EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA384 EECDH+ECDSA+SHA256 EECDH+aRSA+SHA384 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EECDH EDH+aRSA HIGH !RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS"; + include /etc/nginx/conf.d/location.list; + client_max_body_size 10M; + } + diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/deployment.yaml new file mode 100644 index 000000000..7a64f5886 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/deployment.yaml @@ -0,0 +1,77 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "nginx-ingress.fullname" . }} + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "nginx-ingress.selectorLabels" . 
| nindent 6 }} + template: + metadata: + annotations: + nginxRolloutID: {{ randAlphaNum 5 | quote }} # Restart nginx after every deployment + {{- with .Values.podAnnotations }} + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "nginx-ingress.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "nginx-ingress.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: https + containerPort: 443 + protocol: TCP + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: http + readinessProbe: + httpGet: + path: /healthz + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: nginx + mountPath: /etc/nginx/conf.d/ + - name: ssl + mountPath: /etc/secrets/ + volumes: + - name: nginx + configMap: + name: nginx + - name: ssl + secret: + secretName: ssl + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/hpa.yaml new file mode 100644 index 000000000..348f8f95b --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "nginx-ingress.fullname" . }} + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "nginx-ingress.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/ingress.yaml new file mode 100644 index 000000000..63cfce077 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "nginx-ingress.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/secrets.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/secrets.yaml new file mode 100644 index 000000000..91b7cc09c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/secrets.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: ssl +data: + ca.crt: '' + site.crt: '{{ .Files.Get "files/site.crt" | b64enc }}' + site.key: '{{ .Files.Get "files/site.key" | b64enc }}' diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/service.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/service.yaml new file mode 100644 index 000000000..35d6d969c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/service.yaml @@ -0,0 +1,21 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nginx-ingress.fullname" . }} + labels: + {{- include "nginx-ingress.labels" . 
| nindent 4 }} +spec: + type: {{ .Values.service.type }} + {{- if or (eq .Values.service.type "LoadBalancer") (eq .Values.service.type "NodePort")}} + # Make sure to get client ip + externalTrafficPolicy: Local + {{- end}} + ports: + {{- range .Values.service.ports }} + - port: {{ .port }} + targetPort: {{ .targetPort }} + protocol: TCP + name: {{ .targetPort }} + {{- end }} + selector: + {{- include "nginx-ingress.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/serviceaccount.yaml new file mode 100644 index 000000000..bc0091029 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "nginx-ingress.serviceAccountName" . }} + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/tests/test-connection.yaml new file mode 100644 index 000000000..074cec518 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "nginx-ingress.fullname" . }}-test-connection" + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "nginx-ingress.fullname" . 
}}:{{ (index .Values.service.ports 0).port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/nginx-ingress/values.yaml b/scripts/helmcharts/openreplay/charts/nginx-ingress/values.yaml new file mode 100644 index 000000000..821ad9e3c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/nginx-ingress/values.yaml @@ -0,0 +1,86 @@ +# Default values for nginx-ingress. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: LoadBalancer + ports: + - port: 80 + targetPort: http + - port: 443 + targetPort: https + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/sink/.helmignore b/scripts/helmcharts/openreplay/charts/sink/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/sink/Chart.yaml b/scripts/helmcharts/openreplay/charts/sink/Chart.yaml new file mode 100644 index 000000000..2b9f71d56 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: sink +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. 
+# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/sink/templates/NOTES.txt new file mode 100644 index 000000000..e49e60d4c --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "sink.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "sink.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "sink.fullname" .
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "sink.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/sink/templates/_helpers.tpl new file mode 100644 index 000000000..39f92ccc1 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "sink.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "sink.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "sink.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "sink.labels" -}} +helm.sh/chart: {{ include "sink.chart" . }} +{{ include "sink.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "sink.selectorLabels" -}} +app.kubernetes.io/name: {{ include "sink.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "sink.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "sink.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml new file mode 100644 index 000000000..ced1ee0c5 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "sink.fullname" . }} + labels: + {{- include "sink.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "sink.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "sink.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "sink.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: /mnt/efs + {{- if eq .Values.pvc.name "hostPath" }} + volumes: + - name: datadir + hostPath: + # Ensure the file directory is created. + path: {{ .Values.pvc.hostMountPath }} + type: DirectoryOrCreate + {{- else }} + volumes: + - name: datadir + persistentVolumeClaim: + claimName: {{ .Values.pvc.name }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . 
| nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/sink/templates/hpa.yaml new file mode 100644 index 000000000..8f9a98f79 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "sink.fullname" . }} + labels: + {{- include "sink.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "sink.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/sink/templates/ingress.yaml new file mode 100644 index 000000000..ac5b25ba2 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "sink.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "sink.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . 
| quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/service.yaml b/scripts/helmcharts/openreplay/charts/sink/templates/service.yaml new file mode 100644 index 000000000..d2c0870c3 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "sink.fullname" . }} + labels: + {{- include "sink.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "sink.selectorLabels" . | nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/sink/templates/serviceaccount.yaml new file mode 100644 index 000000000..34986e78d --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "sink.serviceAccountName" . }} + labels: + {{- include "sink.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/sink/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/sink/templates/tests/test-connection.yaml new file mode 100644 index 000000000..248381268 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "sink.fullname" . }}-test-connection" + labels: + {{- include "sink.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "sink.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/sink/values.yaml b/scripts/helmcharts/openreplay/charts/sink/values.yaml new file mode 100644 index 000000000..9d55ee370 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/sink/values.yaml @@ -0,0 +1,92 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/sink + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "sink" +fullnameOverride: "sink" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. 
+ # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: {} + +pvc: + # This can be either persistentVolumeClaim or hostPath. + # In case of pvc, you'll have to provide the pvc name. + # For example + # name: openreplay-efs + name: hostPath + hostMountPath: /openreplay/storage/nfs + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/storage/.helmignore b/scripts/helmcharts/openreplay/charts/storage/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/storage/Chart.yaml b/scripts/helmcharts/openreplay/charts/storage/Chart.yaml new file mode 100644 index 000000000..329af9ad0 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: storage +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/storage/templates/NOTES.txt new file mode 100644 index 000000000..217426ab3 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/templates/NOTES.txt @@ -0,0 +1,22 @@ +1.
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "storage.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of it by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "storage.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "storage.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "storage.name" .
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/storage/templates/_helpers.tpl new file mode 100644 index 000000000..9f87a2965 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "storage.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "storage.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "storage.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "storage.labels" -}} +helm.sh/chart: {{ include "storage.chart" . }} +{{ include "storage.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "storage.selectorLabels" -}} +app.kubernetes.io/name: {{ include "storage.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "storage.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "storage.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml new file mode 100644 index 000000000..0004bfc7f --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/templates/deployment.yaml @@ -0,0 +1,102 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "storage.fullname" . }} + labels: + {{- include "storage.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "storage.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "storage.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "storage.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_ACCESS_KEY_ID + value: {{ .Values.global.s3.accessKey }} + - name: AWS_SECRET_ACCESS_KEY + value: {{ .Values.global.s3.secretKey }} + - name: AWS_ENDPOINT + value: '{{ .Values.global.s3.endpoint }}' + - name: AWS_REGION_WEB + value: '{{ .Values.global.s3.region }}' + - name: AWS_REGION_IOS + value: '{{ .Values.global.s3.region }}' + - name: S3_BUCKET_WEB + value: {{ .Values.global.s3.recordingsBucket }} + - name: S3_BUCKET_IOS + value: {{ .Values.global.s3.recordingsBucket }} + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: /mnt/efs + {{- if eq .Values.pvc.name "hostPath" }} + volumes: + - name: datadir + hostPath: + # Ensure the file directory is created. 
+ path: {{ .Values.pvc.hostMountPath }} + type: DirectoryOrCreate + {{- else }} + volumes: + - name: datadir + persistentVolumeClaim: + claimName: {{ .Values.pvc.name }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/storage/templates/hpa.yaml new file mode 100644 index 000000000..d015e4bb2 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "storage.fullname" . }} + labels: + {{- include "storage.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "storage.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/storage/templates/ingress.yaml new file mode 100644 index 000000000..10bf66e98 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "storage.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "storage.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/service.yaml b/scripts/helmcharts/openreplay/charts/storage/templates/service.yaml new file mode 100644 index 000000000..f55083c92 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "storage.fullname" . }} + labels: + {{- include "storage.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "storage.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/storage/templates/serviceaccount.yaml new file mode 100644 index 000000000..a361acc50 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "storage.serviceAccountName" . }} + labels: + {{- include "storage.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/storage/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/storage/templates/tests/test-connection.yaml new file mode 100644 index 000000000..84b0a6786 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "storage.fullname" . }}-test-connection" + labels: + {{- include "storage.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "storage.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/storage/values.yaml b/scripts/helmcharts/openreplay/charts/storage/values.yaml new file mode 100644 index 000000000..9f1dbfd91 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/storage/values.yaml @@ -0,0 +1,93 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/storage + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "" + +imagePullSecrets: [] +nameOverride: "storage" +fullnameOverride: "storage" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + FS_CLEAN_HRS: 24 + +pvc: + # This can be either persistentVolumeClaim or hostPath. + # In case of pvc, you'll have to provide the pvc name. 
+ # For example + # name: openreplay-efs + name: hostPath + hostMountPath: /openreplay/storage/nfs + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/charts/utilities/.helmignore b/scripts/helmcharts/openreplay/charts/utilities/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helmcharts/openreplay/charts/utilities/Chart.yaml b/scripts/helmcharts/openreplay/charts/utilities/Chart.yaml new file mode 100644 index 000000000..4c00e90b2 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: utilities +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. 
This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +AppVersion: "v1.4.0" diff --git a/scripts/helmcharts/openreplay/charts/utilities/templates/NOTES.txt b/scripts/helmcharts/openreplay/charts/utilities/templates/NOTES.txt new file mode 100644 index 000000000..323bc20c1 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "utilities.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status of by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "utilities.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "utilities.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "utilities.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/utilities/templates/_helpers.tpl b/scripts/helmcharts/openreplay/charts/utilities/templates/_helpers.tpl new file mode 100644 index 000000000..8999db4be --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "utilities.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "utilities.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "utilities.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "utilities.labels" -}} +helm.sh/chart: {{ include "utilities.chart" . }} +{{ include "utilities.selectorLabels" . 
}} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "utilities.selectorLabels" -}} +app.kubernetes.io/name: {{ include "utilities.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "utilities.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "utilities.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/utilities/templates/deployment.yaml b/scripts/helmcharts/openreplay/charts/utilities/templates/deployment.yaml new file mode 100644 index 000000000..a1b005253 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/templates/deployment.yaml @@ -0,0 +1,74 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "utilities.fullname" . }} + labels: + {{- include "utilities.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "utilities.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "utilities.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "utilities.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_DEFAULT_REGION + value: "{{ .Values.global.s3.region }}" + - name: S3_HOST + {{- if eq .Values.global.s3.endpoint "http://minio.db.svc.cluster.local:9000" }} + value: 'https://{{ .Values.global.domainName }}' + {{- else}} + value: '{{ .Values.global.s3.endpoint }}' + {{- end}} + - name: S3_KEY + value: {{ .Values.global.s3.accessKey }} + - name: S3_SECRET + value: {{ .Values.global.s3.secretKey }} + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helmcharts/openreplay/charts/utilities/templates/hpa.yaml b/scripts/helmcharts/openreplay/charts/utilities/templates/hpa.yaml new file mode 100644 index 000000000..8944056ea --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "utilities.fullname" . }} + labels: + {{- include "utilities.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "utilities.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/utilities/templates/ingress.yaml b/scripts/helmcharts/openreplay/charts/utilities/templates/ingress.yaml new file mode 100644 index 000000000..567cac846 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "utilities.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "utilities.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . 
| nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/utilities/templates/service.yaml b/scripts/helmcharts/openreplay/charts/utilities/templates/service.yaml new file mode 100644 index 000000000..c9afad7d5 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "utilities.fullname" . }} + labels: + {{- include "utilities.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "utilities.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helmcharts/openreplay/charts/utilities/templates/serviceaccount.yaml b/scripts/helmcharts/openreplay/charts/utilities/templates/serviceaccount.yaml new file mode 100644 index 000000000..dd5c35012 --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "utilities.serviceAccountName" . }} + labels: + {{- include "utilities.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helmcharts/openreplay/charts/utilities/templates/tests/test-connection.yaml b/scripts/helmcharts/openreplay/charts/utilities/templates/tests/test-connection.yaml new file mode 100644 index 000000000..44b72f68d --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "utilities.fullname" . }}-test-connection" + labels: + {{- include "utilities.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "utilities.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helmcharts/openreplay/charts/utilities/values.yaml b/scripts/helmcharts/openreplay/charts/utilities/values.yaml new file mode 100644 index 000000000..6c3e0056f --- /dev/null +++ b/scripts/helmcharts/openreplay/charts/utilities/values.yaml @@ -0,0 +1,85 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/utilities + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. 
+ tag: "" + +imagePullSecrets: [] +nameOverride: "utilities" +fullnameOverride: "utilities" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: {} + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helmcharts/openreplay/files/clickhouse.sh b/scripts/helmcharts/openreplay/files/clickhouse.sh new file mode 100644 index 000000000..503fd97bf --- /dev/null +++ b/scripts/helmcharts/openreplay/files/clickhouse.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +set -e + +clickhousedir=/opt/openreplay/openreplay/scripts/helm/db/init_dbs/clickhouse + +function migrate() { + echo "Starting clickhouse migration" + migration_versions=$1 + for version in $migration_versions; do + echo "Migrating clickhouse version $version" + # For now, we can ignore the clickhouse db inject errors. + # TODO: Better error handling in script + clickhouse-client -h clickhouse.db.svc.cluster.local --port 9000 < ${clickhousedir}/${version}/${version}.sql || true + done +} + +function init() { + echo "Initializing clickhouse" + for file in `ls ${clickhousedir}/create/*.sql`; do + echo "Injecting $file" + clickhouse-client -h clickhouse.db.svc.cluster.local --port 9000 < $file || true + done +} + +# /bin/bash clickhouse.sh migrate $migration_versions +case "$1" in + migrate) + migrate $2 + ;; + init) + init + ;; + *) + echo "Unknown operation for clickhouse migration; exiting." + exit 1 + ;; +esac diff --git a/scripts/helmcharts/openreplay/files/dbops.sh b/scripts/helmcharts/openreplay/files/dbops.sh new file mode 100644 index 000000000..542b6ffcf --- /dev/null +++ b/scripts/helmcharts/openreplay/files/dbops.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +cd $(dirname $0) + +function migration() { + ls -la /opt/openreplay/openreplay + db=$1 + + # Checking if previous app version is set. + if [[ $PREVIOUS_APP_VERSION == "" ]]; then + echo "Previous app version to be migrated is not set. 
Rerun using --set fromVersion=v1.3.5" + exit 100 + fi + + if [[ $PREVIOUS_APP_VERSION == $CHART_APP_VERSION ]]; then + echo "No application version change. Not upgrading." + exit 0 + fi + + # Checking migration versions + cd /opt/openreplay/openreplay/scripts/helm + migration_versions=(`ls -l db/init_dbs/$db | grep -E ^d | awk -v number=${PREVIOUS_APP_VERSION} '$NF > number {print $NF}' | grep -v create`) + echo "Migration version: $migration_versions" + + cd - + + case "$1" in + postgresql) + /bin/bash postgresql.sh migrate $migration_versions + ;; + minio) + /bin/bash minio.sh migrate $migration_versions + ;; + clickhouse) + /bin/bash clickhouse.sh migrate $migration_versions + ;; + kafka) + /bin/bash kafka.sh migrate $migration_versions + ;; + *) + echo "Unknown operation for db migration; exiting." + exit 1 + ;; + esac +} + +function init(){ + case $1 in + postgresql) + /bin/bash postgresql.sh init + ;; + minio) + /bin/bash minio.sh init + ;; + clickhouse) + /bin/bash clickhouse.sh init + ;; + kafka) + /bin/bash kafka.sh init + ;; + *) + echo "Unknown operation for db init; exiting." + exit 1 + ;; + + esac +} + + +# dbops.sh true(upgrade) clickhouse +case "$1" in + "false") + init $2 + ;; + "true") + migration $2 + ;; + *) + echo "Unknown operation for db migration; exiting." + exit 1 + ;; +esac diff --git a/scripts/helmcharts/openreplay/files/kafka.sh b/scripts/helmcharts/openreplay/files/kafka.sh new file mode 100644 index 000000000..1c811eb5d --- /dev/null +++ b/scripts/helmcharts/openreplay/files/kafka.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +set -e + +topics=( + "raw" + "raw-ios" + "trigger" + "cache" + "analytics" + ) + +touch /tmp/config.txt + +if [[ $KAFKA_SSL == "true" ]]; then + echo 'security.protocol=SSL' > /tmp/config.txt +fi + +function init() { + echo "Initializing kafka" + for topic in ${topics[*]}; do + echo "Creating topic: $topic" + # TODO: Have to check an idempotent way of creating topics. 
+ kafka-topics.sh --create --bootstrap-server ${KAFKA_HOST}:${KAFKA_PORT} --replication-factor 2 --partitions 16 --topic ${topic} --command-config /tmp/config.txt || true + kafka-configs.sh --bootstrap-server ${KAFKA_HOST}:${KAFKA_PORT} --entity-type topics --alter --add-config retention.ms=3456000000 --entity-name=${topic} --command-config /tmp/config.txt || true + done +} + +# /bin/bash kafka.sh migrate $migration_versions +case "$1" in + migrate) + init + ;; + init) + init + ;; + *) + echo "Unknown operation for kafka migration; exiting." + exit 1 + ;; +esac diff --git a/scripts/helmcharts/openreplay/files/minio.sh b/scripts/helmcharts/openreplay/files/minio.sh new file mode 100644 index 000000000..dc8fbdbb0 --- /dev/null +++ b/scripts/helmcharts/openreplay/files/minio.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +set -e + + +cd /tmp + +buckets=("mobs" "sessions-assets" "static" "sourcemaps" "sessions-mobile-assets") + +mc alias set minio http://minio.db.svc.cluster.local:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY + +function init() { +echo "Initializing minio" + +for bucket in ${buckets[*]}; do +mc mb minio/${bucket} || true +mc ilm import minio/${bucket} <' + + enterpriseEditionLicense: "" + domainName: "" + +# If there is multiple nodes in the kubernetes cluster, +# we'll have to create a NFS share PVC for both the containers to share data. +# If it's the single node, we'll use hostVolume, which is default for community installation. +# Note: Both PVC name should be same. 
+# sink: +# pvc: +# name: mysharedpersistence +# storage: +# pvc: +# name: mysharedpersistence + +chalice: + env: + jwt_secret: "SetARandomStringHere" + # captcha_server: '' + # captcha_key: '' + # SAML2_MD_URL: '' + # idp_entityId: '' + # idp_sso_url: '' + # idp_x509cert: '' + # idp_sls_url: '' + # idp_name: '' + # idp_tenantKey: '' + +# If you want to override something +# chartname: +# filedFrom chart/Values.yaml: +# key: value +# +# For example (http): +# http: +# resources: +# limits: +# cpu: 1024m +# memory: 4096Mi +# requests: +# cpu: 512m +# memory: 2056Mi + +## Changes to nginx +# +# nginx-ingress: +# # Key and certificate files must be named site.key and site.crt +# # and copied to ../openreplay/files/ +# sslKey: site.key +# sslCert: site.crt +# # Redirecting http to https +# customServerConfigs: | +# return 301 https://$host$request_uri; diff --git a/scripts/helmcharts/vars_template.yaml b/scripts/helmcharts/vars_template.yaml new file mode 100644 index 000000000..2230e0a75 --- /dev/null +++ b/scripts/helmcharts/vars_template.yaml @@ -0,0 +1,111 @@ +fromVersion: "{{ openreplay_version }}" +# Databases specific variables +postgresql: &postgres + # For generating passwords + # `openssl rand -hex 20` + postgresqlPassword: "{{ postgres_db_password }}" + postgresqlHost: "{{ postgres_endpoint }}" + postgresqlPort: "5432" + postgresqlUser: "{{ postgres_db_user }}" + postgresqlDatabase: "{{ postgres_db_name }}" + # resources: + # requests: + # memory: 256Mi + # cpu: 250m + # limits: + # memory: 3000Mi + # cpu: 2 + +clickhouse: {} + # For enterpriseEdition + # enabled: true + +kafka: &kafka + # For enterpriseEdition + # enabled: true + + kafkaHost: "{{ kafka_endpoint.split(':')[0] }}" + kafkaPort: "{{ kafka_endpoint.split(':')[-1] }}" + kafkaUseSsl: "{{ kafka_ssl }}" + +redis: &redis + # For enterpriseEdition + # enabled: false + redisHost: "{{ redis_endpoint.split(':')[0] }}" + redisPort: "{{ redis_endpoint.split(':')[-1] }}" + +minio: + global: + minio: + # For 
generating passwords + # `openssl rand -hex 20` + accessKey: "{{ minio_access_key }}" + secretKey: "{{ minio_secret_key }}" + +# Application specific variables +global: + postgresql: *postgres + kafka: *kafka + redis: *redis + s3: + region: "us-east-1" + endpoint: "http://minio.db.svc.cluster.local:9000" + assetsBucket: "sessions-assets" + recordingsBucket: "mobs" + sourcemapsBucket: "sourcemaps" + # if you're using one node installation, where + # you're using local s3, make sure these variables + # are same as minio.global.minio.accesskey and secretKey + accessKey: "{{ minio_access_key }}" + secretKey: "{{ minio_secret_key }}" + email: + emailHost: '{{ email_host }}' + emailPort: '{{ email_port }}' + emailUser: '{{ email_user }}' + emailPassword: '{{ email_password }}' + emailUseTls: '{{ email_use_tls }}' + emailUseSsl: '{{ email_use_ssl }}' + emailSslKey: '{{ email_ssl_key }}' + emailSslCert: '{{ email_ssl_cert }}' + emailFrom: '{{ email_from }}' + + enterpriseEditionLicense: "{{ enterprise_edition_license }}" + domainName: "{{ domain_name }}" + +chalice: + env: + jwt_secret: "{{ jwt_secret_key }}" + # captcha_server: '' + # captcha_key: '' + # SAML2_MD_URL: '' + # idp_entityId: '' + # idp_sso_url: '' + # idp_x509cert: '' + # idp_sls_url: '' + # idp_name: '' + # idp_tenantKey: '' + + +# If you want to override something +# chartname: +# filedFrom chart/Values.yaml: +# key: value + +# For example: +# +# http: +# resources: +# limits: +# cpu: 1024m +# memory: 4096Mi +# requests: +# cpu: 512m +# memory: 2056Mi + +## Changes to nginx +# +# nginx-ingress: +# customServerConfigs: | +# # Redirecting http to https +# return 301 https://$host$request_uri; +# diff --git a/scripts/helmcharts/versionUpdater.sh b/scripts/helmcharts/versionUpdater.sh new file mode 100644 index 000000000..5fc8230a2 --- /dev/null +++ b/scripts/helmcharts/versionUpdater.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# This script will update the version of openreplay components. 
+currentVersion=$1 +[[ -z $currentVersion ]] && { + echo "Usage: $0 " + echo "eg: $0 v1.5.0" +} +find ./openreplay -type f -iname chart.yaml -exec sed -i "s/AppVersion.*/AppVersion: \"$currentVersion\"/g" {} \; +sed -i "s/fromVersion.*/fromVersion: \"$currentVersion\"/g" vars.yaml +sed -i "s/version.*/version=\"$currentVersion\"/g" init.sh diff --git a/third-party.md b/third-party.md index 03e3d8a90..98aefe5b0 100644 --- a/third-party.md +++ b/third-party.md @@ -1,4 +1,4 @@ -## Licenses (as of October 28, 2021) +## Licenses (as of January 21, 2022) Below is the list of dependencies used in OpenReplay software. Licenses may change between versions, so please keep this up to date with every new library you use. @@ -28,8 +28,16 @@ Below is the list of dependencies used in OpenReplay software. Licenses may chan | pyjwt | MIT | Python | | jsbeautifier | MIT | Python | | psycopg2-binary | LGPL | Python | -| pytz | MIT | Python | +| fastapi | MIT | Python | +| uvicorn | BSD | Python | +| python-decouple | MIT | Python | +| pydantic | MIT | Python | +| apscheduler | MIT | Python | +| python-multipart | Apache | Python | +| elasticsearch-py | Apache2 | Python | +| jira | BSD2 | Python | | clickhouse-driver | MIT | Python | +| python3-saml | MIT | Python | | kubernetes | Apache2 | Python | | chalice | Apache2 | Python | | pandas | BSD3 | Python | @@ -76,15 +84,9 @@ Below is the list of dependencies used in OpenReplay software. 
Licenses may chan | redux-immutable | BSD3 | JavaScript | | redux-thunk | MIT | JavaScript | | semantic-ui-react | MIT | JavaScript | -| socket.io-client | MIT | JavaScript | +| socketio | MIT | JavaScript | | source-map | BSD3 | JavaScript | | aws-sdk | Apache2 | JavaScript | | serverless | MIT | JavaScript | -| schedule | MIT | Python | -| croniter | MIT | Python | | lib/pq | MIT | Go | | peerjs | MIT | JavaScript | -| antonmedv/finder | MIT | JavaScript | -| elasticsearch-py | Apache2 | Python | -| sentry-python | BSD2 | Python | -| jira | BSD2 | Python | diff --git a/tracker/tracker-assist/README.md b/tracker/tracker-assist/README.md index 2ebac72d4..0c7bfe00f 100644 --- a/tracker/tracker-assist/README.md +++ b/tracker/tracker-assist/README.md @@ -72,7 +72,7 @@ onAgentConnect: () => { Warning: it is possible for the same agent to be connected/disconnected several times during one session due to a bad network. Several agents may connect simultaneously. -A callback `onCallStart` will be fired when the end-user accepts the call. It can return another callback that will be called on call end. +A callback `onCallStart` will be fired when the end-user accepts the call. It can return another callback that will be called on the call end. 
```ts onCallStart: () => { console.log("Allo!") diff --git a/tracker/tracker-assist/package.json b/tracker/tracker-assist/package.json index 97973d6dd..4d327410b 100644 --- a/tracker/tracker-assist/package.json +++ b/tracker/tracker-assist/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker-assist", "description": "Tracker plugin for screen assistance through the WebRTC", - "version": "3.4.11", + "version": "3.4.16", "keywords": [ "WebRTC", "assistance", diff --git a/tracker/tracker-assist/src/BufferingConnection.ts b/tracker/tracker-assist/src/BufferingConnection.ts index e90970c21..5fb3b7349 100644 --- a/tracker/tracker-assist/src/BufferingConnection.ts +++ b/tracker/tracker-assist/src/BufferingConnection.ts @@ -6,12 +6,13 @@ interface Message { } // 16kb should be max according to specification +// 64kb chrome const crOrFf: boolean = typeof navigator !== "undefined" && (navigator.userAgent.indexOf("Chrom") !== -1 || // Chrome && Chromium navigator.userAgent.indexOf("Firefox") !== -1); -const MESSAGES_PER_SEND = crOrFf ? 500 : 100 +const MESSAGES_PER_SEND = crOrFf ? 
200 : 50 // Bffering required in case of webRTC export default class BufferingConnection { @@ -34,7 +35,10 @@ export default class BufferingConnection { send(messages: Message[]) { if (!this.conn.open) { return; } let i = 0; + //@ts-ignore + messages=messages.filter(m => m._id !== 39) while (i < messages.length) { + this.buffer.push(messages.slice(i, i+=this.msgsPerSend)) } if (!this.buffering) { diff --git a/tracker/tracker-assist/src/CallWindow.ts b/tracker/tracker-assist/src/CallWindow.ts index 3499cf7ea..1299008a8 100644 --- a/tracker/tracker-assist/src/CallWindow.ts +++ b/tracker/tracker-assist/src/CallWindow.ts @@ -207,14 +207,6 @@ export default class CallWindow { private toggleAudio() { const enabled = this.localStream?.toggleAudio() || false this.toggleAudioUI(enabled) - // if (!this.audioBtn) { return; } - // if (enabled) { - // this.audioBtn.classList.remove("muted"); - // this.audioBtn.childNodes[1].textContent = "Mute"; - // } else { - // this.audioBtn.classList.add("muted"); - // this.audioBtn.childNodes[1].textContent = "Unmute"; - // } } private toggleVideoUI(enabled: boolean) { diff --git a/tracker/tracker-assist/src/Mouse.ts b/tracker/tracker-assist/src/Mouse.ts index 51fb67e8e..b183413bd 100644 --- a/tracker/tracker-assist/src/Mouse.ts +++ b/tracker/tracker-assist/src/Mouse.ts @@ -1,3 +1,5 @@ +type XY = [number, number] + export default class Mouse { private mouse: HTMLDivElement @@ -14,23 +16,91 @@ export default class Mouse { background: "radial-gradient(red, transparent)", }); document.body.appendChild(this.mouse); + + + window.addEventListener("scroll", this.handleWScroll) + window.addEventListener("resize", this.resetLastScrEl) } - move({x, y}: {x: number, y: number}) { - this.position = [x, y]; + move(pos: XY) { + if (this.position[0] !== pos[0] || this.position[1] !== pos[1]) { + this.resetLastScrEl() + } + + this.position = pos; Object.assign(this.mouse.style, { - left: `${x || 0}px`, - top: `${y || 0}px` + left: `${pos[0] || 0}px`, + 
top: `${pos[1] || 0}px` }) + } - getPosition(): [ number, number] { + getPosition(): XY { return this.position; } + click(pos: XY) { + const el = document.elementFromPoint(pos[0], pos[1]) + if (el instanceof HTMLElement) { + el.click() + el.focus() + } + } + + private readonly pScrEl = document.scrollingElement || document.documentElement // Is it always correct + private lastScrEl: Element | "window" | null = null + private resetLastScrEl = () => { this.lastScrEl = null } + private handleWScroll = e => { + if (e.target !== this.lastScrEl && + this.lastScrEl !== "window") { + this.resetLastScrEl() + } + } + scroll(delta: XY) { + // what would be the browser-like logic? + const [mouseX, mouseY] = this.position + const [dX, dY] = delta + + let el = this.lastScrEl + // Scroll the same one + if (el instanceof Element) { + el.scrollLeft += dX + el.scrollTop += dY + return // TODO: if not scrolled + } + if (el === "window") { + window.scroll(this.pScrEl.scrollLeft + dX, this.pScrEl.scrollTop + dY) + return + } + + el = document.elementFromPoint( + mouseX-this.pScrEl.scrollLeft, + mouseY-this.pScrEl.scrollTop, + ) + while (el) { + //if(el.scrollWidth > el.clientWidth) // - This check doesn't work in common case + const esl = el.scrollLeft + el.scrollLeft += dX + const est = el.scrollTop + el.scrollTop += dY + if (esl !== el.scrollLeft || est !== el.scrollTop) { + this.lastScrEl = el + return + } else { + el = el.parentElement + } + } + + // If not scrolled + window.scroll(this.pScrEl.scrollLeft + dX, this.pScrEl.scrollTop + dY) + this.lastScrEl = "window" + } + remove() { if (this.mouse.parentElement) { document.body.removeChild(this.mouse); } + window.removeEventListener("scroll", this.handleWScroll) + window.removeEventListener("resize", this.resetLastScrEl) } } \ No newline at end of file diff --git a/tracker/tracker-assist/src/index.ts b/tracker/tracker-assist/src/index.ts index 3ffd4671a..d2067ff91 100644 --- a/tracker/tracker-assist/src/index.ts +++ 
b/tracker/tracker-assist/src/index.ts @@ -11,13 +11,13 @@ import ConfirmWindow from './ConfirmWindow.js'; import RequestLocalStream from './LocalStream.js'; export interface Options { - onAgentConnect: () => (()=>{} | void), - onCallStart: () => (()=>{} | void), + onAgentConnect: () => ((()=>{}) | void), + onCallStart: () => ((()=>{}) | void), confirmText: string, confirmStyle: Object, // Styles object session_calling_peer_key: string, config: RTCConfiguration, - __messages_per_send?: number, + // __messages_per_send?: number, } enum CallingState { @@ -86,7 +86,7 @@ export default function(opts?: Partial) { host: app.getHost(), path: '/assist', port: location.protocol === 'http:' && appOptions.__DISABLE_SECURE_MODE ? 80 : 443, - //debug: // 0 Print nothing //1 Prints only errors. / 2 Prints errors and warnings. / 3 Prints all logs. + debug: appOptions.__debug_log ? 2 : 0, // 0 Print nothing //1 Prints only errors. / 2 Prints errors and warnings. / 3 Prints all logs. } if (options.config) { _opt['config'] = options.config @@ -102,11 +102,11 @@ export default function(opts?: Partial) { log('Connection opened.') assistDemandedRestart = true; app.stop(); - openDataConnections[conn.peer] = new BufferingConnection(conn, options.__messages_per_send) + openDataConnections[conn.peer] = new BufferingConnection(conn) const onAgentDisconnect = options.onAgentConnect(); conn.on('close', () => { - onAgentDisconnect?.(); + onAgentDisconnect && onAgentDisconnect(); log("Connection close: ", conn.peer) delete openDataConnections[conn.peer] // TODO: check if works properly }) @@ -174,7 +174,7 @@ export default function(opts?: Partial) { let callUI = new CallWindow() const handleCallEnd = () => { - onCallEnd?.() + onCallEnd && onCallEnd() mouse.remove(); callUI.remove(); setCallingState(CallingState.False); @@ -212,46 +212,22 @@ export default function(opts?: Partial) { document.addEventListener("click", onInteraction) }); dataConn.on('data', (data: any) => { + log("Income data: ", 
data) if (!data) { return } if (data === "call_end") { - log('"call_end" received') - handleCallEnd(); - return; + return handleCallEnd(); } if (data.name === 'string') { - log("Name received: ", data) - callUI.setAssistentName(data.name); + return callUI.setAssistentName(data.name); } if (data.type === "scroll" && Array.isArray(data.delta)) { - const scrEl = document.scrollingElement || document.documentElement - const [mouseX, mouseY] = mouse.getPosition() - const [dX, dY] = data.delta; - const el = document.elementFromPoint(mouseX-scrEl.scrollLeft, mouseY-scrEl.scrollTop) - let scrolled = false // what would be the browser-like logic? - if (el) { - if(el.scrollWidth > el.clientWidth) { - el.scrollLeft += data.delta[0] - scrolled = true - } - if (el && el.scrollHeight > el.clientHeight) { - el.scrollTop += data.delta[1] - scrolled = true - } - } - if (!scrolled) { - window.scroll(scrEl.scrollLeft + data.delta[0], scrEl.scrollTop + data.delta[1]) - } + return mouse.scroll(data.delta) } if (data.type === "click" && typeof data.x === 'number' && typeof data.y === 'number') { - const el = document.elementFromPoint(data.x, data.y) - if (el instanceof HTMLElement) { - el.click() - el.focus() - } - return + return mouse.click([ data.x, data.y ]) } if (typeof data.x === 'number' && typeof data.y === 'number') { - mouse.move(data); + return mouse.move([ data.x, data.y ]) } }); diff --git a/tracker/tracker/package-lock.json b/tracker/tracker/package-lock.json index 287203b30..6dcbc4e81 100644 --- a/tracker/tracker/package-lock.json +++ b/tracker/tracker/package-lock.json @@ -1,6 +1,6 @@ { "name": "@openreplay/tracker", - "version": "3.4.7", + "version": "3.4.12", "lockfileVersion": 1, "requires": true, "dependencies": { diff --git a/tracker/tracker/package.json b/tracker/tracker/package.json index a2dbc60f1..16b10b8f4 100644 --- a/tracker/tracker/package.json +++ b/tracker/tracker/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker", "description": "The OpenReplay 
tracker main package", - "version": "3.4.12", + "version": "3.4.17", "keywords": [ "logging", "replay" diff --git a/tracker/tracker/src/main/app/context.ts b/tracker/tracker/src/main/app/context.ts new file mode 100644 index 000000000..aa9a5dfb3 --- /dev/null +++ b/tracker/tracker/src/main/app/context.ts @@ -0,0 +1,72 @@ +// TODO: global type +export interface Window extends globalThis.Window { + HTMLInputElement: typeof HTMLInputElement, + HTMLLinkElement: typeof HTMLLinkElement, + HTMLStyleElement: typeof HTMLStyleElement, + SVGStyleElement: typeof SVGStyleElement, + HTMLIFrameElement: typeof HTMLIFrameElement, + Text: typeof Text, + Element: typeof Element, + ShadowRoot: typeof ShadowRoot, + //parent: Window, +} + +type WindowConstructor = + Document | + Element | + Text | + ShadowRoot | + HTMLInputElement | + HTMLLinkElement | + HTMLStyleElement | + HTMLIFrameElement + +// type ConstructorNames = +// 'Element' | +// 'Text' | +// 'HTMLInputElement' | +// 'HTMLLinkElement' | +// 'HTMLStyleElement' | +// 'HTMLIFrameElement' +type Constructor = { new (...args: any[]): T , name: string }; + + // TODO: we need a type expert here so we won't have to ignore the lines + // TODO: use it everywhere (static function; export from which file? 
<-- global Window typing required) +export function isInstance(node: Node, constr: Constructor): node is T { + const doc = node.ownerDocument; + if (!doc) { // null if Document + return constr.name === 'Document'; + } + let context: Window = + // @ts-ignore (for EI, Safary) + doc.parentWindow || + doc.defaultView; // TODO: smart global typing for Window object + while(context.parent && context.parent !== context) { + // @ts-ignore + if (node instanceof context[constr.name]) { + return true + } + // @ts-ignore + context = context.parent + } + // @ts-ignore + return node instanceof context[constr.name] +} + +export function inDocument(node: Node): boolean { + const doc = node.ownerDocument + if (!doc) { return false } + if (doc.contains(node)) { return true } + let context: Window = + // @ts-ignore (for EI, Safary) + doc.parentWindow || + doc.defaultView; + while(context.parent && context.parent !== context) { + if (context.document.contains(node)) { + return true + } + // @ts-ignore + context = context.parent + } + return false; +} diff --git a/tracker/tracker/src/main/app/index.ts b/tracker/tracker/src/main/app/index.ts index 54fe9050f..b02d15f91 100644 --- a/tracker/tracker/src/main/app/index.ts +++ b/tracker/tracker/src/main/app/index.ts @@ -2,12 +2,15 @@ import { timestamp, log, warn } from "../utils.js"; import { Timestamp, PageClose } from "../../messages/index.js"; import Message from "../../messages/message.js"; import Nodes from "./nodes.js"; -import Observer from "./observer.js"; +import Observer from "./observer/top_observer.js"; +import Sanitizer from "./sanitizer.js"; import Ticker from "./ticker.js"; import { deviceMemory, jsHeapSizeLimit } from "../modules/performance.js"; -import type { Options as ObserverOptions } from "./observer.js"; +import type { Options as ObserverOptions } from "./observer/top_observer.js"; +import type { Options as SanitizerOptions } from "./sanitizer.js"; + import type { Options as WebworkerOptions, WorkerMessageData } from 
"../../messages/webworker.js"; @@ -17,11 +20,17 @@ export interface OnStartInfo { userUUID: string, } -export type Options = { +export interface StartOptions { + userID?: string, + forceNew: boolean, +} + +type AppOptions = { revID: string; node_id: string; session_token_key: string; session_pageno_key: string; + session_reset_key: string; local_uuid_key: string; ingestPoint: string; resourceBaseHref: string | null, // resourceHref? @@ -30,7 +39,9 @@ export type Options = { __debug_report_edp: string | null; __debug_log: boolean; onStart?: (info: OnStartInfo) => void; -} & ObserverOptions & WebworkerOptions; +} & WebworkerOptions; + +export type Options = AppOptions & ObserverOptions & SanitizerOptions type Callback = () => void; type CommitCallback = (messages: Array) => void; @@ -43,21 +54,23 @@ export default class App { readonly nodes: Nodes; readonly ticker: Ticker; readonly projectKey: string; + readonly sanitizer: Sanitizer; private readonly messages: Array = []; - /*private*/ readonly observer: Observer; // temp, for fast security fix. 
TODO: separate security/obscure module with nodeCallback that incapsulates `textMasked` functionality from Observer + private readonly observer: Observer; private readonly startCallbacks: Array = []; private readonly stopCallbacks: Array = []; private readonly commitCallbacks: Array = []; - private readonly options: Options; + private readonly options: AppOptions; private readonly revID: string; private _sessionID: string | null = null; + private _userID: string | undefined; private isActive = false; private version = 'TRACKER_VERSION'; private readonly worker?: Worker; constructor( projectKey: string, sessionToken: string | null | undefined, - opts: Partial, + options: Partial, ) { this.projectKey = projectKey; this.options = Object.assign( @@ -66,24 +79,23 @@ export default class App { node_id: '__openreplay_id', session_token_key: '__openreplay_token', session_pageno_key: '__openreplay_pageno', + session_reset_key: '__openreplay_reset', local_uuid_key: '__openreplay_uuid', ingestPoint: DEFAULT_INGEST_POINT, resourceBaseHref: null, __is_snippet: false, __debug_report_edp: null, __debug_log: false, - obscureTextEmails: true, - obscureTextNumbers: false, - captureIFrames: false, }, - opts, + options, ); if (sessionToken != null) { sessionStorage.setItem(this.options.session_token_key, sessionToken); } this.revID = this.options.revID; + this.sanitizer = new Sanitizer(this, options); this.nodes = new Nodes(this.options.node_id); - this.observer = new Observer(this, this.options); + this.observer = new Observer(this, options); this.ticker = new Ticker(this); this.ticker.attach(() => this.commit()); try { @@ -102,7 +114,10 @@ export default class App { this.stop(); } else if (data === "restart") { this.stop(); - this.start(true); + this.start({ + forceNew: true, + userID: this._userID, + }); } }; const alertWorker = () => { @@ -244,104 +259,132 @@ export default class App { active(): boolean { return this.isActive; } - private _start(reset: boolean): Promise { - if 
(!this.isActive) { - if (!this.worker) { - return Promise.reject("No worker found: perhaps, CSP is not set."); - } - this.isActive = true; - let pageNo: number = 0; - const pageNoStr = sessionStorage.getItem(this.options.session_pageno_key); - if (pageNoStr != null) { - pageNo = parseInt(pageNoStr); - pageNo++; - } - sessionStorage.setItem(this.options.session_pageno_key, pageNo.toString()); - const startTimestamp = timestamp(); - - const messageData: WorkerMessageData = { - ingestPoint: this.options.ingestPoint, - pageNo, - startTimestamp, - connAttemptCount: this.options.connAttemptCount, - connAttemptGap: this.options.connAttemptGap, - } - this.worker.postMessage(messageData); // brings delay of 10th ms? - return window.fetch(this.options.ingestPoint + '/v1/web/start', { - method: 'POST', - headers: { - 'Content-Type': 'application/json', - }, - body: JSON.stringify({ - token: sessionStorage.getItem(this.options.session_token_key), - userUUID: localStorage.getItem(this.options.local_uuid_key), - projectKey: this.projectKey, - revID: this.revID, - timestamp: startTimestamp, - trackerVersion: this.version, - isSnippet: this.options.__is_snippet, - deviceMemory, - jsHeapSizeLimit, - reset, - }), - }) - .then(r => { - if (r.status === 200) { - return r.json() - } else { // TODO: handle canceling && 403 - return r.text().then(text => { - throw new Error(`Server error: ${r.status}. 
${text}`); - }); - } - }) - .then(r => { - const { token, userUUID, sessionID, beaconSizeLimit } = r; - if (typeof token !== 'string' || - typeof userUUID !== 'string' || - (typeof beaconSizeLimit !== 'number' && typeof beaconSizeLimit !== 'undefined')) { - throw new Error(`Incorrect server response: ${ JSON.stringify(r) }`); - } - sessionStorage.setItem(this.options.session_token_key, token); - localStorage.setItem(this.options.local_uuid_key, userUUID); - if (typeof sessionID === 'string') { - this._sessionID = sessionID; - } - if (!this.worker) { - throw new Error("no worker found after start request (this might not happen)"); - } - this.worker.postMessage({ token, beaconSizeLimit }); - this.startCallbacks.forEach((cb) => cb()); - this.observer.observe(); - this.ticker.start(); - - log("OpenReplay tracking started."); - const onStartInfo = { sessionToken: token, userUUID, sessionID }; - if (typeof this.options.onStart === 'function') { - this.options.onStart(onStartInfo); - } - return onStartInfo; - }) - .catch(e => { - sessionStorage.removeItem(this.options.session_token_key) - this.stop() - warn("OpenReplay was unable to start. 
", e) - this._debug("session_start", e); - throw e - }) + resetNextPageSession(flag: boolean) { + if (flag) { + sessionStorage.setItem(this.options.session_reset_key, 't'); + } else { + sessionStorage.removeItem(this.options.session_reset_key); } - return Promise.reject("Player is already active"); + } + private _start(startOpts: StartOptions): Promise { + if (!this.worker) { + return Promise.reject("No worker found: perhaps, CSP is not set."); + } + if (this.isActive) { + return Promise.reject("OpenReplay: trying to call `start()` on the instance that has been started already.") + } + this.isActive = true; + + let pageNo: number = 0; + const pageNoStr = sessionStorage.getItem(this.options.session_pageno_key); + if (pageNoStr != null) { + pageNo = parseInt(pageNoStr); + pageNo++; + } + sessionStorage.setItem(this.options.session_pageno_key, pageNo.toString()); + const startTimestamp = timestamp(); + + const messageData: WorkerMessageData = { + ingestPoint: this.options.ingestPoint, + pageNo, + startTimestamp, + connAttemptCount: this.options.connAttemptCount, + connAttemptGap: this.options.connAttemptGap, + } + this.worker.postMessage(messageData); // brings delay of 10th ms? 
+ + + // let token = sessionStorage.getItem(this.options.session_token_key) + // const tokenIsActive = localStorage.getItem("__or_at_" + token) + // if (tokenIsActive) { + // token = null + // } + + const sReset = sessionStorage.getItem(this.options.session_reset_key); + sessionStorage.removeItem(this.options.session_reset_key); + + this._userID = startOpts.userID || undefined + return window.fetch(this.options.ingestPoint + '/v1/web/start', { + method: 'POST', + headers: { + 'Content-Type': 'application/json', + }, + body: JSON.stringify({ + token: sessionStorage.getItem(this.options.session_token_key), + userUUID: localStorage.getItem(this.options.local_uuid_key), + projectKey: this.projectKey, + revID: this.revID, + timestamp: startTimestamp, + trackerVersion: this.version, + isSnippet: this.options.__is_snippet, + deviceMemory, + jsHeapSizeLimit, + reset: startOpts.forceNew || sReset !== null, + userID: this._userID, + }), + }) + .then(r => { + if (r.status === 200) { + return r.json() + } else { // TODO: handle canceling && 403 + return r.text().then(text => { + throw new Error(`Server error: ${r.status}. 
${text}`); + }); + } + }) + .then(r => { + const { token, userUUID, sessionID, beaconSizeLimit } = r; + if (typeof token !== 'string' || + typeof userUUID !== 'string' || + (typeof beaconSizeLimit !== 'number' && typeof beaconSizeLimit !== 'undefined')) { + throw new Error(`Incorrect server response: ${ JSON.stringify(r) }`); + } + sessionStorage.setItem(this.options.session_token_key, token); + localStorage.setItem(this.options.local_uuid_key, userUUID); + // localStorage.setItem("__or_at_" + token, "true") + // this.attachEventListener(window, 'beforeunload', ()=>{ + // localStorage.removeItem("__or_at_" + token) + // }, false); + // this.attachEventListener(window, 'pagehide', ()=>{ + // localStorage.removeItem("__or_at_" + token) + // }, false); + if (typeof sessionID === 'string') { + this._sessionID = sessionID; + } + if (!this.worker) { + throw new Error("no worker found after start request (this might not happen)"); + } + this.worker.postMessage({ token, beaconSizeLimit }); + this.startCallbacks.forEach((cb) => cb()); + this.observer.observe(); + this.ticker.start(); + + log("OpenReplay tracking started."); + const onStartInfo = { sessionToken: token, userUUID, sessionID }; + if (typeof this.options.onStart === 'function') { + this.options.onStart(onStartInfo); + } + return onStartInfo; + }) + .catch(e => { + sessionStorage.removeItem(this.options.session_token_key) + this.stop() + warn("OpenReplay was unable to start. 
", e) + this._debug("session_start", e); + throw e + }) } - start(reset: boolean = false): Promise { + start(options: StartOptions = { forceNew: false }): Promise { if (!document.hidden) { - return this._start(reset); + return this._start(options); } else { return new Promise((resolve) => { const onVisibilityChange = () => { if (!document.hidden) { document.removeEventListener("visibilitychange", onVisibilityChange); - resolve(this._start(reset)); + resolve(this._start(options)); } } document.addEventListener("visibilitychange", onVisibilityChange); @@ -354,6 +397,7 @@ export default class App { if (this.worker) { this.worker.postMessage("stop"); } + this.sanitizer.clear(); this.observer.disconnect(); this.nodes.clear(); this.ticker.stop(); diff --git a/tracker/tracker/src/main/app/observer.ts b/tracker/tracker/src/main/app/observer.ts deleted file mode 100644 index 3ed5088af..000000000 --- a/tracker/tracker/src/main/app/observer.ts +++ /dev/null @@ -1,484 +0,0 @@ -import { stars, hasOpenreplayAttribute } from "../utils.js"; -import { - CreateDocument, - CreateElementNode, - CreateTextNode, - SetNodeData, - SetCSSDataURLBased, - SetNodeAttribute, - SetNodeAttributeURLBased, - RemoveNodeAttribute, - MoveNode, - RemoveNode, - CreateIFrameDocument, -} from "../../messages/index.js"; -import App from "./index.js"; - -interface Window extends WindowProxy { - HTMLInputElement: typeof HTMLInputElement, - HTMLLinkElement: typeof HTMLLinkElement, - HTMLStyleElement: typeof HTMLStyleElement, - SVGStyleElement: typeof SVGStyleElement, - HTMLIFrameElement: typeof HTMLIFrameElement, - Text: typeof Text, - Element: typeof Element, - //parent: Window, -} - - -type WindowConstructor = - Document | - Element | - Text | - HTMLInputElement | - HTMLLinkElement | - HTMLStyleElement | - HTMLIFrameElement - -// type ConstructorNames = -// 'Element' | -// 'Text' | -// 'HTMLInputElement' | -// 'HTMLLinkElement' | -// 'HTMLStyleElement' | -// 'HTMLIFrameElement' -type Constructor = { new 
(...args: any[]): T , name: string }; - - -function isSVGElement(node: Element): node is SVGElement { - return node.namespaceURI === 'http://www.w3.org/2000/svg'; -} - -export interface Options { - obscureTextEmails: boolean; - obscureTextNumbers: boolean; - captureIFrames: boolean; -} - -export default class Observer { - private readonly observer: MutationObserver; - private readonly commited: Array; - private readonly recents: Array; - private readonly indexes: Array; - private readonly attributesList: Array | undefined>; - private readonly textSet: Set; - private readonly textMasked: Set; - constructor(private readonly app: App, private readonly options: Options, private readonly context: Window = window) { - this.observer = new MutationObserver( - this.app.safe((mutations) => { - for (const mutation of mutations) { - const target = mutation.target; - const type = mutation.type; - - // Special case - // Document 'childList' might happen in case of iframe. - // TODO: generalize as much as possible - if (this.isInstance(target, Document) - && type === 'childList' - //&& new Array(mutation.addedNodes).some(node => this.isInstance(node, HTMLHtmlElement)) - ) { - const parentFrame = target.defaultView?.frameElement - if (!parentFrame) { continue } - this.bindTree(target.documentElement) - const frameID = this.app.nodes.getID(parentFrame) - const docID = this.app.nodes.getID(target.documentElement) - if (frameID === undefined || docID === undefined) { continue } - this.app.send(CreateIFrameDocument(frameID, docID)); - continue; - } - - if (this.isIgnored(target) || !context.document.contains(target)) { - continue; - } - if (type === 'childList') { - for (let i = 0; i < mutation.removedNodes.length; i++) { - this.bindTree(mutation.removedNodes[i]); - } - for (let i = 0; i < mutation.addedNodes.length; i++) { - this.bindTree(mutation.addedNodes[i]); - } - continue; - } - const id = this.app.nodes.getID(target); - if (id === undefined) { - continue; - } - if (id >= 
this.recents.length) { - this.recents[id] = undefined; - } - if (type === 'attributes') { - const name = mutation.attributeName; - if (name === null) { - continue; - } - let attr = this.attributesList[id]; - if (attr === undefined) { - this.attributesList[id] = attr = new Set(); - } - attr.add(name); - continue; - } - if (type === 'characterData') { - this.textSet.add(id); - continue; - } - } - this.commitNodes(); - }), - ); - this.commited = []; - this.recents = []; - this.indexes = [0]; - this.attributesList = []; - this.textSet = new Set(); - this.textMasked = new Set(); - } - private clear(): void { - this.commited.length = 0; - this.recents.length = 0; - this.indexes.length = 1; - this.attributesList.length = 0; - this.textSet.clear(); - this.textMasked.clear(); - } - - // TODO: we need a type expert here so we won't have to ignore the lines - private isInstance(node: Node, constr: Constructor): node is T { - let context = this.context; - while(context.parent && context.parent !== context) { - // @ts-ignore - if (node instanceof context[constr.name]) { - return true - } - // @ts-ignore - context = context.parent - } - // @ts-ignore - return node instanceof context[constr.name] - } - - private isIgnored(node: Node): boolean { - if (this.isInstance(node, Text)) { - return false; - } - if (!this.isInstance(node, Element)) { - return true; - } - const tag = node.tagName.toUpperCase(); - if (tag === 'LINK') { - const rel = node.getAttribute('rel'); - const as = node.getAttribute('as'); - return !(rel?.includes('stylesheet') || as === "style" || as === "font"); - } - return ( - tag === 'SCRIPT' || - tag === 'NOSCRIPT' || - tag === 'META' || - tag === 'TITLE' || - tag === 'BASE' - ); - } - - private sendNodeAttribute( - id: number, - node: Element, - name: string, - value: string | null, - ): void { - if (isSVGElement(node)) { - if (name.substr(0, 6) === 'xlink:') { - name = name.substr(6); - } - if (value === null) { - this.app.send(new RemoveNodeAttribute(id, 
name)); - } else if (name === 'href') { - if (value.length > 1e5) { - value = ''; - } - this.app.send(new SetNodeAttributeURLBased(id, name, value, this.app.getBaseHref())); - } else { - this.app.send(new SetNodeAttribute(id, name, value)); - } - return; - } - if ( - name === 'src' || - name === 'srcset' || - name === 'integrity' || - name === 'crossorigin' || - name === 'autocomplete' || - name.substr(0, 2) === 'on' - ) { - return; - } - if ( - name === 'value' && - this.isInstance(node, HTMLInputElement) && - node.type !== 'button' && - node.type !== 'reset' && - node.type !== 'submit' - ) { - return; - } - if (value === null) { - this.app.send(new RemoveNodeAttribute(id, name)); - return; - } - if (name === 'style' || name === 'href' && this.isInstance(node, HTMLLinkElement)) { - this.app.send(new SetNodeAttributeURLBased(id, name, value, this.app.getBaseHref())); - return; - } - if (name === 'href' || value.length > 1e5) { - value = ''; - } - this.app.send(new SetNodeAttribute(id, name, value)); - } - - /* TODO: abstract sanitation */ - getInnerTextSecure(el: HTMLElement): string { - const id = this.app.nodes.getID(el) - if (!id) { return '' } - return this.checkObscure(id, el.innerText) - - } - - private checkObscure(id: number, data: string): string { - if (this.textMasked.has(id)) { - return data.replace( - /[^\f\n\r\t\v\u00a0\u1680\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff]/g, - '█', - ); - } - if (this.options.obscureTextNumbers) { - data = data.replace(/\d/g, '0'); - } - if (this.options.obscureTextEmails) { - data = data.replace( - /([^\s]+)@([^\s]+)\.([^\s]+)/g, - (...f: Array) => - stars(f[1]) + '@' + stars(f[2]) + '.' 
+ stars(f[3]), - ); - } - return data - } - - private sendNodeData(id: number, parentElement: Element, data: string): void { - if (this.isInstance(parentElement, HTMLStyleElement) || this.isInstance(parentElement, SVGStyleElement)) { - this.app.send(new SetCSSDataURLBased(id, data, this.app.getBaseHref())); - return; - } - data = this.checkObscure(id, data) - this.app.send(new SetNodeData(id, data)); - } - /* end TODO: abstract sanitation */ - - private bindNode(node: Node): void { - const r = this.app.nodes.registerNode(node); - const id = r[0]; - this.recents[id] = r[1] || this.recents[id] || false; - } - - private bindTree(node: Node): void { - if (this.isIgnored(node)) { - return; - } - this.bindNode(node); - const walker = document.createTreeWalker( - node, - NodeFilter.SHOW_ELEMENT + NodeFilter.SHOW_TEXT, - { - acceptNode: (node) => - this.isIgnored(node) || this.app.nodes.getID(node) !== undefined - ? NodeFilter.FILTER_REJECT - : NodeFilter.FILTER_ACCEPT, - }, - // @ts-ignore - false, - ); - while (walker.nextNode()) { - this.bindNode(walker.currentNode); - } - } - - private unbindNode(node: Node): void { - const id = this.app.nodes.unregisterNode(node); - if (id !== undefined && this.recents[id] === false) { - this.app.send(new RemoveNode(id)); - } - } - - private _commitNode(id: number, node: Node): boolean { - const parent = node.parentNode; - let parentID: number | undefined; - if (this.isInstance(node, HTMLHtmlElement)) { - this.indexes[id] = 0 - } else { - if (parent === null) { - this.unbindNode(node); - return false; - } - parentID = this.app.nodes.getID(parent); - if (parentID === undefined) { - this.unbindNode(node); - return false; - } - if (!this.commitNode(parentID)) { - this.unbindNode(node); - return false; - } - if ( - this.textMasked.has(parentID) || - (this.isInstance(node, Element) && hasOpenreplayAttribute(node, 'masked')) - ) { - this.textMasked.add(id); - } - let sibling = node.previousSibling; - while (sibling !== null) { - const 
siblingID = this.app.nodes.getID(sibling); - if (siblingID !== undefined) { - this.commitNode(siblingID); - this.indexes[id] = this.indexes[siblingID] + 1; - break; - } - sibling = sibling.previousSibling; - } - if (sibling === null) { - this.indexes[id] = 0; - } - } - const isNew = this.recents[id]; - const index = this.indexes[id]; - if (index === undefined) { - throw 'commitNode: missing node index'; - } - if (isNew === true) { - if (this.isInstance(node, Element)) { - if (parentID !== undefined) { - this.app.send(new - CreateElementNode( - id, - parentID, - index, - node.tagName, - isSVGElement(node), - ), - ); - } - for (let i = 0; i < node.attributes.length; i++) { - const attr = node.attributes[i]; - this.sendNodeAttribute(id, node, attr.nodeName, attr.value); - } - - if (this.isInstance(node, HTMLIFrameElement) && - (this.options.captureIFrames || node.getAttribute("data-openreplay-capture"))) { - this.handleIframe(node); - } - } else if (this.isInstance(node, Text)) { - // for text node id != 0, hence parentID !== undefined and parent is Element - this.app.send(new CreateTextNode(id, parentID as number, index)); - this.sendNodeData(id, parent as Element, node.data); - } - return true; - } - if (isNew === false && parentID !== undefined) { - this.app.send(new MoveNode(id, parentID, index)); - } - const attr = this.attributesList[id]; - if (attr !== undefined) { - if (!this.isInstance(node, Element)) { - throw 'commitNode: node is not an element'; - } - for (const name of attr) { - this.sendNodeAttribute(id, node, name, node.getAttribute(name)); - } - } - if (this.textSet.has(id)) { - if (!this.isInstance(node, Text)) { - throw 'commitNode: node is not a text'; - } - // for text node id != 0, hence parent is Element - this.sendNodeData(id, parent as Element, node.data); - } - return true; - } - private commitNode(id: number): boolean { - const node = this.app.nodes.getNode(id); - if (node === undefined) { - return false; - } - const cmt = this.commited[id]; 
- if (cmt !== undefined) { - return cmt; - } - return (this.commited[id] = this._commitNode(id, node)); - } - private commitNodes(): void { - let node; - for (let id = 0; id < this.recents.length; id++) { - this.commitNode(id); - if (this.recents[id] === true && (node = this.app.nodes.getNode(id))) { - this.app.nodes.callNodeCallbacks(node); - } - } - this.clear(); - } - - private iframeObservers: Observer[] = []; - private handleIframe(iframe: HTMLIFrameElement): void { - let context: Window | null = null - const handle = this.app.safe(() => { - const id = this.app.nodes.getID(iframe) - if (id === undefined) { return } - if (iframe.contentWindow === context) { return } - context = iframe.contentWindow as Window | null; - if (!context) { return } - const observer = new Observer(this.app, this.options, context) - this.iframeObservers.push(observer) - observer.observeIframe(id, context) - }) - this.app.attachEventListener(iframe, "load", handle) - handle() - } - - // TODO: abstract common functionality, separate FrameObserver - private observeIframe(id: number, context: Window) { - const doc = context.document; - this.observer.observe(doc, { - childList: true, - attributes: true, - characterData: true, - subtree: true, - attributeOldValue: false, - characterDataOldValue: false, - }); - this.bindTree(doc.documentElement); - const docID = this.app.nodes.getID(doc.documentElement); - if (docID === undefined) { - console.log("Wrong") - return; - } - this.app.send(CreateIFrameDocument(id,docID)); - this.commitNodes(); - } - - observe(): void { - this.observer.observe(this.context.document, { - childList: true, - attributes: true, - characterData: true, - subtree: true, - attributeOldValue: false, - characterDataOldValue: false, - }); - this.app.send(new CreateDocument()); - this.bindTree(this.context.document.documentElement); - this.commitNodes(); - } - - disconnect(): void { - this.iframeObservers.forEach(o => o.disconnect()); - this.iframeObservers = []; - 
this.observer.disconnect(); - this.clear(); - } -} diff --git a/tracker/tracker/src/main/app/observer/iframe_observer.ts b/tracker/tracker/src/main/app/observer/iframe_observer.ts new file mode 100644 index 000000000..be0a7182c --- /dev/null +++ b/tracker/tracker/src/main/app/observer/iframe_observer.ts @@ -0,0 +1,19 @@ +import Observer from "./observer.js"; +import { CreateIFrameDocument } from "../../../messages/index.js"; + +export default class IFrameObserver extends Observer { + observe(iframe: HTMLIFrameElement) { + const doc = iframe.contentDocument; + const hostID = this.app.nodes.getID(iframe); + if (!doc || hostID === undefined) { return } //log TODO common app.logger + // Have to observe document, because the inner might be changed + this.observeRoot(doc, (docID) => { + if (docID === undefined) { + console.log("OpenReplay: Iframe document not bound") + return; + } + this.app.send(CreateIFrameDocument(hostID, docID)); + }); + } + +} \ No newline at end of file diff --git a/tracker/tracker/src/main/app/observer/observer.ts b/tracker/tracker/src/main/app/observer/observer.ts new file mode 100644 index 000000000..0f4ff2994 --- /dev/null +++ b/tracker/tracker/src/main/app/observer/observer.ts @@ -0,0 +1,353 @@ +import { hasOpenreplayAttribute } from "../../utils.js"; +import { + RemoveNodeAttribute, + SetNodeAttribute, + SetNodeAttributeURLBased, + SetCSSDataURLBased, + SetNodeData, + CreateTextNode, + CreateElementNode, + MoveNode, + RemoveNode, +} from "../../../messages/index.js"; +import App from "../index.js"; +import { isInstance, inDocument } from "../context.js"; + + +function isSVGElement(node: Element): node is SVGElement { + return node.namespaceURI === 'http://www.w3.org/2000/svg'; +} + +function isIgnored(node: Node): boolean { + if (isInstance(node, Text)) { + return false; + } + if (!isInstance(node, Element)) { + return true; + } + const tag = node.tagName.toUpperCase(); + if (tag === 'LINK') { + const rel = node.getAttribute('rel'); + const 
as = node.getAttribute('as'); + return !(rel?.includes('stylesheet') || as === "style" || as === "font"); + } + return ( + tag === 'SCRIPT' || + tag === 'NOSCRIPT' || + tag === 'META' || + tag === 'TITLE' || + tag === 'BASE' + ); +} + +function isRootNode(node: Node): boolean { + return isInstance(node, Document) || isInstance(node, ShadowRoot); +} + +function isObservable(node: Node): boolean { + if (isRootNode(node)) { + return true; + } + return !isIgnored(node); +} + +export default abstract class Observer { + private readonly observer: MutationObserver; + private readonly commited: Array = []; + private readonly recents: Array = []; + private readonly myNodes: Array = []; + private readonly indexes: Array = []; + private readonly attributesList: Array | undefined> = []; + private readonly textSet: Set = new Set(); + private readonly inUpperContext: boolean; + constructor(protected readonly app: App, protected readonly context: Window = window) { + this.inUpperContext = context.parent === context //TODO: get rid of context here + this.observer = new MutationObserver( + this.app.safe((mutations) => { + for (const mutation of mutations) { + const target = mutation.target; + const type = mutation.type; + + if (!isObservable(target) || !inDocument(target)) { + continue; + } + if (type === 'childList') { + for (let i = 0; i < mutation.removedNodes.length; i++) { + this.bindTree(mutation.removedNodes[i]); + } + for (let i = 0; i < mutation.addedNodes.length; i++) { + this.bindTree(mutation.addedNodes[i]); + } + continue; + } + const id = this.app.nodes.getID(target); + if (id === undefined) { + continue; + } + if (id >= this.recents.length) { // TODO: something more convinient + this.recents[id] = undefined; + } + if (type === 'attributes') { + const name = mutation.attributeName; + if (name === null) { + continue; + } + let attr = this.attributesList[id]; + if (attr === undefined) { + this.attributesList[id] = attr = new Set(); + } + attr.add(name); + continue; + } 
+ if (type === 'characterData') { + this.textSet.add(id); + continue; + } + } + this.commitNodes(); + }), + ); + } + private clear(): void { + this.commited.length = 0; + this.recents.length = 0; + this.indexes.length = 1; + this.attributesList.length = 0; + this.textSet.clear(); + } + + private sendNodeAttribute( + id: number, + node: Element, + name: string, + value: string | null, + ): void { + if (isSVGElement(node)) { + if (name.substr(0, 6) === 'xlink:') { + name = name.substr(6); + } + if (value === null) { + this.app.send(new RemoveNodeAttribute(id, name)); + } else if (name === 'href') { + if (value.length > 1e5) { + value = ''; + } + this.app.send(new SetNodeAttributeURLBased(id, name, value, this.app.getBaseHref())); + } else { + this.app.send(new SetNodeAttribute(id, name, value)); + } + return; + } + if ( + name === 'src' || + name === 'srcset' || + name === 'integrity' || + name === 'crossorigin' || + name === 'autocomplete' || + name.substr(0, 2) === 'on' + ) { + return; + } + if ( + name === 'value' && + isInstance(node, HTMLInputElement) && + node.type !== 'button' && + node.type !== 'reset' && + node.type !== 'submit' + ) { + return; + } + if (value === null) { + this.app.send(new RemoveNodeAttribute(id, name)); + return; + } + if (name === 'style' || name === 'href' && isInstance(node, HTMLLinkElement)) { + this.app.send(new SetNodeAttributeURLBased(id, name, value, this.app.getBaseHref())); + return; + } + if (name === 'href' || value.length > 1e5) { + value = ''; + } + this.app.send(new SetNodeAttribute(id, name, value)); + } + + private sendNodeData(id: number, parentElement: Element, data: string): void { + if (isInstance(parentElement, HTMLStyleElement) || isInstance(parentElement, SVGStyleElement)) { + this.app.send(new SetCSSDataURLBased(id, data, this.app.getBaseHref())); + return; + } + data = this.app.sanitizer.sanitize(id, data) + this.app.send(new SetNodeData(id, data)); + } + + private bindNode(node: Node): void { + const r = 
this.app.nodes.registerNode(node); + const id = r[0]; + this.recents[id] = r[1] || this.recents[id] || false; + + this.myNodes[id] = true; + } + + private bindTree(node: Node): void { + if (!isObservable(node)) { + return + } + this.bindNode(node); + const walker = document.createTreeWalker( + node, + NodeFilter.SHOW_ELEMENT + NodeFilter.SHOW_TEXT, + { + acceptNode: (node) => + isIgnored(node) || this.app.nodes.getID(node) !== undefined + ? NodeFilter.FILTER_REJECT + : NodeFilter.FILTER_ACCEPT, + }, + // @ts-ignore + false, + ); + while (walker.nextNode()) { + this.bindNode(walker.currentNode); + } + } + + private unbindNode(node: Node): void { + const id = this.app.nodes.unregisterNode(node); + if (id !== undefined && this.recents[id] === false) { + this.app.send(new RemoveNode(id)); + } + } + + private _commitNode(id: number, node: Node): boolean { + if (isRootNode(node)) { + return true; + } + const parent = node.parentNode; + let parentID: number | undefined; + // Disable parent check for the upper context HTMLHtmlElement, because it is root there... 
(before) + // TODO: get rid of "special" cases (there is an issue with CreateDocument altered behaviour though) + // TODO: Clean the logic (though now it workd fine) + if (!isInstance(node, HTMLHtmlElement) || !this.inUpperContext) { + if (parent === null) { + this.unbindNode(node); + return false; + } + parentID = this.app.nodes.getID(parent); + if (parentID === undefined) { + this.unbindNode(node); + return false; + } + if (!this.commitNode(parentID)) { + this.unbindNode(node); + return false; + } + this.app.sanitizer.handleNode(id, parentID, node); + } + let sibling = node.previousSibling; + while (sibling !== null) { + const siblingID = this.app.nodes.getID(sibling); + if (siblingID !== undefined) { + this.commitNode(siblingID); + this.indexes[id] = this.indexes[siblingID] + 1; + break; + } + sibling = sibling.previousSibling; + } + if (sibling === null) { + this.indexes[id] = 0; // + } + const isNew = this.recents[id]; + const index = this.indexes[id]; + if (index === undefined) { + throw 'commitNode: missing node index'; + } + if (isNew === true) { + if (isInstance(node, Element)) { + if (parentID !== undefined) { + this.app.send(new + CreateElementNode( + id, + parentID, + index, + node.tagName, + isSVGElement(node), + ), + ); + } + for (let i = 0; i < node.attributes.length; i++) { + const attr = node.attributes[i]; + this.sendNodeAttribute(id, node, attr.nodeName, attr.value); + } + } else if (isInstance(node, Text)) { + // for text node id != 0, hence parentID !== undefined and parent is Element + this.app.send(new CreateTextNode(id, parentID as number, index)); + this.sendNodeData(id, parent as Element, node.data); + } + return true; + } + if (isNew === false && parentID !== undefined) { + this.app.send(new MoveNode(id, parentID, index)); + } + const attr = this.attributesList[id]; + if (attr !== undefined) { + if (!isInstance(node, Element)) { + throw 'commitNode: node is not an element'; + } + for (const name of attr) { + this.sendNodeAttribute(id, 
node, name, node.getAttribute(name)); + } + } + if (this.textSet.has(id)) { + if (!isInstance(node, Text)) { + throw 'commitNode: node is not a text'; + } + // for text node id != 0, hence parent is Element + this.sendNodeData(id, parent as Element, node.data); + } + return true; + } + private commitNode(id: number): boolean { + const node = this.app.nodes.getNode(id); + if (node === undefined) { + return false; + } + const cmt = this.commited[id]; + if (cmt !== undefined) { + return cmt; + } + return (this.commited[id] = this._commitNode(id, node)); + } + private commitNodes(): void { + let node; + for (let id = 0; id < this.recents.length; id++) { + // TODO: make things/logic nice here. + // commit required in any case if recents[id] true or false (in case of unbinding) or undefined (in case of attr change). + if (!this.myNodes[id]) { continue } + this.commitNode(id); + if (this.recents[id] === true && (node = this.app.nodes.getNode(id))) { + this.app.nodes.callNodeCallbacks(node); + } + } + this.clear(); + } + + // ISSSUE + protected observeRoot(node: Node, beforeCommit: (id?: number) => unknown, nodeToBind: Node = node) { + this.observer.observe(node, { + childList: true, + attributes: true, + characterData: true, + subtree: true, + attributeOldValue: false, + characterDataOldValue: false, + }); + this.bindTree(nodeToBind); + beforeCommit(this.app.nodes.getID(node)) + this.commitNodes(); + } + + disconnect(): void { + this.observer.disconnect(); + this.clear(); + this.myNodes.length = 0; + } +} diff --git a/tracker/tracker/src/main/app/observer/shadow_root_observer.ts b/tracker/tracker/src/main/app/observer/shadow_root_observer.ts new file mode 100644 index 000000000..244348ea1 --- /dev/null +++ b/tracker/tracker/src/main/app/observer/shadow_root_observer.ts @@ -0,0 +1,18 @@ +import Observer from "./observer.js"; +import { CreateIFrameDocument } from "../../../messages/index.js"; + +export default class ShadowRootObserver extends Observer { + observe(el: 
Element) { + const shRoot = el.shadowRoot; + const hostID = this.app.nodes.getID(el); + if (!shRoot || hostID === undefined) { return } // log + this.observeRoot(shRoot, (rootID) => { + if (rootID === undefined) { + console.log("OpenReplay: Shadow Root was not bound") + return; + } + this.app.send(CreateIFrameDocument(hostID,rootID)); + }); + } + +} \ No newline at end of file diff --git a/tracker/tracker/src/main/app/observer/top_observer.ts b/tracker/tracker/src/main/app/observer/top_observer.ts new file mode 100644 index 000000000..b35f5d901 --- /dev/null +++ b/tracker/tracker/src/main/app/observer/top_observer.ts @@ -0,0 +1,98 @@ +import Observer from "./observer.js"; +import { isInstance } from "../context.js"; +import type { Window } from "../context.js"; +import IFrameObserver from "./iframe_observer.js"; +import ShadowRootObserver from "./shadow_root_observer.js"; + +import { CreateDocument } from "../../../messages/index.js"; +import App from "../index.js"; +import { IN_BROWSER } from '../../utils.js' + +export interface Options { + captureIFrames: boolean +} + +const attachShadowNativeFn = IN_BROWSER ? 
Element.prototype.attachShadow : ()=>new ShadowRoot(); + +export default class TopObserver extends Observer { + private readonly options: Options; + constructor(app: App, options: Partial) { + super(app); + this.options = Object.assign({ + captureIFrames: false + }, options); + + // IFrames + this.app.nodes.attachNodeCallback(node => { + if (isInstance(node, HTMLIFrameElement) && + (this.options.captureIFrames || node.getAttribute("data-openreplay-capture")) + ) { + this.handleIframe(node) + } + }) + + // ShadowDOM + this.app.nodes.attachNodeCallback(node => { + if (isInstance(node, Element) && node.shadowRoot !== null) { + this.handleShadowRoot(node.shadowRoot) + } + }) + } + + + private iframeObservers: IFrameObserver[] = []; + private handleIframe(iframe: HTMLIFrameElement): void { + let context: Window | null = null + const handle = this.app.safe(() => { + const id = this.app.nodes.getID(iframe) + if (id === undefined) { return } //log + if (iframe.contentWindow === context) { return } //Does this happen frequently? + context = iframe.contentWindow as Window | null; + if (!context) { return } + const observer = new IFrameObserver(this.app, context) + + this.iframeObservers.push(observer) + observer.observe(iframe) + }) + this.app.attachEventListener(iframe, "load", handle) + handle() + } + + private shadowRootObservers: ShadowRootObserver[] = [] + private handleShadowRoot(shRoot: ShadowRoot) { + const observer = new ShadowRootObserver(this.app, this.context) + + this.shadowRootObservers.push(observer) + observer.observe(shRoot.host) + } + + observe(): void { + // Protection from several subsequent calls? + const observer = this; + Element.prototype.attachShadow = function() { + const shadow = attachShadowNativeFn.apply(this, arguments) + observer.handleShadowRoot(shadow) + return shadow + } + + // Can observe documentElement () here, because it is not supposed to be changing. 
+ // However, it is possible in some exotic cases and may cause an ignorance of the newly created + // In this case context.document have to be observed, but this will cause + // the change in the re-player behaviour caused by CreateDocument message: + // the 0-node ("fRoot") will become #document rather than documentElement as it is now. + // Alternatively - observe(#document) then bindNode(documentElement) + this.observeRoot(this.context.document, () => { + this.app.send(new CreateDocument()) + }, this.context.document.documentElement); + } + + disconnect() { + Element.prototype.attachShadow = attachShadowNativeFn + this.iframeObservers.forEach(o => o.disconnect()) + this.iframeObservers = [] + this.shadowRootObservers.forEach(o => o.disconnect()) + this.shadowRootObservers = [] + super.disconnect() + } + +} \ No newline at end of file diff --git a/tracker/tracker/src/main/app/sanitizer.ts b/tracker/tracker/src/main/app/sanitizer.ts new file mode 100644 index 000000000..d085b5739 --- /dev/null +++ b/tracker/tracker/src/main/app/sanitizer.ts @@ -0,0 +1,66 @@ +import { stars, hasOpenreplayAttribute } from "../utils.js"; +import App from "./index.js"; +import { isInstance } from "./context.js"; + +export interface Options { + obscureTextEmails: boolean; + obscureTextNumbers: boolean; +} + +export default class Sanitizer { + private readonly masked: Set = new Set(); + private readonly options: Options; + + constructor(private readonly app: App, options: Partial) { + this.options = Object.assign({ + obscureTextEmails: true, + obscureTextNumbers: false, + }, options); + } + + handleNode(id: number, parentID: number, node: Node) { + if ( + this.masked.has(parentID) || + (isInstance(node, Element) && hasOpenreplayAttribute(node, 'masked')) + ) { + this.masked.add(id); + } + } + + sanitize(id: number, data: string): string { + if (this.masked.has(id)) { + // TODO: is it the best place to put trim() ? Might trimmed spaces be considered in layout in certain cases? 
+ return data.trim().replace( + /[^\f\n\r\t\v\u00a0\u1680\u2000-\u200a\u2028\u2029\u202f\u205f\u3000\ufeff]/g, + '█', + ); + } + if (this.options.obscureTextNumbers) { + data = data.replace(/\d/g, '0'); + } + if (this.options.obscureTextEmails) { + data = data.replace( + /([^\s]+)@([^\s]+)\.([^\s]+)/g, + (...f: Array) => + stars(f[1]) + '@' + stars(f[2]) + '.' + stars(f[3]), + ); + } + return data + } + + isMasked(id: number): boolean { + return this.masked.has(id); + } + + getInnerTextSecure(el: HTMLElement): string { + const id = this.app.nodes.getID(el) + if (!id) { return '' } + return this.sanitize(id, el.innerText) + + } + + clear(): void { + this.masked.clear(); + } + +} \ No newline at end of file diff --git a/tracker/tracker/src/main/index.ts b/tracker/tracker/src/main/index.ts index 6af325e57..75d195e50 100644 --- a/tracker/tracker/src/main/index.ts +++ b/tracker/tracker/src/main/index.ts @@ -19,14 +19,15 @@ import Longtasks from "./modules/longtasks.js"; import CSSRules from "./modules/cssrules.js"; import { IN_BROWSER, deprecationWarn, DOCS_HOST } from "./utils.js"; -import { Options as AppOptions } from "./app/index.js"; -import { Options as ConsoleOptions } from "./modules/console.js"; -import { Options as ExceptionOptions } from "./modules/exception.js"; -import { Options as InputOptions } from "./modules/input.js"; -import { Options as PerformanceOptions } from "./modules/performance.js"; -import { Options as TimingOptions } from "./modules/timing.js"; - -export type { OnStartInfo } from './app/index.js'; +import type { Options as AppOptions } from "./app/index.js"; +import type { Options as ConsoleOptions } from "./modules/console.js"; +import type { Options as ExceptionOptions } from "./modules/exception.js"; +import type { Options as InputOptions } from "./modules/input.js"; +import type { Options as PerformanceOptions } from "./modules/performance.js"; +import type { Options as TimingOptions } from "./modules/timing.js"; +import type { 
StartOptions } from './app/index.js' +//TODO: unique options init +import type { OnStartInfo } from './app/index.js'; export type Options = Partial< AppOptions & ConsoleOptions & ExceptionOptions & InputOptions & PerformanceOptions & TimingOptions @@ -35,6 +36,7 @@ export type Options = Partial< projectKey: string; sessionToken?: string; respectDoNotTrack?: boolean; + autoResetOnWindowOpen?: boolean; // dev only __DISABLE_SECURE_MODE?: boolean; }; @@ -84,7 +86,7 @@ export default class API { (navigator.doNotTrack == '1' // @ts-ignore || window.doNotTrack == '1'); - this.app = doNotTrack || + const app = this.app = doNotTrack || !('Map' in window) || !('Set' in window) || !('MutationObserver' in window) || @@ -95,20 +97,35 @@ export default class API { !('Worker' in window) ? null : new App(options.projectKey, options.sessionToken, options); - if (this.app !== null) { - Viewport(this.app); - CSSRules(this.app); - Connection(this.app); - Console(this.app, options); - Exception(this.app, options); - Img(this.app); - Input(this.app, options); - Mouse(this.app); - Timing(this.app, options); - Performance(this.app, options); - Scroll(this.app); - Longtasks(this.app); + if (app !== null) { + Viewport(app); + CSSRules(app); + Connection(app); + Console(app, options); + Exception(app, options); + Img(app); + Input(app, options); + Mouse(app); + Timing(app, options); + Performance(app, options); + Scroll(app); + Longtasks(app); (window as any).__OPENREPLAY__ = this; + + if (options.autoResetOnWindowOpen) { + const wOpen = window.open; + app.attachStartCallback(() => { + // @ts-ignore ? 
+ window.open = function(...args) { + app.resetNextPageSession(true) + wOpen.call(window, ...args) + app.resetNextPageSession(false) + } + }) + app.attachStopCallback(() => { + window.open = wOpen; + }) + } } else { console.log("OpenReplay: browser doesn't support API required for tracking or doNotTrack is set to 1.") const req = new XMLHttpRequest(); @@ -140,7 +157,7 @@ export default class API { return this.isActive(); } - start() /*: Promise*/ { + start(startOpts?: StartOptions) : Promise { if (!IN_BROWSER) { console.error(`OpenReplay: you are trying to start Tracker on a node.js environment. If you want to use OpenReplay with SSR, please, use componentDidMount or useEffect API for placing the \`tracker.start()\` line. Check documentation on ${DOCS_HOST}${DOCS_SETUP}`) return Promise.reject("Trying to start not in browser."); @@ -148,7 +165,7 @@ export default class API { if (this.app === null) { return Promise.reject("Browser doesn't support required api, or doNotTrack is active."); } - return this.app.start(); + return this.app.start(startOpts); } stop(): void { if (this.app === null) { diff --git a/tracker/tracker/src/main/modules/exception.ts b/tracker/tracker/src/main/modules/exception.ts index 45fe37465..848df03be 100644 --- a/tracker/tracker/src/main/modules/exception.ts +++ b/tracker/tracker/src/main/modules/exception.ts @@ -50,7 +50,13 @@ export function getExceptionMessageFromEvent(e: ErrorEvent | PromiseRejectionEve if (e.reason instanceof Error) { return getExceptionMessage(e.reason, []) } else { - return new JSException('Unhandled Promise Rejection', String(e.reason), '[]'); + let message: string; + try { + message = JSON.stringify(e.reason) + } catch(_) { + message = String(e.reason) + } + return new JSException('Unhandled Promise Rejection', message, '[]'); } } return null; diff --git a/tracker/tracker/src/main/modules/img.ts b/tracker/tracker/src/main/modules/img.ts index 61e793b89..8c0f911a8 100644 --- a/tracker/tracker/src/main/modules/img.ts 
+++ b/tracker/tracker/src/main/modules/img.ts @@ -1,8 +1,21 @@ import { timestamp, isURL } from "../utils.js"; import App from "../app/index.js"; -import { ResourceTiming, SetNodeAttributeURLBased } from "../../messages/index.js"; +import { ResourceTiming, SetNodeAttributeURLBased, SetNodeAttribute } from "../../messages/index.js"; + +const PLACEHOLDER_SRC = "https://static.openreplay.com/tracker/placeholder.jpeg"; export default function (app: App): void { + function sendPlaceholder(id: number, node: HTMLImageElement): void { + app.send(new SetNodeAttribute(id, "src", PLACEHOLDER_SRC)) + const { width, height } = node.getBoundingClientRect(); + if (!node.hasAttribute("width")){ + app.send(new SetNodeAttribute(id, "width", String(width))) + } + if (!node.hasAttribute("height")){ + app.send(new SetNodeAttribute(id, "height", String(height))) + } + } + const sendImgSrc = app.safe(function (this: HTMLImageElement): void { const id = app.nodes.getID(this); if (id === undefined) { @@ -16,7 +29,9 @@ export default function (app: App): void { if (src != null && isURL(src)) { // TODO: How about relative urls ? Src type is null sometimes. 
app.send(new ResourceTiming(timestamp(), 0, 0, 0, 0, 0, src, 'img')); } - } else if (src.length < 1e5) { + } else if (src.length >= 1e5 || app.sanitizer.isMasked(id)) { + sendPlaceholder(id, this) + } else { app.send(new SetNodeAttributeURLBased(id, 'src', src, app.getBaseHref())); } }); diff --git a/tracker/tracker/src/main/modules/input.ts b/tracker/tracker/src/main/modules/input.ts index 746c26f8f..ad8cda673 100644 --- a/tracker/tracker/src/main/modules/input.ts +++ b/tracker/tracker/src/main/modules/input.ts @@ -2,7 +2,12 @@ import { normSpaces, IN_BROWSER, getLabelAttribute, hasOpenreplayAttribute } fro import App from "../app/index.js"; import { SetInputTarget, SetInputValue, SetInputChecked } from "../../messages/index.js"; -function isInput(node: any): node is HTMLInputElement { +// TODO: take into consideration "contenteditable" attribute +type TextEditableElement = HTMLInputElement | HTMLTextAreaElement +function isTextEditable(node: any): node is TextEditableElement { + if (node instanceof HTMLTextAreaElement) { + return true; + } if (!(node instanceof HTMLInputElement)) { return false; } @@ -16,6 +21,7 @@ function isInput(node: any): node is HTMLInputElement { type === 'range' ); } + function isCheckable(node: any): node is HTMLInputElement { if (!(node instanceof HTMLInputElement)) { return false; @@ -25,7 +31,7 @@ function isCheckable(node: any): node is HTMLInputElement { } const labelElementFor: ( - node: HTMLInputElement, + node: TextEditableElement, ) => HTMLLabelElement | undefined = IN_BROWSER && 'labels' in HTMLInputElement.prototype ? 
(node): HTMLLabelElement | undefined => { @@ -56,7 +62,7 @@ const labelElementFor: ( } }; -export function getInputLabel(node: HTMLInputElement): string { +export function getInputLabel(node: TextEditableElement): string { let label = getLabelAttribute(node); if (label === null) { const labelElement = labelElementFor(node); @@ -89,13 +95,13 @@ export default function (app: App, opts: Partial): void { }, opts, ); - function sendInputTarget(id: number, node: HTMLInputElement): void { + function sendInputTarget(id: number, node: TextEditableElement): void { const label = getInputLabel(node); if (label !== '') { app.send(new SetInputTarget(id, label)); } } - function sendInputValue(id: number, node: HTMLInputElement): void { + function sendInputValue(id: number, node: TextEditableElement): void { let value = node.value; let inputMode: InputMode = options.defaultInputMode; if (node.type === 'password' || hasOpenreplayAttribute(node, 'hidden')) { @@ -136,7 +142,7 @@ export default function (app: App, opts: Partial): void { app.ticker.attach((): void => { inputValues.forEach((value, id) => { const node = app.nodes.getNode(id); - if (!isInput(node)) { + if (!isTextEditable(node)) { inputValues.delete(id); return; } @@ -169,7 +175,7 @@ export default function (app: App, opts: Partial): void { if (id === undefined) { return; } - if (isInput(node)) { + if (isTextEditable(node)) { inputValues.set(id, node.value); sendInputValue(id, node); return; diff --git a/tracker/tracker/src/main/modules/mouse.ts b/tracker/tracker/src/main/modules/mouse.ts index 3ec70e844..0089fa37f 100644 --- a/tracker/tracker/src/main/modules/mouse.ts +++ b/tracker/tracker/src/main/modules/mouse.ts @@ -92,7 +92,7 @@ export default function (app: App): void { (target as HTMLElement).onclick != null || target.getAttribute('role') === 'button' ) { - const label: string = app.observer.getInnerTextSecure(target as HTMLElement); + const label: string = app.sanitizer.getInnerTextSecure(target as HTMLElement); 
return normSpaces(label).slice(0, 100); } return ''; diff --git a/tracker/tracker/src/webworker/index.ts b/tracker/tracker/src/webworker/index.ts index 723008c52..cf0d1586a 100644 --- a/tracker/tracker/src/webworker/index.ts +++ b/tracker/tracker/src/webworker/index.ts @@ -47,6 +47,7 @@ function sendBatch(batch: Uint8Array):void { return; // happens simultaneously with onerror TODO: clear codeflow } if (this.status >= 400) { // TODO: test workflow. After 400+ it calls /start for some reason + busy = false; reset(); sendQueue.length = 0; if (this.status === 401) { // Unauthorised (Token expired)