diff --git a/api/.chalice/config.bundle.json b/api/.chalice/config.bundle.json
deleted file mode 100644
index 95b29ab50..000000000
--- a/api/.chalice/config.bundle.json
+++ /dev/null
@@ -1,67 +0,0 @@
-{
-  "version": "2.0",
-  "app_name": "parrot",
-  "environment_variables": {
-  },
-  "stages": {
-    "default-foss": {
-      "api_gateway_stage": "default-fos",
-      "manage_iam_role": false,
-      "iam_role_arn": "",
-      "autogen_policy": true,
-      "environment_variables": {
-        "isFOS": "true",
-        "isEE": "false",
-        "stage": "default-foss",
-        "jwt_issuer": "openreplay-default-foss",
-        "sentryURL": "",
-        "pg_host": "postgresql.db.svc.cluster.local",
-        "pg_port": "5432",
-        "pg_dbname": "postgres",
-        "pg_user": "postgres",
-        "pg_password": "asayerPostgres",
-        "alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s",
-        "email_signup": "http://127.0.0.1:8000/async/email_signup/%s",
-        "email_funnel": "http://127.0.0.1:8000/async/funnel/%s",
-        "email_basic": "http://127.0.0.1:8000/async/basic/%s",
-        "assign_link": "http://127.0.0.1:8000/async/email_assignment",
-        "captcha_server": "",
-        "captcha_key": "",
-        "sessions_bucket": "mobs",
-        "sessions_region": "us-east-1",
-        "put_S3_TTL": "20",
-        "sourcemaps_reader": "http://0.0.0.0:9000/sourcemaps",
-        "sourcemaps_bucket": "sourcemaps",
-        "js_cache_bucket": "sessions-assets",
-        "peers": "http://0.0.0.0:9000/assist/peers",
-        "async_Token": "",
-        "EMAIL_HOST": "",
-        "EMAIL_PORT": "587",
-        "EMAIL_USER": "",
-        "EMAIL_PASSWORD": "",
-        "EMAIL_USE_TLS": "true",
-        "EMAIL_USE_SSL": "false",
-        "EMAIL_SSL_KEY": "",
-        "EMAIL_SSL_CERT": "",
-        "EMAIL_FROM": "OpenReplay",
-        "SITE_URL": "",
-        "announcement_url": "",
-        "jwt_secret": "",
-        "jwt_algorithm": "HS512",
-        "jwt_exp_delta_seconds": "2592000",
-        "S3_HOST": "",
-        "S3_KEY": "",
-        "S3_SECRET": "",
-        "invitation_link": "/api/users/invitation?token=%s",
-        "change_password_link": "/reset-password?invitation=%s&&pass=%s",
-        "version_number": "1.3.5"
-      },
-      "lambda_timeout": 150,
-      "lambda_memory_size": 400,
-      "subnet_ids": [
-      ],
-      "security_group_ids": [
-      ]
-    }
-  }
-}
diff --git a/api/.chalice/config.json b/api/.chalice/config.json
deleted file mode 100644
index d1fe6c36c..000000000
--- a/api/.chalice/config.json
+++ /dev/null
@@ -1,68 +0,0 @@
-{
-  "version": "2.0",
-  "app_name": "parrot",
-  "environment_variables": {
-  },
-  "stages": {
-    "default-foss": {
-      "api_gateway_stage": "default-fos",
-      "manage_iam_role": false,
-      "iam_role_arn": "",
-      "autogen_policy": true,
-      "environment_variables": {
-        "isFOS": "true",
-        "isEE": "false",
-        "stage": "default-foss",
-        "jwt_issuer": "openreplay-default-foss",
-        "sentryURL": "",
-        "pg_host": "postgresql.db.svc.cluster.local",
-        "pg_port": "5432",
-        "pg_dbname": "postgres",
-        "pg_user": "postgres",
-        "pg_password": "asayerPostgres",
-        "alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s",
-        "email_signup": "http://127.0.0.1:8000/async/email_signup/%s",
-        "email_funnel": "http://127.0.0.1:8000/async/funnel/%s",
-        "email_basic": "http://127.0.0.1:8000/async/basic/%s",
-        "assign_link": "http://127.0.0.1:8000/async/email_assignment",
-        "captcha_server": "",
-        "captcha_key": "",
-        "sessions_bucket": "mobs",
-        "sessions_region": "us-east-1",
-        "put_S3_TTL": "20",
-        "sourcemaps_reader": "http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps",
-        "sourcemaps_bucket": "sourcemaps",
-        "js_cache_bucket": "sessions-assets",
-        "peers": "http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers",
-        "async_Token": "",
-        "EMAIL_HOST": "",
-        "EMAIL_PORT": "587",
-        "EMAIL_USER": "",
-        "EMAIL_PASSWORD": "",
-        "EMAIL_USE_TLS": "true",
-        "EMAIL_USE_SSL": "false",
-        "EMAIL_SSL_KEY": "",
-        "EMAIL_SSL_CERT": "",
-        "EMAIL_FROM": "OpenReplay",
-        "SITE_URL": "",
-        "announcement_url": "",
-        "jwt_secret": "",
-        "jwt_algorithm": "HS512",
-        "jwt_exp_delta_seconds": "2592000",
-        "S3_HOST": "",
-        "S3_KEY": "",
-        "S3_SECRET": "",
-        "invitation_link": "/api/users/invitation?token=%s",
-        "change_password_link": "/reset-password?invitation=%s&&pass=%s",
-        "iosBucket": "openreplay-ios-images",
-        "version_number": "1.3.6"
-      },
-      "lambda_timeout": 150,
-      "lambda_memory_size": 400,
-      "subnet_ids": [
-      ],
-      "security_group_ids": [
-      ]
-    }
-  }
-}
\ No newline at end of file
diff --git a/api/.env.default b/api/.env.default
new file mode 100644
index 000000000..3b05ce3d2
--- /dev/null
+++ b/api/.env.default
@@ -0,0 +1,46 @@
+EMAIL_FROM=OpenReplay
+EMAIL_HOST=
+EMAIL_PASSWORD=
+EMAIL_PORT=587
+EMAIL_SSL_CERT=
+EMAIL_SSL_KEY=
+EMAIL_USER=
+EMAIL_USE_SSL=false
+EMAIL_USE_TLS=true
+S3_HOST=
+S3_KEY=
+S3_SECRET=
+SITE_URL=
+alert_ntf=http://127.0.0.1:8000/async/alerts/notifications/%s
+announcement_url=
+assign_link=http://127.0.0.1:8000/async/email_assignment
+async_Token=
+captcha_key=
+captcha_server=
+change_password_link=/reset-password?invitation=%s&&pass=%s
+email_basic=http://127.0.0.1:8000/async/basic/%s
+email_signup=http://127.0.0.1:8000/async/email_signup/%s
+invitation_link=/api/users/invitation?token=%s
+isEE=false
+isFOS=true
+js_cache_bucket=sessions-assets
+jwt_algorithm=HS512
+jwt_exp_delta_seconds=2592000
+jwt_issuer=openreplay-default-foss
+jwt_secret="SET A RANDOM STRING HERE"
+peers=http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers
+pg_dbname=postgres
+pg_host=postgresql.db.svc.cluster.local
+pg_password=asayerPostgres
+pg_port=5432
+pg_user=postgres
+pg_timeout=30
+pg_minconn=50
+put_S3_TTL=20
+sentryURL=
+sessions_bucket=mobs
+sessions_region=us-east-1
+sourcemaps_bucket=sourcemaps
+sourcemaps_reader=http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps
+stage=default-foss
+version_number=1.4.0
\ No newline at end of file
diff --git a/api/Dockerfile b/api/Dockerfile
index c9c0eaf2c..a8d6210a0 100644
--- a/api/Dockerfile
+++ b/api/Dockerfile
@@ -1,9 +1,10 @@
-FROM python:3.6-slim
+FROM python:3.9.7-slim
 LABEL Maintainer="Rajesh Rajendran"
+LABEL Maintainer="KRAIEM Taha Yassine"
 WORKDIR /work
 COPY . .
-RUN pip install -r requirements.txt -t ./vendor --upgrade
-RUN pip install chalice==1.22.2
+RUN pip install -r requirements.txt
+RUN mv .env.default .env
 
 # Add Tini
 # Startup daemon
diff --git a/api/Dockerfile.alerts b/api/Dockerfile.alerts
new file mode 100644
index 000000000..7bff6a9dc
--- /dev/null
+++ b/api/Dockerfile.alerts
@@ -0,0 +1,18 @@
+FROM python:3.9.7-slim
+LABEL Maintainer="Rajesh Rajendran"
+LABEL Maintainer="KRAIEM Taha Yassine"
+WORKDIR /work
+COPY . .
+RUN pip install -r requirements.txt
+RUN mv .env.default .env && mv app_alerts.py app.py
+ENV pg_minconn 2
+
+# Add Tini
+# Startup daemon
+ENV TINI_VERSION v0.19.0
+ARG envarg
+ENV ENTERPRISE_BUILD ${envarg}
+ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
+RUN chmod +x /tini
+ENTRYPOINT ["/tini", "--"]
+CMD ./entrypoint.sh
\ No newline at end of file
diff --git a/api/app.py b/api/app.py
index e67810de5..d261dadac 100644
--- a/api/app.py
+++ b/api/app.py
@@ -1,109 +1,70 @@
-import sentry_sdk
-from chalice import Chalice, Response
-from sentry_sdk import configure_scope
+import logging
+
+from apscheduler.schedulers.asyncio import AsyncIOScheduler
+from decouple import config
+from fastapi import FastAPI, Request
+from fastapi.middleware.cors import CORSMiddleware
+from starlette.responses import StreamingResponse
 
-from chalicelib import _overrides
-from chalicelib.blueprints import bp_authorizers
-from chalicelib.blueprints import bp_core, bp_core_crons
-from chalicelib.blueprints.app import v1_api
-from chalicelib.blueprints import bp_core_dynamic, bp_core_dynamic_crons
-from chalicelib.blueprints.subs import bp_dashboard
 from chalicelib.utils import helper
 from chalicelib.utils import pg_client
-from chalicelib.utils.helper import environ
+from routers import core, core_dynamic
+from routers.app import v1_api
+from routers.crons import core_crons
+from routers.crons import core_dynamic_crons
+from routers.subs import dashboard
 
-app = Chalice(app_name='parrot')
-app.debug = not helper.is_production() or helper.is_local()
-
-sentry_sdk.init(environ["sentryURL"])
-
-# Monkey-patch print for DataDog hack
-import sys
-import traceback
-
-old_tb = traceback.print_exception
-old_f = sys.stdout
-old_e = sys.stderr
-OR_SESSION_TOKEN = None
-
-
-class F:
-    def write(self, x):
-        if OR_SESSION_TOKEN is not None and x != '\n' and not helper.is_local():
-            old_f.write(f"[or_session_token={OR_SESSION_TOKEN}] {x}")
-        else:
-            old_f.write(x)
-
-    def flush(self):
-        pass
-
-
-def tb_print_exception(etype, value, tb, limit=None, file=None, chain=True):
-    if OR_SESSION_TOKEN is not None and not helper.is_local():
-        value = type(value)(f"[or_session_token={OR_SESSION_TOKEN}] " + str(value))
-
-    old_tb(etype, value, tb, limit, file, chain)
-
-
-if helper.is_production():
-    traceback.print_exception = tb_print_exception
-
-sys.stdout = F()
-sys.stderr = F()
-# ---End Monkey-patch
-
-
-_overrides.chalice_app(app)
+app = FastAPI()
 
 
 @app.middleware('http')
-def or_middleware(event, get_response):
+async def or_middleware(request: Request, call_next):
     global OR_SESSION_TOKEN
-    OR_SESSION_TOKEN = app.current_request.headers.get('vnd.openreplay.com.sid',
-                                                       app.current_request.headers.get('vnd.asayer.io.sid'))
-    if "authorizer" in event.context and event.context["authorizer"] is None:
-        print("Deleted user!!")
-        pg_client.close()
-        return Response(body={"errors": ["Deleted user"]}, status_code=403)
+    OR_SESSION_TOKEN = request.headers.get('vnd.openreplay.com.sid', request.headers.get('vnd.asayer.io.sid'))
     try:
         if helper.TRACK_TIME:
             import time
             now = int(time.time() * 1000)
-        response = get_response(event)
-
-        if response.status_code == 200 and response.body is not None and response.body.get("errors") is not None:
-            if "not found" in response.body["errors"][0]:
-                response = Response(status_code=404, body=response.body)
-            else:
-                response = Response(status_code=400, body=response.body)
-        if response.status_code // 100 == 5 and helper.allow_sentry() and OR_SESSION_TOKEN is not None and not helper.is_local():
-            with configure_scope() as scope:
-                scope.set_tag('stage', environ["stage"])
-                scope.set_tag('openReplaySessionToken', OR_SESSION_TOKEN)
-                scope.set_extra("context", event.context)
-            sentry_sdk.capture_exception(Exception(response.body))
+        response: StreamingResponse = await call_next(request)
         if helper.TRACK_TIME:
             print(f"Execution time: {int(time.time() * 1000) - now} ms")
     except Exception as e:
-        if helper.allow_sentry() and OR_SESSION_TOKEN is not None and not helper.is_local():
-            with configure_scope() as scope:
-                scope.set_tag('stage', environ["stage"])
-                scope.set_tag('openReplaySessionToken', OR_SESSION_TOKEN)
-                scope.set_extra("context", event.context)
-            sentry_sdk.capture_exception(e)
-        response = Response(body={"Code": "InternalServerError",
-                                  "Message": "An internal server error occurred [level=Fatal]."},
-                            status_code=500)
+        pg_client.close()
+        raise e
     pg_client.close()
     return response
 
 
-# Open source
-app.register_blueprint(bp_authorizers.app)
-app.register_blueprint(bp_core.app)
-app.register_blueprint(bp_core_crons.app)
-app.register_blueprint(bp_core_dynamic.app)
-app.register_blueprint(bp_core_dynamic_crons.app)
-app.register_blueprint(bp_dashboard.app)
-app.register_blueprint(v1_api.app)
+origins = [
+    "*",
+]
+
+app.add_middleware(
+    CORSMiddleware,
+    allow_origins=origins,
+    allow_credentials=True,
+    allow_methods=["*"],
+    allow_headers=["*"],
+)
+app.include_router(core.public_app)
+app.include_router(core.app)
+app.include_router(core.app_apikey)
+app.include_router(core_dynamic.public_app)
+app.include_router(core_dynamic.app)
+app.include_router(core_dynamic.app_apikey)
+app.include_router(dashboard.app)
+# app.include_router(insights.app)
+app.include_router(v1_api.app_apikey)
+
+Schedule = AsyncIOScheduler()
+Schedule.start()
+
+for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
+    Schedule.add_job(id=job["func"].__name__, **job)
+
+for job in Schedule.get_jobs():
+    print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
+
+logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
+logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO))
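Editor's note: the new or_middleware keeps the shape of the old Chalice middleware — read the OpenReplay session header, optionally time the handler, and always release DB connections — but lets exceptions propagate to FastAPI instead of hand-building a 500 response. A self-contained sketch of the pattern (pg_client is the repo's own module; it is stubbed here so the example runs standalone):

import time

from fastapi import FastAPI, Request

app = FastAPI()


class pg_client:  # stand-in for chalicelib.utils.pg_client
    @staticmethod
    def close():
        pass


@app.middleware("http")
async def or_middleware(request: Request, call_next):
    session_token = request.headers.get("vnd.openreplay.com.sid")
    start = int(time.time() * 1000)
    try:
        response = await call_next(request)
    except Exception:
        pg_client.close()  # release connections before re-raising
        raise
    pg_client.close()
    print(f"[sid={session_token}] execution time: {int(time.time() * 1000) - start} ms")
    return response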
diff --git a/api/app_alerts.py b/api/app_alerts.py
new file mode 100644
index 000000000..57bfcd55d
--- /dev/null
+++ b/api/app_alerts.py
@@ -0,0 +1,27 @@
+import logging
+
+from apscheduler.schedulers.asyncio import AsyncIOScheduler
+from decouple import config
+from fastapi import FastAPI
+
+from chalicelib.core import alerts_processor
+
+app = FastAPI()
+print("============= ALERTS =============")
+
+
+@app.get("/")
+async def root():
+    return {"status": "Running"}
+
+
+app.schedule = AsyncIOScheduler()
+app.schedule.start()
+app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",
+                                               "minutes": config("ALERTS_INTERVAL", cast=int, default=5),
+                                               "misfire_grace_time": 20})
+for job in app.schedule.get_jobs():
+    print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
+
+logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
+logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO))
diff --git a/api/chalicelib/blueprints/__init__.py b/api/auth/__init__.py
similarity index 100%
rename from api/chalicelib/blueprints/__init__.py
rename to api/auth/__init__.py
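Editor's note: the routers/crons modules consumed by app.py are not part of this hunk, but from `Schedule.add_job(id=job["func"].__name__, **job)` each cron_jobs entry is presumably a dict of APScheduler add_job() keyword arguments. A hypothetical module illustrating that shape, with triggers matching the deleted Chalice crons (hourly jobs, weekly report on Monday 00:05) further down in this diff:

async def run_scheduled_jobs():
    ...  # the real body lives in routers/crons


async def weekly_report():
    ...


cron_jobs = [
    {"func": run_scheduled_jobs, "trigger": "interval", "minutes": 60, "misfire_grace_time": 20},
    {"func": weekly_report, "trigger": "cron", "day_of_week": "mon", "hour": 0, "minute": 5},
]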
diff --git a/api/auth/auth_apikey.py b/api/auth/auth_apikey.py
new file mode 100644
index 000000000..9e385a993
--- /dev/null
+++ b/api/auth/auth_apikey.py
@@ -0,0 +1,28 @@
+from typing import Optional
+
+from fastapi import Request
+from fastapi.security import APIKeyHeader
+from starlette import status
+from starlette.exceptions import HTTPException
+
+from chalicelib.core import authorizers
+from schemas import CurrentAPIContext
+
+
+class APIKeyAuth(APIKeyHeader):
+    def __init__(self, auto_error: bool = True):
+        super(APIKeyAuth, self).__init__(name="Authorization", auto_error=auto_error)
+
+    async def __call__(self, request: Request) -> Optional[CurrentAPIContext]:
+        api_key: Optional[str] = await super(APIKeyAuth, self).__call__(request)
+        r = authorizers.api_key_authorizer(api_key)
+        if r is None:
+            raise HTTPException(
+                status_code=status.HTTP_401_UNAUTHORIZED,
+                detail="Invalid API Key",
+            )
+        r["authorizer_identity"] = "api_key"
+        print(r)
+        request.state.authorizer_identity = "api_key"
+        request.state.currentContext = CurrentAPIContext(tenant_id=r["tenantId"])
+        return request.state.currentContext
diff --git a/api/auth/auth_jwt.py b/api/auth/auth_jwt.py
new file mode 100644
index 000000000..bf6c1901b
--- /dev/null
+++ b/api/auth/auth_jwt.py
@@ -0,0 +1,39 @@
+from typing import Optional
+
+from fastapi import Request
+from fastapi.security import HTTPBearer, HTTPAuthorizationCredentials
+from starlette import status
+from starlette.exceptions import HTTPException
+
+from chalicelib.core import authorizers, users
+from schemas import CurrentContext
+
+
+class JWTAuth(HTTPBearer):
+    def __init__(self, auto_error: bool = True):
+        super(JWTAuth, self).__init__(auto_error=auto_error)
+
+    async def __call__(self, request: Request) -> Optional[CurrentContext]:
+        credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
+        if credentials:
+            if not credentials.scheme == "Bearer":
+                raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authentication scheme.")
+            jwt_payload = authorizers.jwt_authorizer(credentials.scheme + " " + credentials.credentials)
+            if jwt_payload is None \
+                    or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \
+                    or not users.auth_exists(user_id=jwt_payload["userId"], tenant_id=jwt_payload["tenantId"],
+                                             jwt_iat=jwt_payload["iat"], jwt_aud=jwt_payload["aud"]):
+                raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="Invalid token or expired token.")
+            user = users.get(user_id=jwt_payload["userId"], tenant_id=jwt_payload["tenantId"])
+            if user is None:
+                raise HTTPException(status_code=status.HTTP_403_FORBIDDEN, detail="User not found.")
+            jwt_payload["authorizer_identity"] = "jwt"
+            print(jwt_payload)
+            request.state.authorizer_identity = "jwt"
+            request.state.currentContext = CurrentContext(tenant_id=jwt_payload["tenantId"],
+                                                          user_id=jwt_payload["userId"],
+                                                          email=user["email"])
+            return request.state.currentContext
+
+        else:
+            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail="Invalid authorization code.")
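Editor's note: these two security classes replace the Chalice authorizers deleted later in this diff. The routers/ package they plug into is not shown here, so the wiring below is an assumption based on FastAPI's dependency system and on the public_app/app/app_apikey routers that app.py includes:

from fastapi import APIRouter, Depends, Request

from auth.auth_apikey import APIKeyAuth
from auth.auth_jwt import JWTAuth

public_app = APIRouter()                                      # unauthenticated: login, signup, ...
app = APIRouter(dependencies=[Depends(JWTAuth())])            # JWT-protected endpoints
app_apikey = APIRouter(dependencies=[Depends(APIKeyAuth())])  # API-key-protected endpoints


@app.get("/account")
async def get_account(request: Request):
    context = request.state.currentContext  # set by JWTAuth.__call__
    return {"data": {"email": context.email}}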
diff --git a/api/build.sh b/api/build.sh
index d66a54ab9..29b8911ca 100644
--- a/api/build.sh
+++ b/api/build.sh
@@ -22,7 +22,6 @@ function build_api(){
     # Copy enterprise code
     [[ $1 == "ee" ]] && {
         cp -rf ../ee/api/* ./
-        cp -rf ../ee/api/.chalice/* ./.chalice/
         envarg="default-ee"
         tag="ee-"
     }
@@ -31,8 +30,9 @@
         docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1}
         docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
         docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest
-    }
+}
 }
 
 check_prereq
 build_api $1
+IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1
\ No newline at end of file
diff --git a/api/build_alerts.sh b/api/build_alerts.sh
new file mode 100644
index 000000000..51504a276
--- /dev/null
+++ b/api/build_alerts.sh
@@ -0,0 +1,70 @@
+#!/bin/bash
+
+# Script to build alerts module
+# flags to accept:
+# envarg: build for enterprise edition.
+# Default will be OSS build.
+
+# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh
+
+function make_submodule() {
+    [[ $1 != "ee" ]] && {
+        # -- this part was generated by modules_lister.py --
+        mkdir alerts
+        cp -R ./{app_alerts,schemas}.py ./alerts/
+        mkdir -p ./alerts/chalicelib/
+        cp -R ./chalicelib/__init__.py ./alerts/chalicelib/
+        mkdir -p ./alerts/chalicelib/core/
+        cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,assist,events_ios,sessions_mobs,errors,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/
+        mkdir -p ./alerts/chalicelib/utils/
+        cp -R ./chalicelib/utils/{__init__,TimeUTC,pg_client,helper,event_filter_definition,dev,email_helper,email_handler,smtp,s3,metrics_helper}.py ./alerts/chalicelib/utils/
+        # -- end of generated part
+    }
+    [[ $1 == "ee" ]] && {
+        # -- this part was generated by modules_lister.py --
+        mkdir alerts
+        cp -R ./{app_alerts,schemas,schemas_ee}.py ./alerts/
+        mkdir -p ./alerts/chalicelib/
+        cp -R ./chalicelib/__init__.py ./alerts/chalicelib/
+        mkdir -p ./alerts/chalicelib/core/
+        cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,roles,assist,events_ios,sessions_mobs,errors,dashboard,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/
+        mkdir -p ./alerts/chalicelib/utils/
+        cp -R ./chalicelib/utils/{__init__,TimeUTC,pg_client,helper,event_filter_definition,dev,SAML2_helper,email_helper,email_handler,smtp,s3,args_transformer,ch_client,metrics_helper}.py ./alerts/chalicelib/utils/
+        # -- end of generated part
+    }
+    cp -R ./{Dockerfile.alerts,requirements.txt,.env.default,entrypoint.sh} ./alerts/
+    cp -R ./chalicelib/utils/html ./alerts/chalicelib/utils/html
+}
+
+git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
+envarg="default-foss"
+check_prereq() {
+    which docker || {
+        echo "Docker not installed, please install docker."
+        exit=1
+    }
+    [[ exit -eq 1 ]] && exit 1
+}
+
+function build_api(){
+    tag=""
+    # Copy enterprise code
+    [[ $1 == "ee" ]] && {
+        cp -rf ../ee/api/* ./
+        envarg="default-ee"
+        tag="ee-"
+    }
+    make_submodule $1
+    cd alerts
+    docker build -f ./Dockerfile.alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
+    cd ..
+    rm -rf alerts
+    [[ $PUSH_IMAGE -eq 1 ]] && {
+        docker push ${DOCKER_REPO:-'local'}/alerts:${git_sha1}
+        docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
+        docker push ${DOCKER_REPO:-'local'}/alerts:${tag}latest
+    }
+}
+
+check_prereq
+build_api $1
\ No newline at end of file
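Editor's note: the file lists in make_submodule are stamped "generated by modules_lister.py", a script not included in this diff. A minimal, illustrative sketch of how such a generator could derive them — walking the import statements of the entrypoint and keeping only in-repo modules (the real script's logic is unknown):

import ast
from pathlib import Path


def local_imports(path: Path, package: str = "chalicelib") -> set:
    # Collect "from chalicelib... import ..." targets from one source file.
    tree = ast.parse(path.read_text())
    found = set()
    for node in ast.walk(tree):
        if isinstance(node, ast.ImportFrom) and node.module and node.module.startswith(package):
            found.update(f"{node.module}.{alias.name}" for alias in node.names)
    return found


print(sorted(local_imports(Path("app_alerts.py"))))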
diff --git a/api/chalicelib/_overrides.py b/api/chalicelib/_overrides.py
deleted file mode 100644
index 2bf0b6d2a..000000000
--- a/api/chalicelib/_overrides.py
+++ /dev/null
@@ -1,104 +0,0 @@
-from chalice import Chalice, CORSConfig
-from chalicelib.blueprints import bp_authorizers
-from chalicelib.core import authorizers
-
-import sched
-import threading
-import time
-from datetime import datetime
-import pytz
-from croniter import croniter
-
-base_time = datetime.now(pytz.utc)
-
-cors_config = CORSConfig(
-    allow_origin='*',
-    allow_headers=['vnd.openreplay.com.sid', 'vnd.asayer.io.sid'],
-    # max_age=600,
-    # expose_headers=['X-Special-Header'],
-    allow_credentials=True
-)
-
-
-def chalice_app(app):
-    def app_route(self, path, **kwargs):
-        kwargs.setdefault('cors', cors_config)
-        kwargs.setdefault('authorizer', bp_authorizers.jwt_authorizer)
-        handler_type = 'route'
-        name = kwargs.pop('name', None)
-        registration_kwargs = {'path': path, 'kwargs': kwargs, 'authorizer': kwargs.get("authorizer")}
-
-        def _register_handler(user_handler):
-            handler_name = name
-            if handler_name is None:
-                handler_name = user_handler.__name__
-            if registration_kwargs is not None:
-                kwargs = registration_kwargs
-            else:
-                kwargs = {}
-
-            if kwargs['authorizer'] == bp_authorizers.jwt_authorizer \
-                    or kwargs['authorizer'] == bp_authorizers.api_key_authorizer:
-                def _user_handler(context=None, **args):
-                    if context is not None:
-                        args['context'] = context
-                    else:
-                        authorizer_context = app.current_request.context['authorizer']
-                        if kwargs['authorizer'] == bp_authorizers.jwt_authorizer:
-                            args['context'] = authorizers.jwt_context(authorizer_context)
-                        else:
-                            args['context'] = authorizer_context
-                    return user_handler(**args)
-
-                wrapped = self._wrap_handler(handler_type, handler_name, _user_handler)
-                self._register_handler(handler_type, handler_name, _user_handler, wrapped, kwargs)
-            else:
-                wrapped = self._wrap_handler(handler_type, handler_name, user_handler)
-                self._register_handler(handler_type, handler_name, user_handler, wrapped, kwargs)
-            return wrapped
-
-        return _register_handler
-
-    app.route = app_route.__get__(app, Chalice)
-
-    def app_schedule(self, expression, name=None, description=''):
-        handler_type = 'schedule'
-        registration_kwargs = {'expression': expression,
-                               'description': description}
-
-        def _register_handler(user_handler):
-            handler_name = name
-            if handler_name is None:
-                handler_name = user_handler.__name__
-            kwargs = registration_kwargs
-            cron_expression = kwargs["expression"].to_string()[len("cron("):-1]
-            if len(cron_expression.split(" ")) > 5:
-                cron_expression = " ".join(cron_expression.split(" ")[:-1])
-            cron_expression = cron_expression.replace("?", "*")
-            cron_shell(user_handler, cron_expression)
-
-            wrapped = self._wrap_handler(handler_type, handler_name, user_handler)
-            self._register_handler(handler_type, handler_name, user_handler, wrapped, kwargs)
-            return wrapped
-
-        return _register_handler
-
-    app.schedule = app_schedule.__get__(app, Chalice)
-
-    def spawn(function, args):
-        th = threading.Thread(target=function, kwargs=args)
-        th.setDaemon(True)
-        th.start()
-
-    def cron_shell(function, cron_expression):
-        def to_start():
-            scheduler = sched.scheduler(time.time, time.sleep)
-            citer = croniter(cron_expression, base_time)
-            while True:
-                next_execution = citer.get_next(datetime)
-                print(f"{function.__name__} next execution: {next_execution}")
-                scheduler.enterabs(next_execution.timestamp(), 1, function, argument=(None,))
-                scheduler.run()
-                print(f"{function.__name__} executed: {next_execution}")
-
-        spawn(to_start, None)
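Editor's note: the deleted cron_shell above re-implemented cron scheduling by hand with sched + croniter in daemon threads; the new code delegates this to APScheduler. A sketch of the old hourly trigger expressed under the new scheme:

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from apscheduler.triggers.cron import CronTrigger


def run_scheduled_jobs():
    print("running jobs")


scheduler = AsyncIOScheduler()
# Old Chalice expression Cron('0', '*', '?', '*', '*', '*') ~ top of every hour.
scheduler.add_job(run_scheduled_jobs, CronTrigger(minute=0))
scheduler.start()  # expects a running asyncio event loop, as in app.py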
diff --git a/api/chalicelib/blueprints/app/v1_api.py b/api/chalicelib/blueprints/app/v1_api.py
deleted file mode 100644
index 1d69bb8a3..000000000
--- a/api/chalicelib/blueprints/app/v1_api.py
+++ /dev/null
@@ -1,127 +0,0 @@
-from chalice import Blueprint, Response
-
-from chalicelib import _overrides
-from chalicelib.blueprints import bp_authorizers
-from chalicelib.core import sessions, events, jobs, projects
-from chalicelib.utils.TimeUTC import TimeUTC
-
-app = Blueprint(__name__)
-_overrides.chalice_app(app)
-
-
-@app.route('/v1/{projectKey}/users/{userId}/sessions', methods=['GET'], authorizer=bp_authorizers.api_key_authorizer)
-def get_user_sessions(projectKey, userId, context):
-    projectId = projects.get_internal_project_id(projectKey)
-    params = app.current_request.query_params
-
-    if params is None:
-        params = {}
-
-    return {
-        'data': sessions.get_user_sessions(
-            project_id=projectId,
-            user_id=userId,
-            start_date=params.get('start_date'),
-            end_date=params.get('end_date')
-        )
-    }
-
-
-@app.route('/v1/{projectKey}/sessions/{sessionId}/events', methods=['GET'],
-           authorizer=bp_authorizers.api_key_authorizer)
-def get_session_events(projectKey, sessionId, context):
-    projectId = projects.get_internal_project_id(projectKey)
-    return {
-        'data': events.get_by_sessionId2_pg(
-            project_id=projectId,
-            session_id=sessionId
-        )
-    }
-
-
-@app.route('/v1/{projectKey}/users/{userId}', methods=['GET'], authorizer=bp_authorizers.api_key_authorizer)
-def get_user_details(projectKey, userId, context):
-    projectId = projects.get_internal_project_id(projectKey)
-    return {
-        'data': sessions.get_session_user(
-            project_id=projectId,
-            user_id=userId
-        )
-    }
-    pass
-
-
-@app.route('/v1/{projectKey}/users/{userId}', methods=['DELETE'], authorizer=bp_authorizers.api_key_authorizer)
-def schedule_to_delete_user_data(projectKey, userId, context):
-    projectId = projects.get_internal_project_id(projectKey)
-    data = app.current_request.json_body
-
-    data["action"] = "delete_user_data"
-    data["reference_id"] = userId
-    data["description"] = f"Delete user sessions of userId = {userId}"
-    data["start_at"] = TimeUTC.to_human_readable(TimeUTC.midnight(1))
-    record = jobs.create(project_id=projectId, data=data)
-    return {
-        'data': record
-    }
-
-
-@app.route('/v1/{projectKey}/jobs', methods=['GET'], authorizer=bp_authorizers.api_key_authorizer)
-def get_jobs(projectKey, context):
-    projectId = projects.get_internal_project_id(projectKey)
-    return {
-        'data': jobs.get_all(project_id=projectId)
-    }
-    pass
-
-
-@app.route('/v1/{projectKey}/jobs/{jobId}', methods=['GET'], authorizer=bp_authorizers.api_key_authorizer)
-def get_job(projectKey, jobId, context):
-    return {
-        'data': jobs.get(job_id=jobId)
-    }
-    pass
-
-
-@app.route('/v1/{projectKey}/jobs/{jobId}', methods=['DELETE'], authorizer=bp_authorizers.api_key_authorizer)
-def cancel_job(projectKey, jobId, context):
-    job = jobs.get(job_id=jobId)
-    job_not_found = len(job.keys()) == 0
-
-    if job_not_found or job["status"] == jobs.JobStatus.COMPLETED or job["status"] == jobs.JobStatus.CANCELLED:
-        return Response(status_code=501, body="The request job has already been canceled/completed (or was not found).")
-
-    job["status"] = "cancelled"
-    return {
-        'data': jobs.update(job_id=jobId, job=job)
-    }
-
-@app.route('/v1/projects', methods=['GET'], authorizer=bp_authorizers.api_key_authorizer)
-def get_projects(context):
-    records = projects.get_projects(tenant_id=context['tenantId'])
-    for record in records:
-        del record['projectId']
-
-    return {
-        'data': records
-    }
-
-
-@app.route('/v1/projects/{projectKey}', methods=['GET'], authorizer=bp_authorizers.api_key_authorizer)
-def get_project(projectKey, context):
-    return {
-        'data': projects.get_project_by_key(tenant_id=context['tenantId'], project_key=projectKey)
-    }
-
-
-@app.route('/v1/projects', methods=['POST'], authorizer=bp_authorizers.api_key_authorizer)
-def create_project(context):
-    data = app.current_request.json_body
-    record = projects.create(
-        tenant_id=context['tenantId'],
-        user_id=None,
-        data=data,
-        skip_authorization=True
-    )
-    del record['data']['projectId']
-    return record
diff --git a/api/chalicelib/blueprints/bp_authorizers.py b/api/chalicelib/blueprints/bp_authorizers.py
deleted file mode 100644
index 888f2910d..000000000
--- a/api/chalicelib/blueprints/bp_authorizers.py
+++ /dev/null
@@ -1,37 +0,0 @@
-from chalice import Blueprint, AuthResponse
-from chalicelib.core import authorizers
-
-from chalicelib.core import users
-
-app = Blueprint(__name__)
-
-
-@app.authorizer()
-def api_key_authorizer(auth_request):
-    r = authorizers.api_key_authorizer(auth_request.token)
-    if r is None:
-        return AuthResponse(routes=[], principal_id=None)
-    r["authorizer_identity"] = "api_key"
-    print(r)
-    return AuthResponse(
-        routes=['*'],
-        principal_id=r['tenantId'],
-        context=r
-    )
-
-
-@app.authorizer(ttl_seconds=60)
-def jwt_authorizer(auth_request):
-    jwt_payload = authorizers.jwt_authorizer(auth_request.token)
-    if jwt_payload is None \
-            or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \
-            or not users.auth_exists(user_id=jwt_payload["userId"], tenant_id=jwt_payload["tenantId"],
-                                     jwt_iat=jwt_payload["iat"], jwt_aud=jwt_payload["aud"]):
-        return AuthResponse(routes=[], principal_id=None)
-    jwt_payload["authorizer_identity"] = "jwt"
-    print(jwt_payload)
-    return AuthResponse(
-        routes=['*'],
-        principal_id=jwt_payload['userId'],
-        context=jwt_payload
-    )
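Editor's note: chalicelib.core.authorizers is referenced on both sides of this migration but is not shown in the diff. Given the payload fields the callers check (iat, aud, userId, tenantId) and the jwt_* settings in .env.default, a JWT check of this kind typically looks like the following PyJWT sketch (function and field names assumed, not confirmed by the source):

from typing import Optional

import jwt  # PyJWT
from decouple import config


def jwt_authorizer(token: str) -> Optional[dict]:
    scheme, _, credentials = token.partition(" ")
    if scheme.lower() != "bearer":
        return None
    try:
        return jwt.decode(credentials,
                          config("jwt_secret"),
                          algorithms=[config("jwt_algorithm", default="HS512")],
                          issuer=config("jwt_issuer"),
                          # aud is validated separately against the DB via users.auth_exists
                          options={"verify_aud": False})
    except jwt.InvalidTokenError:
        return None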
diff --git a/api/chalicelib/blueprints/bp_core.py b/api/chalicelib/blueprints/bp_core.py
deleted file mode 100644
index 303bca306..000000000
--- a/api/chalicelib/blueprints/bp_core.py
+++ /dev/null
@@ -1,909 +0,0 @@
-from chalice import Blueprint
-from chalice import Response
-
-from chalicelib import _overrides
-from chalicelib.blueprints import bp_authorizers
-from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, \
-    sessions_metas, alerts, funnels, issues, integrations_manager, errors_favorite_viewed, metadata, \
-    log_tool_elasticsearch, log_tool_datadog, \
-    log_tool_stackdriver, reset_password, sessions_favorite_viewed, \
-    log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, errors, sessions, \
-    log_tool_newrelic, announcements, log_tool_bugsnag, weekly_report, integration_jira_cloud, integration_github, \
-    assist, heatmaps, mobile
-from chalicelib.core.collaboration_slack import Slack
-from chalicelib.utils import email_helper
-from chalicelib.utils.helper import environ
-
-app = Blueprint(__name__)
-_overrides.chalice_app(app)
-
-
-@app.route('/{projectId}/sessions2/favorite', methods=['GET'])
-def get_favorite_sessions2(projectId, context):
-    params = app.current_request.query_params
-
-    return {
-        'data': sessions.get_favorite_sessions(project_id=projectId, user_id=context["userId"], include_viewed=True)
-    }
-
-
-@app.route('/{projectId}/sessions2/{sessionId}', methods=['GET'])
-def get_session2(projectId, sessionId, context):
-    data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True, user_id=context["userId"],
-                                  include_fav_viewed=True, group_metadata=True)
-    if data is None:
-        return {"errors": ["session not found"]}
-
-    sessions_favorite_viewed.view_session(project_id=projectId, user_id=context['userId'], session_id=sessionId)
-    return {
-        'data': data
-    }
-
-
-@app.route('/{projectId}/sessions2/{sessionId}/favorite', methods=['GET'])
-def add_remove_favorite_session2(projectId, sessionId, context):
-    return {
-        "data": sessions_favorite_viewed.favorite_session(project_id=projectId, user_id=context['userId'],
-                                                          session_id=sessionId)}
-
-
-@app.route('/{projectId}/sessions2/{sessionId}/assign', methods=['GET'])
-def assign_session(projectId, sessionId, context):
-    data = sessions_assignments.get_by_session(project_id=projectId, session_id=sessionId,
-                                               tenant_id=context['tenantId'],
-                                               user_id=context["userId"])
-    if "errors" in data:
-        return data
-    return {
-        'data': data
-    }
-
-
-@app.route('/{projectId}/sessions2/{sessionId}/errors/{errorId}/sourcemaps', methods=['GET'])
-def get_error_trace(projectId, sessionId, errorId, context):
-    data = errors.get_trace(project_id=projectId, error_id=errorId)
-    if "errors" in data:
-        return data
-    return {
-        'data': data
-    }
-
-
-@app.route('/{projectId}/sessions2/{sessionId}/assign/{issueId}', methods=['GET'])
-def assign_session(projectId, sessionId, issueId, context):
-    data = sessions_assignments.get(project_id=projectId, session_id=sessionId, assignment_id=issueId,
-                                    tenant_id=context['tenantId'], user_id=context["userId"])
-    if "errors" in data:
-        return data
-    return {
-        'data': data
-    }
-
-
-@app.route('/{projectId}/sessions2/{sessionId}/assign/{issueId}/comment', methods=['POST', 'PUT'])
-def comment_assignment(projectId, sessionId, issueId, context):
-    data = app.current_request.json_body
-    data = sessions_assignments.comment(tenant_id=context['tenantId'], project_id=projectId,
-                                        session_id=sessionId, assignment_id=issueId,
-                                        user_id=context["userId"], message=data["message"])
-    if "errors" in data.keys():
-        return data
-    return {
-        'data': data
-    }
-
-
-@app.route('/{projectId}/events/search', methods=['GET'])
-def events_search(projectId, context):
-    params = app.current_request.query_params
-    if params is None:
-        return {"data": []}
-
-    q = params.get('q', '')
-    if len(q) == 0:
-        return {"data": []}
-    result = events.search_pg2(q, params.get('type', ''), project_id=projectId, source=params.get('source'),
-                               key=params.get("key"))
-    return result
-
-
-@app.route('/{projectId}/sessions/search2', methods=['POST'])
-def sessions_search2(projectId, context):
-    data = app.current_request.json_body
-
-    data = sessions.search2_pg(data, projectId, user_id=context["userId"])
-    return {'data': data}
-
-
-@app.route('/{projectId}/sessions/filters', methods=['GET'])
-def session_filter_values(projectId, context):
-    return {'data': sessions_metas.get_key_values(projectId)}
-
-
-@app.route('/{projectId}/sessions/filters/top', methods=['GET'])
-def session_top_filter_values(projectId, context):
-    return {'data': sessions_metas.get_top_key_values(projectId)}
-
-
-@app.route('/{projectId}/sessions/filters/search', methods=['GET'])
-def get_session_filters_meta(projectId, context):
-    params = app.current_request.query_params
-    if params is None:
-        return {"data": []}
-
-    meta_type = params.get('type', '')
-    if len(meta_type) == 0:
-        return {"data": []}
-    q = params.get('q', '')
-    if len(q) == 0:
-        return {"data": []}
-    return sessions_metas.search(project_id=projectId, meta_type=meta_type, text=q)
-
-
-@app.route('/{projectId}/integrations/{integration}/notify/{integrationId}/{source}/{sourceId}',
-           methods=['POST', 'PUT'])
-def integration_notify(projectId, integration, integrationId, source, sourceId, context):
-    data = app.current_request.json_body
-    comment = None
-    if "comment" in data:
-        comment = data["comment"]
-    if integration == "slack":
-        args = {"tenant_id": context["tenantId"],
-                "user": context['email'], "comment": comment, "project_id": projectId,
-                "integration_id": integrationId}
-        if source == "sessions":
-            return Slack.share_session(session_id=sourceId, **args)
-        elif source == "errors":
-            return Slack.share_error(error_id=sourceId, **args)
-    return {"data": None}
-
-
-@app.route('/integrations/sentry', methods=['GET'])
-def get_all_sentry(context):
-    return {"data": log_tool_sentry.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/sentry', methods=['GET'])
-def get_sentry(projectId, context):
-    return {"data": log_tool_sentry.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/sentry', methods=['POST', 'PUT'])
-def add_edit_sentry(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_sentry.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/sentry', methods=['DELETE'])
-def delete_sentry(projectId, context):
-    return {"data": log_tool_sentry.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/sentry/events/{eventId}', methods=['GET'])
-def proxy_sentry(projectId, eventId, context):
-    return {"data": log_tool_sentry.proxy_get(tenant_id=context["tenantId"], project_id=projectId, event_id=eventId)}
-
-
-@app.route('/integrations/datadog', methods=['GET'])
-def get_all_datadog(context):
-    return {"data": log_tool_datadog.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/datadog', methods=['GET'])
-def get_datadog(projectId, context):
-    return {"data": log_tool_datadog.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/datadog', methods=['POST', 'PUT'])
-def add_edit_datadog(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_datadog.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/datadog', methods=['DELETE'])
-def delete_datadog(projectId, context):
-    return {"data": log_tool_datadog.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/integrations/stackdriver', methods=['GET'])
-def get_all_stackdriver(context):
-    return {"data": log_tool_stackdriver.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/stackdriver', methods=['GET'])
-def get_stackdriver(projectId, context):
-    return {"data": log_tool_stackdriver.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/stackdriver', methods=['POST', 'PUT'])
-def add_edit_stackdriver(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_stackdriver.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/stackdriver', methods=['DELETE'])
-def delete_stackdriver(projectId, context):
-    return {"data": log_tool_stackdriver.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/integrations/newrelic', methods=['GET'])
-def get_all_newrelic(context):
-    return {"data": log_tool_newrelic.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/newrelic', methods=['GET'])
-def get_newrelic(projectId, context):
-    return {"data": log_tool_newrelic.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/newrelic', methods=['POST', 'PUT'])
-def add_edit_newrelic(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_newrelic.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/newrelic', methods=['DELETE'])
-def delete_newrelic(projectId, context):
-    return {"data": log_tool_newrelic.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/integrations/rollbar', methods=['GET'])
-def get_all_rollbar(context):
-    return {"data": log_tool_rollbar.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/rollbar', methods=['GET'])
-def get_rollbar(projectId, context):
-    return {"data": log_tool_rollbar.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/rollbar', methods=['POST', 'PUT'])
-def add_edit_rollbar(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_rollbar.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/rollbar', methods=['DELETE'])
-def delete_datadog(projectId, context):
-    return {"data": log_tool_rollbar.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/integrations/bugsnag/list_projects', methods=['POST'])
-def list_projects_bugsnag(context):
-    data = app.current_request.json_body
-    return {"data": log_tool_bugsnag.list_projects(auth_token=data["authorizationToken"])}
-
-
-@app.route('/integrations/bugsnag', methods=['GET'])
-def get_all_bugsnag(context):
-    return {"data": log_tool_bugsnag.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/bugsnag', methods=['GET'])
-def get_bugsnag(projectId, context):
-    return {"data": log_tool_bugsnag.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/bugsnag', methods=['POST', 'PUT'])
-def add_edit_bugsnag(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_bugsnag.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/bugsnag', methods=['DELETE'])
-def delete_bugsnag(projectId, context):
-    return {"data": log_tool_bugsnag.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/integrations/cloudwatch/list_groups', methods=['POST'])
-def list_groups_cloudwatch(context):
-    data = app.current_request.json_body
-    return {"data": log_tool_cloudwatch.list_log_groups(aws_access_key_id=data["awsAccessKeyId"],
-                                                        aws_secret_access_key=data["awsSecretAccessKey"],
-                                                        region=data["region"])}
-
-
-@app.route('/integrations/cloudwatch', methods=['GET'])
-def get_all_cloudwatch(context):
-    return {"data": log_tool_cloudwatch.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/cloudwatch', methods=['GET'])
-def get_cloudwatch(projectId, context):
-    return {"data": log_tool_cloudwatch.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/cloudwatch', methods=['POST', 'PUT'])
-def add_edit_cloudwatch(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_cloudwatch.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/cloudwatch', methods=['DELETE'])
-def delete_cloudwatch(projectId, context):
-    return {"data": log_tool_cloudwatch.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/integrations/elasticsearch', methods=['GET'])
-def get_all_elasticsearch(context):
-    return {"data": log_tool_elasticsearch.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/elasticsearch', methods=['GET'])
-def get_elasticsearch(projectId, context):
-    return {"data": log_tool_elasticsearch.get(project_id=projectId)}
-
-
-@app.route('/integrations/elasticsearch/test', methods=['POST'])
-def test_elasticsearch_connection(context):
-    data = app.current_request.json_body
-    return {"data": log_tool_elasticsearch.ping(tenant_id=context["tenantId"], **data)}
-
-
-@app.route('/{projectId}/integrations/elasticsearch', methods=['POST', 'PUT'])
-def add_edit_elasticsearch(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_elasticsearch.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/elasticsearch', methods=['DELETE'])
-def delete_elasticsearch(projectId, context):
-    return {"data": log_tool_elasticsearch.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/integrations/sumologic', methods=['GET'])
-def get_all_sumologic(context):
-    return {"data": log_tool_sumologic.get_all(tenant_id=context["tenantId"])}
-
-
-@app.route('/{projectId}/integrations/sumologic', methods=['GET'])
-def get_sumologic(projectId, context):
-    return {"data": log_tool_sumologic.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/integrations/sumologic', methods=['POST', 'PUT'])
-def add_edit_sumologic(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": log_tool_sumologic.add_edit(tenant_id=context["tenantId"], project_id=projectId, data=data)}
-
-
-@app.route('/{projectId}/integrations/sumologic', methods=['DELETE'])
-def delete_sumologic(projectId, context):
-    return {"data": log_tool_sumologic.delete(tenant_id=context["tenantId"], project_id=projectId)}
-
-
-@app.route('/integrations/issues', methods=['GET'])
-def get_integration_status(context):
-    error, integration = integrations_manager.get_integration(tenant_id=context["tenantId"],
-                                                              user_id=context["userId"])
-    if error is not None:
-        return {"data": {}}
-    return {"data": integration.get_obfuscated()}
-
-
-@app.route('/integrations/jira', methods=['POST', 'PUT'])
-def add_edit_jira_cloud(context):
-    data = app.current_request.json_body
-    error, integration = integrations_manager.get_integration(tool=integration_jira_cloud.PROVIDER,
-                                                              tenant_id=context["tenantId"],
-                                                              user_id=context["userId"])
-    if error is not None:
-        return error
-    return {"data": integration.add_edit(data=data)}
-
-
-@app.route('/integrations/github', methods=['POST', 'PUT'])
-def add_edit_github(context):
-    data = app.current_request.json_body
-    error, integration = integrations_manager.get_integration(tool=integration_github.PROVIDER,
-                                                              tenant_id=context["tenantId"],
-                                                              user_id=context["userId"])
-    if error is not None:
-        return error
-    return {"data": integration.add_edit(data=data)}
-
-
-@app.route('/integrations/issues', methods=['DELETE'])
-def delete_default_issue_tracking_tool(context):
-    error, integration = integrations_manager.get_integration(tenant_id=context["tenantId"],
-                                                              user_id=context["userId"])
-    if error is not None:
-        return error
-    return {"data": integration.delete()}
-
-
-@app.route('/integrations/jira', methods=['DELETE'])
-def delete_jira_cloud(context):
-    error, integration = integrations_manager.get_integration(tool=integration_jira_cloud.PROVIDER,
-                                                              tenant_id=context["tenantId"],
-                                                              user_id=context["userId"])
-    if error is not None:
-        return error
-    return {"data": integration.delete()}
-
-
-@app.route('/integrations/github', methods=['DELETE'])
-def delete_github(context):
-    error, integration = integrations_manager.get_integration(tool=integration_github.PROVIDER,
-                                                              tenant_id=context["tenantId"],
-                                                              user_id=context["userId"])
-    if error is not None:
-        return error
-    return {"data": integration.delete()}
-
-
-@app.route('/integrations/issues/list_projects', methods=['GET'])
-def get_all_issue_tracking_projects(context):
-    error, integration = integrations_manager.get_integration(tenant_id=context["tenantId"],
-                                                              user_id=context["userId"])
-    if error is not None:
-        return error
-    data = integration.issue_handler.get_projects()
-    if "errors" in data:
-        return data
-    return {"data": data}
-
-
-@app.route('/integrations/issues/{integrationProjectId}', methods=['GET'])
-def get_integration_metadata(integrationProjectId, context):
-    error, integration = integrations_manager.get_integration(tenant_id=context["tenantId"],
-                                                              user_id=context["userId"])
-    if error is not None:
-        return error
-    data = integration.issue_handler.get_metas(integrationProjectId)
-    if "errors" in data.keys():
-        return data
-    return {"data": data}
-
-
-@app.route('/{projectId}/assignments', methods=['GET'])
-def get_all_assignments(projectId, context):
-    data = sessions_assignments.get_all(project_id=projectId, user_id=context["userId"])
-    return {
-        'data': data
-    }
-
-
-@app.route('/{projectId}/sessions2/{sessionId}/assign/projects/{integrationProjectId}', methods=['POST', 'PUT'])
-def create_issue_assignment(projectId, sessionId, integrationProjectId, context):
-    data = app.current_request.json_body
-    data = sessions_assignments.create_new_assignment(tenant_id=context['tenantId'], project_id=projectId,
-                                                      session_id=sessionId,
-                                                      creator_id=context["userId"], assignee=data["assignee"],
-                                                      description=data["description"], title=data["title"],
-                                                      issue_type=data["issueType"],
-                                                      integration_project_id=integrationProjectId)
-    if "errors" in data.keys():
-        return data
-    return {
-        'data': data
-    }
-
-
-@app.route('/{projectId}/gdpr', methods=['GET'])
-def get_gdpr(projectId, context):
-    return {"data": projects.get_gdpr(project_id=projectId)}
-
-
-@app.route('/{projectId}/gdpr', methods=['POST', 'PUT'])
-def edit_gdpr(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": projects.edit_gdpr(project_id=projectId, gdpr=data)}
-
-
-@app.route('/password/reset-link', methods=['PUT', 'POST'], authorizer=None)
-def reset_password_handler():
-    data = app.current_request.json_body
-    if "email" not in data or len(data["email"]) < 5:
-        return {"errors": ["please provide a valid email address"]}
-    return reset_password.reset(data)
-
-
-@app.route('/{projectId}/metadata', methods=['GET'])
-def get_metadata(projectId, context):
-    return {"data": metadata.get(project_id=projectId)}
-
-
-@app.route('/{projectId}/metadata/list', methods=['POST', 'PUT'])
-def add_edit_delete_metadata(projectId, context):
-    data = app.current_request.json_body
-
-    return metadata.add_edit_delete(tenant_id=context["tenantId"], project_id=projectId, new_metas=data["list"])
-
-
-@app.route('/{projectId}/metadata', methods=['POST', 'PUT'])
-def add_metadata(projectId, context):
-    data = app.current_request.json_body
-
-    return metadata.add(tenant_id=context["tenantId"], project_id=projectId, new_name=data["key"])
-
-
-@app.route('/{projectId}/metadata/{index}', methods=['POST', 'PUT'])
-def edit_metadata(projectId, index, context):
-    data = app.current_request.json_body
-
-    return metadata.edit(tenant_id=context["tenantId"], project_id=projectId, index=int(index),
-                         new_name=data["key"])
-
-
-@app.route('/{projectId}/metadata/{index}', methods=['DELETE'])
-def delete_metadata(projectId, index, context):
-    return metadata.delete(tenant_id=context["tenantId"], project_id=projectId, index=index)
-
-
-@app.route('/{projectId}/metadata/search', methods=['GET'])
-def search_metadata(projectId, context):
-    params = app.current_request.query_params
-    q = params.get('q', '')
-    key = params.get('key', '')
-    if len(q) == 0 and len(key) == 0:
-        return {"data": []}
-    if len(q) == 0:
-        return {"errors": ["please provide a value for search"]}
-    if len(key) == 0:
-        return {"errors": ["please provide a key for search"]}
-    return metadata.search(tenant_id=context["tenantId"], project_id=projectId, value=q, key=key)
-
-
-@app.route('/{projectId}/integration/sources', methods=['GET'])
-def search_integrations(projectId, context):
-    return log_tools.search(project_id=projectId)
-
-
-@app.route('/async/email_assignment', methods=['POST', 'PUT'], authorizer=None)
-def async_send_signup_emails():
-    data = app.current_request.json_body
-    if data.pop("auth") != environ["async_Token"]:
-        return {}
-    email_helper.send_assign_session(recipient=data["email"], link=data["link"], message=data["message"])
-
-
-@app.route('/async/funnel/weekly_report2', methods=['POST', 'PUT'], authorizer=None)
-def async_weekly_report():
-    print("=========================> Sending weekly report")
-    data = app.current_request.json_body
-    if data.pop("auth") != environ["async_Token"]:
-        return {}
-    email_helper.weekly_report2(recipients=data["email"], data=data.get("data", None))
-
-
-@app.route('/async/basic/{step}', methods=['POST', 'PUT'], authorizer=None)
-def async_basic_emails(step):
-    data = app.current_request.json_body
-    if data.pop("auth") != environ["async_Token"]:
-        return {}
-    if step.lower() == "member_invitation":
-        email_helper.send_team_invitation(recipient=data["email"], invitation_link=data["invitationLink"],
-                                          client_id=data["clientId"], sender_name=data["senderName"])
-
-
-@app.route('/{projectId}/sample_rate', methods=['GET'])
-def get_capture_status(projectId, context):
-    return {"data": projects.get_capture_status(project_id=projectId)}
-
-
-@app.route('/{projectId}/sample_rate', methods=['POST', 'PUT'])
-def update_capture_status(projectId, context):
-    data = app.current_request.json_body
-
-    return {"data": projects.update_capture_status(project_id=projectId, changes=data)}
-
-
-@app.route('/announcements', methods=['GET'])
-def get_all_announcements(context):
-    return {"data": announcements.get_all(context["userId"])}
-
-
-@app.route('/announcements/view', methods=['GET'])
-def get_all_announcements(context):
-    return {"data": announcements.view(user_id=context["userId"])}
-
-
-@app.route('/{projectId}/errors/{errorId}/{action}', methods=['GET'])
-def add_remove_favorite_error(projectId, errorId, action, context):
-    if action == "favorite":
-        return errors_favorite_viewed.favorite_error(project_id=projectId, user_id=context['userId'], error_id=errorId)
-    elif action == "sessions":
-        params = app.current_request.query_params
-        if params is None:
-            params = {}
-        start_date = params.get("startDate")
-        end_date = params.get("endDate")
-        return {
-            "data": errors.get_sessions(project_id=projectId, user_id=context['userId'], error_id=errorId,
-                                        start_date=start_date, end_date=end_date)}
-    elif action in list(errors.ACTION_STATE.keys()):
-        return errors.change_state(project_id=projectId, user_id=context['userId'], error_id=errorId, action=action)
-    else:
-        return {"errors": ["undefined action"]}
-
-
-@app.route('/{projectId}/errors/merge', methods=['POST'])
-def errors_merge(projectId, context):
-    data = app.current_request.json_body
-
-    data = errors.merge(error_ids=data.get("errors", []))
-    return data
-
-
-@app.route('/show_banner', methods=['GET'])
-def errors_merge(context):
-    return {"data": False}
-
-
-@app.route('/{projectId}/alerts', methods=['POST', 'PUT'])
-def create_alert(projectId, context):
-    data = app.current_request.json_body
-    return alerts.create(projectId, data)
-
-
-@app.route('/{projectId}/alerts', methods=['GET'])
-def get_all_alerts(projectId, context):
-    return {"data": alerts.get_all(projectId)}
-
-
-@app.route('/{projectId}/alerts/{alertId}', methods=['GET'])
-def get_alert(projectId, alertId, context):
-    return {"data": alerts.get(alertId)}
-
-
-@app.route('/{projectId}/alerts/{alertId}', methods=['POST', 'PUT'])
-def update_alert(projectId, alertId, context):
-    data = app.current_request.json_body
-    return alerts.update(alertId, data)
-
-
-@app.route('/{projectId}/alerts/{alertId}', methods=['DELETE'])
-def delete_alert(projectId, alertId, context):
-    return alerts.delete(projectId, alertId)
-
-
-@app.route('/{projectId}/funnels', methods=['POST', 'PUT'])
-def add_funnel(projectId, context):
-    data = app.current_request.json_body
-    return funnels.create(project_id=projectId,
-                          user_id=context['userId'],
-                          name=data["name"],
-                          filter=data["filter"],
-                          is_public=data.get("isPublic", False))
-
-
-@app.route('/{projectId}/funnels', methods=['GET'])
-def get_funnels(projectId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-
-    return {"data": funnels.get_by_user(project_id=projectId,
-                                        user_id=context['userId'],
-                                        range_value=None,
-                                        start_date=None,
-                                        end_date=None,
-                                        details=False)}
-
-
-@app.route('/{projectId}/funnels/details', methods=['GET'])
-def get_funnels_with_details(projectId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-
-    return {"data": funnels.get_by_user(project_id=projectId,
-                                        user_id=context['userId'],
-                                        range_value=params.get("rangeValue", None),
-                                        start_date=params.get('startDate', None),
-                                        end_date=params.get('endDate', None),
-                                        details=True)}
-
-
-@app.route('/{projectId}/funnels/issue_types', methods=['GET'])
-def get_possible_issue_types(projectId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-
-    return {"data": funnels.get_possible_issue_types(project_id=projectId)}
-
-
-@app.route('/{projectId}/funnels/{funnelId}/insights', methods=['GET'])
-def get_funnel_insights(projectId, funnelId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-
-    return funnels.get_top_insights(funnel_id=funnelId, project_id=projectId,
-                                    range_value=params.get("range_value", None),
-                                    start_date=params.get('startDate', None),
-                                    end_date=params.get('endDate', None))
-
-
-@app.route('/{projectId}/funnels/{funnelId}/insights', methods=['POST', 'PUT'])
-def get_funnel_insights_on_the_fly(projectId, funnelId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-    data = app.current_request.json_body
-    if data is None:
-        data = {}
-
-    return funnels.get_top_insights_on_the_fly(funnel_id=funnelId, project_id=projectId, data={**params, **data})
-
-
-@app.route('/{projectId}/funnels/{funnelId}/issues', methods=['GET'])
-def get_funnel_issues(projectId, funnelId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-
-    return funnels.get_issues(funnel_id=funnelId, project_id=projectId,
-                              range_value=params.get("range_value", None),
-                              start_date=params.get('startDate', None), end_date=params.get('endDate', None))
-
-
-@app.route('/{projectId}/funnels/{funnelId}/issues', methods=['POST', 'PUT'])
-def get_funnel_issues_on_the_fly(projectId, funnelId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-    data = app.current_request.json_body
-    if data is None:
-        data = {}
-
-    return {"data": funnels.get_issues_on_the_fly(funnel_id=funnelId, project_id=projectId, data={**params, **data})}
-
-
-@app.route('/{projectId}/funnels/{funnelId}/sessions', methods=['GET'])
-def get_funnel_sessions(projectId, funnelId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-
-    return {"data": funnels.get_sessions(funnel_id=funnelId, user_id=context['userId'], project_id=projectId,
-                                         range_value=params.get("range_value", None),
-                                         start_date=params.get('startDate', None),
-                                         end_date=params.get('endDate', None))}
-
-
-@app.route('/{projectId}/funnels/{funnelId}/sessions', methods=['POST', 'PUT'])
-def get_funnel_sessions_on_the_fly(projectId, funnelId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-    data = app.current_request.json_body
-    if data is None:
-        data = {}
-    return {"data": funnels.get_sessions_on_the_fly(funnel_id=funnelId, user_id=context['userId'], project_id=projectId,
-                                                    data={**params, **data})}
-
-
-@app.route('/{projectId}/funnels/issues/{issueId}/sessions', methods=['GET'])
-def get_issue_sessions(projectId, issueId, context):
-    params = app.current_request.query_params
-    if params is None:
-        params = {}
-
-    issue = issues.get(project_id=projectId, issue_id=issueId)
-    return {
-        "data": {"sessions": sessions.search_by_issue(user_id=context["userId"], project_id=projectId, issue=issue,
-                                                      start_date=params.get('startDate', None),
-                                                      end_date=params.get('endDate', None)),
-                 "issue": issue}}
-
-
-@app.route('/{projectId}/funnels/{funnelId}/issues/{issueId}/sessions', methods=['POST', 'PUT'])
-def get_funnel_issue_sessions(projectId, funnelId, issueId, context):
-    data = app.current_request.json_body
-
-    data = funnels.search_by_issue(project_id=projectId, user_id=context["userId"], issue_id=issueId,
-                                   funnel_id=funnelId, data=data)
-    if "errors" in data:
-        return data
-    if data.get("issue") is None:
-        data["issue"] = issues.get(project_id=projectId, issue_id=issueId)
-    return {
-        "data": data
-    }
-
-
-@app.route('/{projectId}/funnels/{funnelId}', methods=['GET'])
-def get_funnel(projectId, funnelId, context):
-    data = funnels.get(funnel_id=funnelId,
-                       project_id=projectId)
-    if data is None:
-        return {"errors": ["funnel not found"]}
-    return {"data": data}
-
-
-@app.route('/{projectId}/funnels/{funnelId}', methods=['POST', 'PUT'])
-def edit_funnel(projectId, funnelId, context):
-    data = app.current_request.json_body
-    return funnels.update(funnel_id=funnelId,
-                          user_id=context['userId'],
-                          name=data.get("name"),
-                          filter=data.get("filter"),
-                          is_public=data.get("isPublic"))
-
-
-@app.route('/{projectId}/funnels/{funnelId}', methods=['DELETE'])
-def delete_filter(projectId, funnelId, context):
-    return funnels.delete(user_id=context['userId'], funnel_id=funnelId, project_id=projectId)
-
-
-@app.route('/{projectId}/sourcemaps', methods=['PUT'], authorizer=bp_authorizers.api_key_authorizer)
-def sign_sourcemap_for_upload(projectId, context):
-    data = app.current_request.json_body
-    project_id = projects.get_internal_project_id(projectId)
-    if project_id is None:
-        return Response(status_code=400, body='invalid projectId')
-
-    return {"data": sourcemaps.presign_upload_urls(project_id=project_id, urls=data["URL"])}
-
-
-@app.route('/config/weekly_report', methods=['GET'])
-def get_weekly_report_config(context):
-    return {"data": weekly_report.get_config(user_id=context['userId'])}
-
-
-@app.route('/config/weekly_report', methods=['POST', 'PUT'])
-def get_weekly_report_config(context):
-    data = app.current_request.json_body
-    return {"data": weekly_report.edit_config(user_id=context['userId'], weekly_report=data.get("weeklyReport", True))}
-
-
-@app.route('/{projectId}/issue_types', methods=['GET'])
-def issue_types(projectId, context):
-    # return {"data": issues.get_types_by_project(project_id=projectId)}
-    return {"data": issues.get_all_types()}
-
-
-@app.route('/issue_types', methods=['GET'])
-def all_issue_types(context):
-    return {"data": issues.get_all_types()}
-
-
-@app.route('/flows', methods=['GET', 'PUT', 'POST', 'DELETE'])
-@app.route('/{projectId}/flows', methods=['GET', 'PUT', 'POST', 'DELETE'])
-def removed_endpoints(projectId=None, context=None):
-    return Response(body={"errors": ["Endpoint no longer available"]}, status_code=410)
-
-
-@app.route('/{projectId}/assist/sessions', methods=['GET'])
-def sessions_live(projectId, context):
-    data = assist.get_live_sessions(projectId)
-    return {'data': data}
-
-
-@app.route('/{projectId}/assist/sessions', methods=['POST'])
-def sessions_live_search(projectId, context):
-    data = app.current_request.json_body
-    if data is None:
-        data = {}
-    data = assist.get_live_sessions(projectId, filters=data.get("filters"))
-    return {'data': data}
-
-
-@app.route('/{projectId}/heatmaps/url', methods=['POST'])
-def get_heatmaps_by_url(projectId, context):
-    data = app.current_request.json_body
-    return {"data": heatmaps.get_by_url(project_id=projectId, data=data)}
-
-
-@app.route('/general_stats', methods=['GET'], authorizer=None)
-def get_general_stats():
-    return {"data": {"sessions:": sessions.count_all()}}
-
-
-@app.route('/{projectId}/mobile/{sessionId}/urls', methods=['POST'])
-def mobile_signe(projectId, sessionId, context):
-    data = app.current_request.json_body
-    return {"data": mobile.sign_keys(project_id=projectId, session_id=sessionId, keys=data["keys"])}
weekly_report, jobs - -app = Blueprint(__name__) -_overrides.chalice_app(app) - - -@app.schedule(Cron('0', '*', '?', '*', '*', '*')) -def run_scheduled_jobs(event): - jobs.execute_jobs() - - -# Run every monday. -@app.schedule(Cron('5', '0', '?', '*', 'MON', '*')) -def weekly_report2(event): - weekly_report.cron() diff --git a/api/chalicelib/blueprints/bp_core_dynamic.py b/api/chalicelib/blueprints/bp_core_dynamic.py deleted file mode 100644 index b494ea953..000000000 --- a/api/chalicelib/blueprints/bp_core_dynamic.py +++ /dev/null @@ -1,460 +0,0 @@ -from chalice import Blueprint, Response - -from chalicelib import _overrides -from chalicelib.core import assist -from chalicelib.core import boarding -from chalicelib.core import errors -from chalicelib.core import license -from chalicelib.core import metadata, errors_favorite_viewed, slack, alerts, sessions, integrations_manager -from chalicelib.core import notifications -from chalicelib.core import projects -from chalicelib.core import signup -from chalicelib.core import tenants -from chalicelib.core import users -from chalicelib.core import webhook -from chalicelib.core.collaboration_slack import Slack -from chalicelib.utils import captcha -from chalicelib.utils import helper -from chalicelib.utils.helper import environ - -app = Blueprint(__name__) -_overrides.chalice_app(app) - - -@app.route('/login', methods=['POST'], authorizer=None) -def login(): - data = app.current_request.json_body - if helper.allow_captcha() and not captcha.is_valid(data["g-recaptcha-response"]): - return {"errors": ["Invalid captcha."]} - r = users.authenticate(data['email'], data['password'], - for_plugin=False - ) - if r is None: - return Response(status_code=401, body={ - 'errors': ['You’ve entered invalid Email or Password.'] - }) - - tenant_id = r.pop("tenantId") - - r["limits"] = { - "teamMember": -1, - "projects": -1, - "metadata": metadata.get_remaining_metadata_with_count(tenant_id)} - - c = tenants.get_by_tenant_id(tenant_id) - c.pop("createdAt") - c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, - stack_integrations=True, version=True) - c["smtp"] = helper.has_smtp() - c["iceServers"] = assist.get_ice_servers() - return { - 'jwt': r.pop('jwt'), - 'data': { - "user": r, - "client": c - } - } - - -@app.route('/account', methods=['GET']) -def get_account(context): - r = users.get(tenant_id=context['tenantId'], user_id=context['userId']) - return { - 'data': { - **r, - "limits": { - "teamMember": -1, - "projects": -1, - "metadata": metadata.get_remaining_metadata_with_count(context['tenantId']) - }, - **license.get_status(context["tenantId"]), - "smtp": helper.has_smtp(), - "iceServers": assist.get_ice_servers() - } - } - - -@app.route('/projects', methods=['GET']) -def get_projects(context): - return {"data": projects.get_projects(tenant_id=context["tenantId"], recording_state=True, gdpr=True, recorded=True, - stack_integrations=True, version=True)} - - -@app.route('/projects', methods=['POST', 'PUT']) -def create_project(context): - data = app.current_request.json_body - return projects.create(tenant_id=context["tenantId"], user_id=context["userId"], data=data) - - -@app.route('/projects/{projectId}', methods=['POST', 'PUT']) -def create_edit_project(projectId, context): - data = app.current_request.json_body - - return projects.edit(tenant_id=context["tenantId"], user_id=context["userId"], data=data, project_id=projectId) - - -@app.route('/projects/{projectId}', methods=['GET']) -def get_project(projectId, 
context): - data = projects.get_project(tenant_id=context["tenantId"], project_id=projectId, include_last_session=True, - include_gdpr=True) - if data is None: - return {"errors": ["project not found"]} - return {"data": data} - - -@app.route('/projects/{projectId}', methods=['DELETE']) -def delete_project(projectId, context): - return projects.delete(tenant_id=context["tenantId"], user_id=context["userId"], project_id=projectId) - - -@app.route('/projects/limit', methods=['GET']) -def get_projects_limit(context): - return {"data": { - "current": projects.count_by_tenant(tenant_id=context["tenantId"]), - "remaining": -1 - }} - - -@app.route('/client', methods=['GET']) -def get_client(context): - r = tenants.get_by_tenant_id(context['tenantId']) - if r is not None: - r.pop("createdAt") - r["projects"] = projects.get_projects(tenant_id=context['tenantId'], recording_state=True, recorded=True, - stack_integrations=True, version=True) - return { - 'data': r - } - - -@app.route('/client/new_api_key', methods=['GET']) -def generate_new_tenant_token(context): - return { - 'data': tenants.generate_new_api_key(context['tenantId']) - } - - -@app.route('/client', methods=['PUT', 'POST']) -def put_client(context): - data = app.current_request.json_body - return tenants.update(tenant_id=context["tenantId"], user_id=context["userId"], data=data) - - -@app.route('/signup', methods=['GET'], authorizer=None) -def get_all_signup(): - return {"data": {"tenants": tenants.tenants_exists(), - "sso": None, - "ssoProvider": None, - "edition": helper.get_edition()}} - - -@app.route('/signup', methods=['POST', 'PUT'], authorizer=None) -def signup_handler(): - data = app.current_request.json_body - return signup.create_step1(data) - - -@app.route('/integrations/slack', methods=['POST', 'PUT']) -def add_slack_client(context): - data = app.current_request.json_body - if "url" not in data or "name" not in data: - return {"errors": ["please provide a url and a name"]} - n = Slack.add_channel(tenant_id=context["tenantId"], url=data["url"], name=data["name"]) - if n is None: - return { - "errors": ["We couldn't send you a test message on your Slack channel. Please verify your webhook url."] - } - return {"data": n} - - -@app.route('/integrations/slack/{integrationId}', methods=['POST', 'PUT']) -def edit_slack_integration(integrationId, context): - data = app.current_request.json_body - if data.get("url") and len(data["url"]) > 0: - old = webhook.get(tenant_id=context["tenantId"], webhook_id=integrationId) - if old["endpoint"] != data["url"]: - if not Slack.say_hello(data["url"]): - return { - "errors": [ - "We couldn't send you a test message on your Slack channel. 
Please verify your webhook url."] - } - return {"data": webhook.update(tenant_id=context["tenantId"], webhook_id=integrationId, - changes={"name": data.get("name", ""), "endpoint": data["url"]})} - - -@app.route('/{projectId}/errors/search', methods=['POST']) -def errors_search(projectId, context): - data = app.current_request.json_body - params = app.current_request.query_params - if params is None: - params = {} - - return errors.search(data, projectId, user_id=context["userId"], status=params.get("status", "ALL"), - favorite_only="favorite" in params) - - -@app.route('/{projectId}/errors/stats', methods=['GET']) -def errors_stats(projectId, context): - params = app.current_request.query_params - if params is None: - params = {} - - return errors.stats(projectId, user_id=context["userId"], **params) - - -@app.route('/{projectId}/errors/{errorId}', methods=['GET']) -def errors_get_details(projectId, errorId, context): - params = app.current_request.query_params - if params is None: - params = {} - - data = errors.get_details(project_id=projectId, user_id=context["userId"], error_id=errorId, **params) - if data.get("data") is not None: - errors_favorite_viewed.viewed_error(project_id=projectId, user_id=context['userId'], error_id=errorId) - return data - - -@app.route('/{projectId}/errors/{errorId}/stats', methods=['GET']) -def errors_get_details_right_column(projectId, errorId, context): - params = app.current_request.query_params - if params is None: - params = {} - - data = errors.get_details_chart(project_id=projectId, user_id=context["userId"], error_id=errorId, **params) - return data - - -@app.route('/{projectId}/errors/{errorId}/sourcemaps', methods=['GET']) -def errors_get_details_sourcemaps(projectId, errorId, context): - data = errors.get_trace(project_id=projectId, error_id=errorId) - if "errors" in data: - return data - return { - 'data': data - } - - -@app.route('/async/alerts/notifications/{step}', methods=['POST', 'PUT'], authorizer=None) -def send_alerts_notification_async(step): - data = app.current_request.json_body - if data.pop("auth") != environ["async_Token"]: - return {"errors": ["missing auth"]} - if step == "slack": - slack.send_batch(notifications_list=data.get("notifications")) - elif step == "email": - alerts.send_by_email_batch(notifications_list=data.get("notifications")) - elif step == "webhook": - webhook.trigger_batch(data_list=data.get("notifications")) - - -@app.route('/notifications', methods=['GET']) -def get_notifications(context): - return {"data": notifications.get_all(tenant_id=context['tenantId'], user_id=context['userId'])} - - -@app.route('/notifications/{notificationId}/view', methods=['GET']) -def view_notifications(notificationId, context): - return {"data": notifications.view_notification(notification_ids=[notificationId], user_id=context['userId'])} - - -@app.route('/notifications/view', methods=['POST', 'PUT']) -def batch_view_notifications(context): - data = app.current_request.json_body - return {"data": notifications.view_notification(notification_ids=data.get("ids", []), - startTimestamp=data.get("startTimestamp"), - endTimestamp=data.get("endTimestamp"), - user_id=context['userId'], - tenant_id=context["tenantId"])} - - -@app.route('/notifications', methods=['POST', 'PUT'], authorizer=None) -def create_notifications(): - data = app.current_request.json_body - if data.get("token", "") != "nF46JdQqAM5v9KI9lPMpcu8o9xiJGvNNWOGL7TJP": - return {"errors": ["missing token"]} - return notifications.create(data.get("notifications", [])) - - 
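Note: the deleted `/async/alerts/notifications/{step}` handler above is the receiving end of the `alert_ntf` URL template from the stage config; the old `process_notifications()` in `chalicelib/core/alerts.py` posted batches of at most 200 notifications to it, one call per destination type (`slack`, `email`, `webhook`), and later in this diff that HTTP hop is replaced by direct in-process dispatch. A minimal sketch of such a caller, assuming placeholder URL and token values rather than real deployment config:

```python
# Hypothetical caller for the async notification route above.
# ALERT_NTF_TEMPLATE and ASYNC_TOKEN are placeholders standing in for the
# alert_ntf and async_Token settings; they are not real deployment values.
import requests

ALERT_NTF_TEMPLATE = "http://127.0.0.1:8000/async/alerts/notifications/%s"
ASYNC_TOKEN = "set-me"  # must match the async_Token value the route checks via data.pop("auth")

def post_notification_batch(step: str, notifications: list) -> None:
    # step is one of "slack", "email" or "webhook", matching the route's dispatch
    payload = {"auth": ASYNC_TOKEN, "notifications": notifications}
    resp = requests.post(ALERT_NTF_TEMPLATE % step, json=payload, timeout=10)
    resp.raise_for_status()
```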
-@app.route('/boarding', methods=['GET']) -def get_boarding_state(context): - return {"data": boarding.get_state(tenant_id=context["tenantId"])} - - -@app.route('/boarding/installing', methods=['GET']) -def get_boarding_state_installing(context): - return {"data": boarding.get_state_installing(tenant_id=context["tenantId"])} - - -@app.route('/boarding/identify-users', methods=['GET']) -def get_boarding_state_identify_users(context): - return {"data": boarding.get_state_identify_users(tenant_id=context["tenantId"])} - - -@app.route('/boarding/manage-users', methods=['GET']) -def get_boarding_state_manage_users(context): - return {"data": boarding.get_state_manage_users(tenant_id=context["tenantId"])} - - -@app.route('/boarding/integrations', methods=['GET']) -def get_boarding_state_integrations(context): - return {"data": boarding.get_state_integrations(tenant_id=context["tenantId"])} - - -# this endpoint supports both jira & github based on `provider` attribute -@app.route('/integrations/issues', methods=['POST', 'PUT']) -def add_edit_jira_cloud_github(context): - data = app.current_request.json_body - provider = data.get("provider", "").upper() - error, integration = integrations_manager.get_integration(tool=provider, tenant_id=context["tenantId"], - user_id=context["userId"]) - if error is not None: - return error - return {"data": integration.add_edit(data=data)} - - -@app.route('/integrations/slack/{integrationId}', methods=['GET']) -def get_slack_webhook(integrationId, context): - return {"data": webhook.get(tenant_id=context["tenantId"], webhook_id=integrationId)} - - -@app.route('/integrations/slack/channels', methods=['GET']) -def get_slack_integration(context): - return {"data": webhook.get_by_type(tenant_id=context["tenantId"], webhook_type='slack')} - - -@app.route('/integrations/slack/{integrationId}', methods=['DELETE']) -def delete_slack_integration(integrationId, context): - return webhook.delete(context["tenantId"], integrationId) - - -@app.route('/webhooks', methods=['POST', 'PUT']) -def add_edit_webhook(context): - data = app.current_request.json_body - return {"data": webhook.add_edit(tenant_id=context["tenantId"], data=data, replace_none=True)} - - -@app.route('/webhooks', methods=['GET']) -def get_webhooks(context): - return {"data": webhook.get_by_tenant(tenant_id=context["tenantId"], replace_none=True)} - - -@app.route('/webhooks/{webhookId}', methods=['DELETE']) -def delete_webhook(webhookId, context): - return {"data": webhook.delete(tenant_id=context["tenantId"], webhook_id=webhookId)} - - -@app.route('/client/members', methods=['GET']) -def get_members(context): - return {"data": users.get_members(tenant_id=context['tenantId'])} - - -@app.route('/client/members', methods=['PUT', 'POST']) -def add_member(context): - data = app.current_request.json_body - return users.create_member(tenant_id=context['tenantId'], user_id=context['userId'], data=data) - - -@app.route('/users/invitation', methods=['GET'], authorizer=None) -def process_invitation_link(): - params = app.current_request.query_params - if params is None or len(params.get("token", "")) < 64: - return {"errors": ["please provide a valid invitation"]} - user = users.get_by_invitation_token(params["token"]) - if user is None: - return {"errors": ["invitation not found"]} - if user["expiredInvitation"]: - return {"errors": ["expired invitation, please ask your admin to send a new one"]} - if user["expiredChange"] is not None and not user["expiredChange"] \ - and user["changePwdToken"] is not None and 
user["changePwdAge"] < -5 * 60: - pass_token = user["changePwdToken"] - else: - pass_token = users.allow_password_change(user_id=user["userId"]) - return Response( - status_code=307, - body='', - headers={'Location': environ["SITE_URL"] + environ["change_password_link"] % (params["token"], pass_token), - 'Content-Type': 'text/plain'}) - - -@app.route('/password/reset', methods=['POST', 'PUT'], authorizer=None) -def change_password_by_invitation(): - data = app.current_request.json_body - if data is None or len(data.get("invitation", "")) < 64 or len(data.get("pass", "")) < 8: - return {"errors": ["please provide a valid invitation & pass"]} - user = users.get_by_invitation_token(token=data["invitation"], pass_token=data["pass"]) - if user is None: - return {"errors": ["invitation not found"]} - if user["expiredChange"]: - return {"errors": ["expired change, please re-use the invitation link"]} - - return users.set_password_invitation(new_password=data["password"], user_id=user["userId"]) - - -@app.route('/client/members/{memberId}', methods=['PUT', 'POST']) -def edit_member(memberId, context): - data = app.current_request.json_body - return users.edit(tenant_id=context['tenantId'], editor_id=context['userId'], changes=data, - user_id_to_update=memberId) - - -@app.route('/client/members/{memberId}/reset', methods=['GET']) -def reset_reinvite_member(memberId, context): - return users.reset_member(tenant_id=context['tenantId'], editor_id=context['userId'], user_id_to_update=memberId) - - -@app.route('/client/members/{memberId}', methods=['DELETE']) -def delete_member(memberId, context): - return users.delete_member(tenant_id=context["tenantId"], user_id=context['userId'], id_to_delete=memberId) - - -@app.route('/account/new_api_key', methods=['GET']) -def generate_new_user_token(context): - return {"data": users.generate_new_api_key(user_id=context['userId'])} - - -@app.route('/account', methods=['POST', 'PUT']) -def edit_account(context): - data = app.current_request.json_body - return users.edit(tenant_id=context['tenantId'], user_id_to_update=context['userId'], changes=data, - editor_id=context['userId']) - - -@app.route('/account/password', methods=['PUT', 'POST']) -def change_client_password(context): - data = app.current_request.json_body - return users.change_password(email=context['email'], old_password=data["oldPassword"], - new_password=data["newPassword"], tenant_id=context["tenantId"], - user_id=context["userId"]) - - -@app.route('/metadata/session_search', methods=['GET']) -def search_sessions_by_metadata(context): - params = app.current_request.query_params - if params is None: - return {"errors": ["please provide a key&value for search"]} - value = params.get('value', '') - key = params.get('key', '') - project_id = params.get('projectId') - if len(value) == 0 and len(key) == 0: - return {"errors": ["please provide a key&value for search"]} - if len(value) == 0: - return {"errors": ["please provide a value for search"]} - if len(key) == 0: - return {"errors": ["please provide a key for search"]} - return { - "data": sessions.search_by_metadata(tenant_id=context["tenantId"], user_id=context["userId"], m_value=value, - m_key=key, - project_id=project_id)} - - -@app.route('/plans', methods=['GET']) -def get_current_plan(context): - return { - "data": license.get_status(context["tenantId"]) - } - - -@app.route('/alerts/notifications', methods=['POST', 'PUT'], authorizer=None) -def send_alerts_notifications(): - data = app.current_request.json_body - return {"data": 
alerts.process_notifications(data.get("notifications", []))} diff --git a/api/chalicelib/blueprints/bp_core_dynamic_crons.py b/api/chalicelib/blueprints/bp_core_dynamic_crons.py deleted file mode 100644 index 74d8766e5..000000000 --- a/api/chalicelib/blueprints/bp_core_dynamic_crons.py +++ /dev/null @@ -1,13 +0,0 @@ -from chalice import Blueprint, Cron -from chalicelib import _overrides - -app = Blueprint(__name__) -_overrides.chalice_app(app) - -from chalicelib.core import telemetry - - -# Run every day. -@app.schedule(Cron('0', '0', '?', '*', '*', '*')) -def telemetry_cron(event): - telemetry.compute() diff --git a/api/chalicelib/blueprints/subs/bp_dashboard.py b/api/chalicelib/blueprints/subs/bp_dashboard.py deleted file mode 100644 index 00b3c0ed4..000000000 --- a/api/chalicelib/blueprints/subs/bp_dashboard.py +++ /dev/null @@ -1,550 +0,0 @@ -from chalice import Blueprint -from chalicelib.utils import helper -from chalicelib import _overrides - -from chalicelib.core import dashboard -from chalicelib.core import metadata - -app = Blueprint(__name__) -_overrides.chalice_app(app) - - -@app.route('/{projectId}/dashboard/metadata', methods=['GET']) -def get_metadata_map(projectId, context): - metamap = [] - for m in metadata.get(project_id=projectId): - metamap.append({"name": m["key"], "key": f"metadata{m['index']}"}) - return {"data": metamap} - - -@app.route('/{projectId}/dashboard/sessions', methods=['GET', 'POST']) -def get_dashboard_processed_sessions(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_processed_sessions(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/errors', methods=['GET', 'POST']) -def get_dashboard_errors(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_errors(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/errors_trend', methods=['GET', 'POST']) -def get_dashboard_errors_trend(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_errors_trend(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/application_activity', methods=['GET', 'POST']) -def get_dashboard_application_activity(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_application_activity(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/page_metrics', methods=['GET', 'POST']) -def get_dashboard_page_metrics(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_page_metrics(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/user_activity', methods=['GET', 'POST']) -def get_dashboard_user_activity(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = 
dashboard.dashboard_args(params) - - return {"data": dashboard.get_user_activity(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/performance', methods=['GET', 'POST']) -def get_dashboard_performance(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_performance(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/slowest_images', methods=['GET', 'POST']) -def get_dashboard_slowest_images(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_slowest_images(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/missing_resources', methods=['GET', 'POST']) -def get_performance_sessions(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_missing_resources_trend(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/network', methods=['GET', 'POST']) -def get_network_widget(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_network(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/{widget}/search', methods=['GET']) -def get_dashboard_autocomplete(projectId, widget, context): - params = app.current_request.query_params - if params is None or params.get('q') is None or len(params.get('q')) == 0: - return {"data": []} - params['q'] = '^' + params['q'] - - if widget in ['performance']: - data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId, - platform=params.get('platform', None), performance=True) - elif widget in ['pages', 'pages_dom_buildtime', 'top_metrics', 'time_to_render', - 'impacted_sessions_by_slow_pages', 'pages_response_time']: - data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId, - platform=params.get('platform', None), pages_only=True) - elif widget in ['resources_loading_time']: - data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId, - platform=params.get('platform', None), performance=False) - elif widget in ['time_between_events', 'events']: - data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId, - platform=params.get('platform', None), performance=False, events_only=True) - elif widget in ['metadata']: - data = dashboard.search(params.get('q', ''), None, project_id=projectId, - platform=params.get('platform', None), metadata=True, key=params.get("key")) - else: - return {"errors": [f"unsupported widget: {widget}"]} - return {'data': data} - - -# 1 -@app.route('/{projectId}/dashboard/slowest_resources', methods=['GET', 'POST']) -def get_dashboard_slowest_resources(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_slowest_resources(project_id=projectId, **{**data, **args})} - - -# 2 
-@app.route('/{projectId}/dashboard/resources_loading_time', methods=['GET', 'POST']) -def get_dashboard_resources(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_resources_loading_time(project_id=projectId, **{**data, **args})} - - -# 3 -@app.route('/{projectId}/dashboard/pages_dom_buildtime', methods=['GET', 'POST']) -def get_dashboard_pages_dom(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_pages_dom_build_time(project_id=projectId, **{**data, **args})} - - -# 4 -@app.route('/{projectId}/dashboard/busiest_time_of_day', methods=['GET', 'POST']) -def get_dashboard_busiest_time_of_day(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_busiest_time_of_day(project_id=projectId, **{**data, **args})} - - -# 5 -@app.route('/{projectId}/dashboard/sessions_location', methods=['GET', 'POST']) -def get_dashboard_sessions_location(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_sessions_location(project_id=projectId, **{**data, **args})} - - -# 6 -@app.route('/{projectId}/dashboard/speed_location', methods=['GET', 'POST']) -def get_dashboard_speed_location(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_speed_index_location(project_id=projectId, **{**data, **args})} - - -# 7 -@app.route('/{projectId}/dashboard/pages_response_time', methods=['GET', 'POST']) -def get_dashboard_pages_response_time(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_pages_response_time(project_id=projectId, **{**data, **args})} - - -# 8 -@app.route('/{projectId}/dashboard/pages_response_time_distribution', methods=['GET', 'POST']) -def get_dashboard_pages_response_time_distribution(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_pages_response_time_distribution(project_id=projectId, **{**data, **args})} - - -# 9 -@app.route('/{projectId}/dashboard/top_metrics', methods=['GET', 'POST']) -def get_dashboard_top_metrics(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_top_metrics(project_id=projectId, **{**data, **args})} - - -# 10 -@app.route('/{projectId}/dashboard/time_to_render', methods=['GET', 'POST']) -def get_dashboard_time_to_render(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": 
dashboard.get_time_to_render(project_id=projectId, **{**data, **args})} - - -# 11 -@app.route('/{projectId}/dashboard/impacted_sessions_by_slow_pages', methods=['GET', 'POST']) -def get_dashboard_impacted_sessions_by_slow_pages(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_impacted_sessions_by_slow_pages(project_id=projectId, **{**data, **args})} - - -# 12 -@app.route('/{projectId}/dashboard/memory_consumption', methods=['GET', 'POST']) -def get_dashboard_memory_consumption(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_memory_consumption(project_id=projectId, **{**data, **args})} - - -# 12.1 -@app.route('/{projectId}/dashboard/fps', methods=['GET', 'POST']) -def get_dashboard_avg_fps(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_avg_fps(project_id=projectId, **{**data, **args})} - - -# 12.2 -@app.route('/{projectId}/dashboard/cpu', methods=['GET', 'POST']) -def get_dashboard_avg_cpu(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_avg_cpu(project_id=projectId, **{**data, **args})} - - -# 13 -@app.route('/{projectId}/dashboard/crashes', methods=['GET', 'POST']) -def get_dashboard_impacted_sessions_by_slow_pages(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_crashes(project_id=projectId, **{**data, **args})} - - -# 14 -@app.route('/{projectId}/dashboard/domains_errors', methods=['GET', 'POST']) -def get_dashboard_domains_errors(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_domains_errors(project_id=projectId, **{**data, **args})} - - -# 14.1 -@app.route('/{projectId}/dashboard/domains_errors_4xx', methods=['GET', 'POST']) -def get_dashboard_domains_errors_4xx(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_domains_errors_4xx(project_id=projectId, **{**data, **args})} - - -# 14.2 -@app.route('/{projectId}/dashboard/domains_errors_5xx', methods=['GET', 'POST']) -def get_dashboard_domains_errors_5xx(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_domains_errors_5xx(project_id=projectId, **{**data, **args})} - - -# 15 -@app.route('/{projectId}/dashboard/slowest_domains', methods=['GET', 'POST']) -def get_dashboard_slowest_domains(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - 
return {"data": dashboard.get_slowest_domains(project_id=projectId, **{**data, **args})} - - -# 16 -@app.route('/{projectId}/dashboard/errors_per_domains', methods=['GET', 'POST']) -def get_dashboard_errors_per_domains(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_errors_per_domains(project_id=projectId, **{**data, **args})} - - -# 17 -@app.route('/{projectId}/dashboard/sessions_per_browser', methods=['GET', 'POST']) -def get_dashboard_sessions_per_browser(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_sessions_per_browser(project_id=projectId, **{**data, **args})} - - -# 18 -@app.route('/{projectId}/dashboard/calls_errors', methods=['GET', 'POST']) -def get_dashboard_calls_errors(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_calls_errors(project_id=projectId, **{**data, **args})} - - -# 18.1 -@app.route('/{projectId}/dashboard/calls_errors_4xx', methods=['GET', 'POST']) -def get_dashboard_calls_errors_4xx(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_calls_errors_4xx(project_id=projectId, **{**data, **args})} - - -# 18.2 -@app.route('/{projectId}/dashboard/calls_errors_5xx', methods=['GET', 'POST']) -def get_dashboard_calls_errors_5xx(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_calls_errors_5xx(project_id=projectId, **{**data, **args})} - - -# 19 -@app.route('/{projectId}/dashboard/errors_per_type', methods=['GET', 'POST']) -def get_dashboard_errors_per_type(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_errors_per_type(project_id=projectId, **{**data, **args})} - - -# 20 -@app.route('/{projectId}/dashboard/resources_by_party', methods=['GET', 'POST']) -def get_dashboard_resources_by_party(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_resources_by_party(project_id=projectId, **{**data, **args})} - - -# 21 -@app.route('/{projectId}/dashboard/resource_type_vs_response_end', methods=['GET', 'POST']) -def get_dashboard_errors_per_resource_type(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.resource_type_vs_response_end(project_id=projectId, **{**data, **args})} - - -# 22 -@app.route('/{projectId}/dashboard/resources_vs_visually_complete', methods=['GET', 'POST']) -def get_dashboard_resources_vs_visually_complete(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - 
params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_resources_vs_visually_complete(project_id=projectId, **{**data, **args})} - - -# 23 -@app.route('/{projectId}/dashboard/impacted_sessions_by_js_errors', methods=['GET', 'POST']) -def get_dashboard_impacted_sessions_by_js_errors(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_impacted_sessions_by_js_errors(project_id=projectId, **{**data, **args})} - - -# 24 -@app.route('/{projectId}/dashboard/resources_count_by_type', methods=['GET', 'POST']) -def get_dashboard_resources_count_by_type(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_resources_count_by_type(project_id=projectId, **{**data, **args})} - - -# 25 -@app.route('/{projectId}/dashboard/time_between_events', methods=['GET']) -def get_dashboard_resources_count_by_type(projectId, context): - return {"errors": ["please choose 2 events"]} - - -@app.route('/{projectId}/dashboard/overview', methods=['GET', 'POST']) -def get_dashboard_group(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": [ - *helper.explode_widget(key="count_sessions", - data=dashboard.get_processed_sessions(project_id=projectId, **{**data, **args})), - *helper.explode_widget(data={**dashboard.get_application_activity(project_id=projectId, **{**data, **args}), - "chart": dashboard.get_performance(project_id=projectId, **{**data, **args}) - .get("chart", [])}), - *helper.explode_widget(data=dashboard.get_page_metrics(project_id=projectId, **{**data, **args})), - *helper.explode_widget(data=dashboard.get_user_activity(project_id=projectId, **{**data, **args})), - *helper.explode_widget(data=dashboard.get_pages_dom_build_time(project_id=projectId, **{**data, **args}), - key="avg_pages_dom_buildtime"), - *helper.explode_widget(data=dashboard.get_pages_response_time(project_id=projectId, **{**data, **args}), - key="avg_pages_response_time"), - *helper.explode_widget(dashboard.get_top_metrics(project_id=projectId, **{**data, **args})), - *helper.explode_widget(data=dashboard.get_time_to_render(project_id=projectId, **{**data, **args}), - key="avg_time_to_render"), - *helper.explode_widget(dashboard.get_memory_consumption(project_id=projectId, **{**data, **args})), - *helper.explode_widget(dashboard.get_avg_cpu(project_id=projectId, **{**data, **args})), - *helper.explode_widget(dashboard.get_avg_fps(project_id=projectId, **{**data, **args})), - ]} diff --git a/api/chalicelib/core/alerts.py b/api/chalicelib/core/alerts.py index 6ed9aff3e..c701f0ce0 100644 --- a/api/chalicelib/core/alerts.py +++ b/api/chalicelib/core/alerts.py @@ -1,13 +1,11 @@ +import json import time -from chalicelib.utils.helper import environ -from chalicelib.core import notifications +import schemas +from chalicelib.core import notifications, slack, webhook from chalicelib.utils import pg_client, helper, email_helper from chalicelib.utils.TimeUTC import TimeUTC -import json - -ALLOW_UPDATE = ["name", "description", "active", "detectionMethod", "query", "options"] - +import logging def get(id): with pg_client.PostgresClient() as 
cur: @@ -37,34 +35,6 @@ def get_all(project_id): return all -SUPPORTED_THRESHOLD = [15, 30, 60, 120, 240, 1440] - - -def __transform_structure(data): - if data.get("options") is None: - return f"Missing 'options'", None - if data["options"].get("currentPeriod") not in SUPPORTED_THRESHOLD: - return f"Unsupported currentPeriod, please provide one of these values {SUPPORTED_THRESHOLD}", None - if data["options"].get("previousPeriod", 15) not in SUPPORTED_THRESHOLD: - return f"Unsupported previousPeriod, please provide one of these values {SUPPORTED_THRESHOLD}", None - if data["options"].get("renotifyInterval") is None: - data["options"]["renotifyInterval"] = 720 - data["query"]["right"] = float(data["query"]["right"]) - data["query"] = json.dumps(data["query"]) - data["description"] = data["description"] if data.get("description") is not None and len( - data["description"]) > 0 else None - if data.get("options"): - messages = [] - for m in data["options"].get("message", []): - if m.get("value") is None: - continue - m["value"] = str(m["value"]) - messages.append(m) - data["options"]["message"] = messages - data["options"] = json.dumps(data["options"]) - return None, data - - def __process_circular(alert): if alert is None: return None @@ -73,15 +43,16 @@ def __process_circular(alert): return alert -def create(project_id, data): - err, data = __transform_structure(data) - if err is not None: - return {"errors": [err]} +def create(project_id, data: schemas.AlertSchema): + data = data.dict() + data["query"] = json.dumps(data["query"]) + data["options"] = json.dumps(data["options"]) + with pg_client.PostgresClient() as cur: cur.execute( cur.mogrify("""\ - INSERT INTO public.alerts(project_id, name, description, detection_method, query, options) - VALUES (%(project_id)s, %(name)s, %(description)s, %(detectionMethod)s, %(query)s, %(options)s::jsonb) + INSERT INTO public.alerts(project_id, name, description, detection_method, query, options, series_id) + VALUES (%(project_id)s, %(name)s, %(description)s, %(detection_method)s, %(query)s, %(options)s::jsonb, %(series_id)s) RETURNING *;""", {"project_id": project_id, **data}) ) @@ -89,23 +60,24 @@ def create(project_id, data): return {"data": helper.dict_to_camel_case(__process_circular(a))} -def update(id, changes): - changes = {k: changes[k] for k in changes.keys() if k in ALLOW_UPDATE} - err, changes = __transform_structure(changes) - if err is not None: - return {"errors": [err]} - updateq = [] - for k in changes.keys(): - updateq.append(f"{helper.key_to_snake_case(k)} = %({k})s") - if len(updateq) == 0: - return {"errors": ["nothing to update"]} +def update(id, data: schemas.AlertSchema): + data = data.dict() + data["query"] = json.dumps(data["query"]) + data["options"] = json.dumps(data["options"]) + with pg_client.PostgresClient() as cur: - query = cur.mogrify(f"""\ + query = cur.mogrify("""\ UPDATE public.alerts - SET {", ".join(updateq)} + SET name = %(name)s, + description = %(description)s, + active = TRUE, + detection_method = %(detection_method)s, + query = %(query)s, + options = %(options)s, + series_id = %(series_id)s WHERE alert_id =%(id)s AND deleted_at ISNULL RETURNING *;""", - {"id": id, **changes}) + {"id": id, **data}) cur.execute(query=query) a = helper.dict_to_camel_case(cur.fetchone()) return {"data": __process_circular(a)} @@ -132,7 +104,26 @@ def process_notifications(data): BATCH_SIZE = 200 for t in full.keys(): for i in range(0, len(full[t]), BATCH_SIZE): - helper.async_post(environ['alert_ntf'] % t, {"notifications": 
full[t][i:i + BATCH_SIZE]}) + notifications_list = full[t][i:i + BATCH_SIZE] + + if t == "slack": + try: + slack.send_batch(notifications_list=notifications_list) + except Exception as e: + logging.error("!!!Error while sending slack notifications batch") + logging.error(str(e)) + elif t == "email": + try: + send_by_email_batch(notifications_list=notifications_list) + except Exception as e: + logging.error("!!!Error while sending email notifications batch") + logging.error(str(e)) + elif t == "webhook": + try: + webhook.trigger_batch(data_list=notifications_list) + except Exception as e: + logging.error("!!!Error while sending webhook notifications batch") + logging.error(str(e)) def send_by_email(notification, destination): diff --git a/api/chalicelib/core/alerts_listener.py b/api/chalicelib/core/alerts_listener.py new file mode 100644 index 000000000..419f0326d --- /dev/null +++ b/api/chalicelib/core/alerts_listener.py @@ -0,0 +1,27 @@ +from chalicelib.utils import pg_client, helper + + +def get_all_alerts(): + with pg_client.PostgresClient(long_query=True) as cur: + query = """SELECT -1 AS tenant_id, + alert_id, + project_id, + detection_method, + query, + options, + (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at, + alerts.name, + alerts.series_id, + filter + FROM public.alerts + LEFT JOIN metric_series USING (series_id) + INNER JOIN projects USING (project_id) + WHERE alerts.deleted_at ISNULL + AND alerts.active + AND projects.active + AND projects.deleted_at ISNULL + AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL) + ORDER BY alerts.created_at;""" + cur.execute(query=query) + all_alerts = helper.list_to_camel_case(cur.fetchall()) + return all_alerts diff --git a/api/chalicelib/core/alerts_processor.py b/api/chalicelib/core/alerts_processor.py new file mode 100644 index 000000000..80973fadd --- /dev/null +++ b/api/chalicelib/core/alerts_processor.py @@ -0,0 +1,250 @@ +import decimal +import logging + +import schemas +from chalicelib.core import alerts_listener +from chalicelib.core import sessions, alerts +from chalicelib.utils import pg_client +from chalicelib.utils.TimeUTC import TimeUTC + +LeftToDb = { + schemas.AlertColumn.performance__dom_content_loaded__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"}, + schemas.AlertColumn.performance__first_meaningful_paint__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"}, + schemas.AlertColumn.performance__page_load_time__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(load_time ,0))"}, + schemas.AlertColumn.performance__dom_build_time__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(dom_building_time,0))"}, + schemas.AlertColumn.performance__speed_index__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(speed_index,0))"}, + schemas.AlertColumn.performance__page_response_time__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(response_time,0))"}, + schemas.AlertColumn.performance__ttfb__average: { + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(first_paint_time,0))"}, + schemas.AlertColumn.performance__time_to_render__average: 
{ + "table": "events.pages INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(visually_complete,0))"}, + schemas.AlertColumn.performance__image_load_time__average: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='img'"}, + schemas.AlertColumn.performance__request_load_time__average: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='fetch'"}, + schemas.AlertColumn.resources__load_time__average: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "AVG(NULLIF(resources.duration,0))"}, + schemas.AlertColumn.resources__missing__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE"}, + schemas.AlertColumn.errors__4xx_5xx__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)", + "condition": "status/100!=2"}, + schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=4"}, + schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(session_id)", "condition": "status/100=5"}, + schemas.AlertColumn.errors__javascript__impacted_sessions__count: { + "table": "events.resources INNER JOIN public.sessions USING(session_id)", + "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"}, + schemas.AlertColumn.performance__crashes__count: { + "table": "(SELECT *, start_ts AS timestamp FROM public.sessions WHERE errors_count > 0) AS sessions", + "formula": "COUNT(DISTINCT session_id)", "condition": "errors_count > 0"}, + schemas.AlertColumn.errors__javascript__count: { + "table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", + "formula": "COUNT(DISTINCT session_id)", "condition": "source='js_exception'", "joinSessions": False}, + schemas.AlertColumn.errors__backend__count: { + "table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)", + "formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False}, +} + +# This is the frequency of execution for each threshold +TimeInterval = { + 15: 3, + 30: 5, + 60: 10, + 120: 20, + 240: 30, + 1440: 60, +} + + +def can_check(a) -> bool: + now = TimeUTC.now() + + repetitionBase = a["options"]["currentPeriod"] \ + if a["detectionMethod"] == schemas.AlertDetectionMethod.change \ + and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \ + else a["options"]["previousPeriod"] + + if TimeInterval.get(repetitionBase) is None: + logging.error(f"repetitionBase: {repetitionBase} NOT FOUND") + return False + + return (a["options"]["renotifyInterval"] <= 0 or + a["options"].get("lastNotification") is None or + a["options"]["lastNotification"] <= 0 or + ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \ + and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000 + + +def Build(a): + params = {"project_id": a["projectId"]} + full_args = {} + j_s = True + if a["seriesId"] is not None: + a["filter"]["sort"] = "session_id" + a["filter"]["order"] = "DESC" + a["filter"]["startDate"] = -1 + 
a["filter"]["endDate"] = TimeUTC.now() + full_args, query_part, sort = sessions.search_query_parts( + data=schemas.SessionsSearchPayloadSchema.parse_obj(a["filter"]), + error_status=None, errors_only=False, + favorite_only=False, issue=None, project_id=a["projectId"], + user_id=None) + subQ = f"""SELECT COUNT(session_id) AS value + {query_part}""" + else: + colDef = LeftToDb[a["query"]["left"]] + subQ = f"""SELECT {colDef["formula"]} AS value + FROM {colDef["table"]} + WHERE project_id = %(project_id)s + {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}""" + j_s = colDef.get("joinSessions", True) + + q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid""" + + # if len(colDef.group) > 0 { + # subQ = subQ.Column(colDef.group + " AS group_value") + # subQ = subQ.GroupBy(colDef.group) + # q = q.Column("group_value") + # } + + if a["detectionMethod"] == schemas.AlertDetectionMethod.threshold: + if a["seriesId"] is not None: + q += f""" FROM ({subQ}) AS stat""" + else: + q += f""" FROM ({subQ} AND timestamp>=%(startDate)s + {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}) AS stat""" + params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000} + else: + if a["options"]["change"] == schemas.AlertDetectionChangeType.change: + # if len(colDef.group) > 0: + # subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod * 60)) + # sub2, args2, _ := subQ.Where( + # sq.And{ + # sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod * 60), + # sq.Expr("timestamp>=$4 ", time.Now().Unix()-2 * a.Options.CurrentPeriod * 60), + # }).ToSql() + # sub1 := sq.Select("group_value", "(stat1.value-stat2.value) AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...) + # q = q.FromSelect(sub1, "stat") + # else: + if a["seriesId"] is not None: + sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s") + sub1 = f"SELECT (({subQ})-({sub2})) AS value" + q += f" FROM ( {sub1} ) AS stat" + params = {**params, **full_args, + "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000, + "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000} + else: + sub1 = f"""{subQ} AND timestamp>=%(startDate)s + {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}""" + params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000 + sub2 = f"""{subQ} AND timestamp<%(startDate)s + AND timestamp>=%(timestamp_sub2)s + {"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}""" + params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000 + sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value" + q += f" FROM ( {sub1} ) AS stat" + + else: + # if len(colDef.group) >0 { + # subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod * 60)) + # sub2, args2, _ := subQ.Where( + # sq.And{ + # sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod * 60), + # sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod * 60-a.Options.CurrentPeriod * 60), + # }).ToSql() + # sub1 := sq.Select("group_value", "(stat1.value/stat2.value-1)*100 AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...) 
+ # q = q.FromSelect(sub1, "stat") + # } else { + if a["seriesId"] is not None: + sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s") + sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value" + q += f" FROM ({sub1}) AS stat" + params = {**params, **full_args, + "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000, + "timestamp_sub2": TimeUTC.now() \ + - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \ + * 60 * 1000} + else: + sub1 = f"""{subQ} AND timestamp>=%(startDate)s + {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}""" + params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000 + sub2 = f"""{subQ} AND timestamp<%(startDate)s + AND timestamp>=%(timestamp_sub2)s + {"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}""" + params["timestamp_sub2"] = TimeUTC.now() \ + - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000 + sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value" + q += f" FROM ({sub1}) AS stat" + + return q, params + + +def process(): + notifications = [] + all_alerts = alerts_listener.get_all_alerts() + with pg_client.PostgresClient() as cur: + for alert in all_alerts: + if can_check(alert): + logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}") + query, params = Build(alert) + query = cur.mogrify(query, params) + logging.debug(alert) + logging.debug(query) + try: + cur.execute(query) + result = cur.fetchone() + if result["valid"]: + logging.info("Valid alert, notifying users") + notifications.append({ + "alertId": alert["alertId"], + "tenantId": alert["tenantId"], + "title": alert["name"], + "description": f"has been triggered, {alert['query']['left']} = {round(result['value'], 2)} ({alert['query']['operator']} {alert['query']['right']}).", + "buttonText": "Check metrics for more details", + "buttonUrl": f"/{alert['projectId']}/metrics", + "imageUrl": None, + "options": {"source": "ALERT", "sourceId": alert["alertId"], + "sourceMeta": alert["detectionMethod"], + "message": alert["options"]["message"], "projectId": alert["projectId"], + "data": {"title": alert["name"], + "limitValue": alert["query"]["right"], + "actualValue": float(result["value"]) \ + if isinstance(result["value"], decimal.Decimal) \ + else result["value"], + "operator": alert["query"]["operator"], + "trigger": alert["query"]["left"], + "alertId": alert["alertId"], + "detectionMethod": alert["detectionMethod"], + "currentPeriod": alert["options"]["currentPeriod"], + "previousPeriod": alert["options"]["previousPeriod"], + "createdAt": TimeUTC.now()}}, + }) + except Exception as e: + logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}") + logging.error(str(e)) + logging.error(query) + if len(notifications) > 0: + cur.execute( + cur.mogrify(f"""UPDATE public.Alerts + SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb + WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])})) + if len(notifications) > 0: + alerts.process_notifications(notifications) diff --git a/api/chalicelib/core/announcements.py b/api/chalicelib/core/announcements.py index b4d4f2a22..2ef244751 100644 --- a/api/chalicelib/core/announcements.py +++ b/api/chalicelib/core/announcements.py @@ -1,6 +1,6 @@ from chalicelib.utils import pg_client from chalicelib.utils import helper -from chalicelib.utils.helper import environ +from decouple import 
config from chalicelib.utils.TimeUTC import TimeUTC @@ -22,7 +22,7 @@ def get_all(user_id): for a in announcements: a["createdAt"] = TimeUTC.datetime_to_timestamp(a["createdAt"]) if a["imageUrl"] is not None and len(a["imageUrl"]) > 0: - a["imageUrl"] = environ["announcement_url"] + a["imageUrl"] + a["imageUrl"] = config("announcement_url") + a["imageUrl"] return announcements diff --git a/api/chalicelib/core/assist.py b/api/chalicelib/core/assist.py index 12e24cac9..cd76d0be4 100644 --- a/api/chalicelib/core/assist.py +++ b/api/chalicelib/core/assist.py @@ -1,9 +1,11 @@ +import schemas +from chalicelib.utils import pg_client, helper +from chalicelib.core import projects, sessions, sessions_metas import requests +from decouple import config + from chalicelib.core import projects, sessions, sessions_metas from chalicelib.utils import pg_client, helper -from chalicelib.core import projects, sessions, sessions_metas -from chalicelib.utils import pg_client, helper -from chalicelib.utils.helper import environ SESSION_PROJECTION_COLS = """s.project_id, s.session_id::text AS session_id, @@ -23,7 +25,7 @@ SESSION_PROJECTION_COLS = """s.project_id, def get_live_sessions(project_id, filters=None): project_key = projects.get_project_key(project_id) - connected_peers = requests.get(environ["peers"] % environ["S3_KEY"] + f"/{project_key}") + connected_peers = requests.get(config("peers") % config("S3_KEY") + f"/{project_key}") if connected_peers.status_code != 200: print("!! issue with the peer-server") print(connected_peers.text) @@ -43,7 +45,7 @@ def get_live_sessions(project_id, filters=None): continue filter_type = f["type"].upper() f["value"] = sessions.__get_sql_value_multiple(f["value"]) - if filter_type == sessions_metas.meta_type.USERID: + if filter_type == schemas.FilterType.user_id: op = sessions.__get_sql_operator(f["operator"]) extra_constraints.append(f"user_id {op} %(value_{i})s") extra_params[f"value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) @@ -67,7 +69,7 @@ def get_live_sessions(project_id, filters=None): def is_live(project_id, session_id, project_key=None): if project_key is None: project_key = projects.get_project_key(project_id) - connected_peers = requests.get(environ["peers"] % environ["S3_KEY"] + f"/{project_key}") + connected_peers = requests.get(config("peers") % config("S3_KEY") + f"/{project_key}") if connected_peers.status_code != 200: print("!! 
issue with the peer-server") print(connected_peers.text) @@ -77,5 +79,5 @@ def is_live(project_id, session_id, project_key=None): def get_ice_servers(): - return environ.get("iceServers") if environ.get("iceServers") is not None \ - and len(environ["iceServers"]) > 0 else None + return config("iceServers") if config("iceServers", default=None) is not None \ + and len(config("iceServers")) > 0 else None diff --git a/api/chalicelib/core/authorizers.py b/api/chalicelib/core/authorizers.py index 1122b0d65..33a859cc8 100644 --- a/api/chalicelib/core/authorizers.py +++ b/api/chalicelib/core/authorizers.py @@ -1,8 +1,7 @@ -from chalicelib.utils.helper import environ import jwt from chalicelib.utils import helper from chalicelib.utils.TimeUTC import TimeUTC - +from decouple import config from chalicelib.core import tenants from chalicelib.core import users @@ -14,8 +13,8 @@ def jwt_authorizer(token): try: payload = jwt.decode( token[1], - environ["jwt_secret"], - algorithms=environ["jwt_algorithm"], + config("jwt_secret"), + algorithms=config("jwt_algorithm"), audience=[f"plugin:{helper.get_stage_name()}", f"front:{helper.get_stage_name()}"] ) except jwt.ExpiredSignatureError: @@ -43,15 +42,15 @@ def generate_jwt(id, tenant_id, iat, aud): payload={ "userId": id, "tenantId": tenant_id, - "exp": iat // 1000 + int(environ["jwt_exp_delta_seconds"]) + TimeUTC.get_utc_offset() // 1000, - "iss": environ["jwt_issuer"], + "exp": iat // 1000 + config("jwt_exp_delta_seconds",cast=int) + TimeUTC.get_utc_offset() // 1000, + "iss": config("jwt_issuer"), "iat": iat // 1000, "aud": aud }, - key=environ["jwt_secret"], - algorithm=environ["jwt_algorithm"] + key=config("jwt_secret"), + algorithm=config("jwt_algorithm") ) - return token.decode("utf-8") + return token def api_key_authorizer(token): diff --git a/api/chalicelib/core/collaboration_slack.py b/api/chalicelib/core/collaboration_slack.py index b3da03a37..bd0ae7f21 100644 --- a/api/chalicelib/core/collaboration_slack.py +++ b/api/chalicelib/core/collaboration_slack.py @@ -1,5 +1,5 @@ import requests -from chalicelib.utils.helper import environ +from decouple import config from datetime import datetime from chalicelib.core import webhook @@ -95,8 +95,8 @@ class Slack: def share_session(cls, tenant_id, project_id, session_id, user, comment, integration_id=None): args = {"fallback": f"{user} has shared the below session!", "pretext": f"{user} has shared the below session!", - "title": f"{environ['SITE_URL']}/{project_id}/session/{session_id}", - "title_link": f"{environ['SITE_URL']}/{project_id}/session/{session_id}", + "title": f"{config('SITE_URL')}/{project_id}/session/{session_id}", + "title_link": f"{config('SITE_URL')}/{project_id}/session/{session_id}", "text": comment} return {"data": cls.__share_to_slack(tenant_id, integration_id, **args)} @@ -104,8 +104,8 @@ class Slack: def share_error(cls, tenant_id, project_id, error_id, user, comment, integration_id=None): args = {"fallback": f"{user} has shared the below error!", "pretext": f"{user} has shared the below error!", - "title": f"{environ['SITE_URL']}/{project_id}/errors/{error_id}", - "title_link": f"{environ['SITE_URL']}/{project_id}/errors/{error_id}", + "title": f"{config('SITE_URL')}/{project_id}/errors/{error_id}", + "title_link": f"{config('SITE_URL')}/{project_id}/errors/{error_id}", "text": comment} return {"data": cls.__share_to_slack(tenant_id, integration_id, **args)} diff --git a/api/chalicelib/core/custom_metrics.py b/api/chalicelib/core/custom_metrics.py new file mode 100644 index 
000000000..ffd911fea --- /dev/null +++ b/api/chalicelib/core/custom_metrics.py @@ -0,0 +1,225 @@ +import json + +import schemas +from chalicelib.core import sessions +from chalicelib.utils import helper, pg_client +from chalicelib.utils.TimeUTC import TimeUTC + + +def try_live(project_id, data: schemas.TryCustomMetricsSchema): + results = [] + for s in data.series: + s.filter.startDate = data.startDate + s.filter.endDate = data.endDate + results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density, + view_type=data.viewType)) + if data.viewType == schemas.MetricViewType.progress: + r = {"count": results[-1]} + diff = s.filter.endDate - s.filter.startDate + s.filter.startDate = data.endDate + s.filter.endDate = data.endDate - diff + r["previousCount"] = sessions.search2_series(data=s.filter, project_id=project_id, density=data.density, + view_type=data.viewType) + r["countProgress"] = helper.__progress(old_val=r["previousCount"], new_val=r["count"]) + results[-1] = r + return results + + +def make_chart(project_id, user_id, metric_id, data: schemas.CustomMetricChartPayloadSchema): + metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id) + metric: schemas.TryCustomMetricsSchema = schemas.TryCustomMetricsSchema.parse_obj({**data.dict(), **metric}) + return try_live(project_id=project_id, data=metric) + + +def create(project_id, user_id, data: schemas.CreateCustomMetricsSchema): + with pg_client.PostgresClient() as cur: + _data = {} + for i, s in enumerate(data.series): + for k in s.dict().keys(): + _data[f"{k}_{i}"] = s.__getattribute__(k) + _data[f"index_{i}"] = i + _data[f"filter_{i}"] = s.filter.json() + series_len = len(data.series) + data.series = None + params = {"user_id": user_id, "project_id": project_id, **data.dict(), **_data} + query = cur.mogrify(f"""\ + WITH m AS (INSERT INTO metrics (project_id, user_id, name) + VALUES (%(project_id)s, %(user_id)s, %(name)s) + RETURNING *) + INSERT + INTO metric_series(metric_id, index, name, filter) + VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)" + for i in range(series_len)])} + RETURNING metric_id;""", params) + + cur.execute( + query + ) + r = cur.fetchone() + r = helper.dict_to_camel_case(r) + return {"data": r} + + +def __get_series_id(metric_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT series_id + FROM metric_series + WHERE metric_series.metric_id = %(metric_id)s + AND metric_series.deleted_at ISNULL;""", + {"metric_id": metric_id} + ) + ) + rows = cur.fetchall() + return [r["series_id"] for r in rows] + + +def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSchema): + series_ids = __get_series_id(metric_id) + n_series = [] + d_series_ids = [] + u_series = [] + u_series_ids = [] + params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name, + "user_id": user_id, "project_id": project_id} + for i, s in enumerate(data.series): + prefix = "u_" + if s.series_id is None: + n_series.append({"i": i, "s": s}) + prefix = "n_" + else: + u_series.append({"i": i, "s": s}) + u_series_ids.append(s.series_id) + ns = s.dict() + for k in ns.keys(): + if k == "filter": + ns[k] = json.dumps(ns[k]) + params[f"{prefix}{k}_{i}"] = ns[k] + for i in series_ids: + if i not in u_series_ids: + d_series_ids.append(i) + params["d_series_ids"] = tuple(d_series_ids) + + with pg_client.PostgresClient() as cur: + sub_queries = [] + if len(n_series) > 0: + 
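+            # shape of the statement assembled below (illustrative sketch, not the literal SQL):
+            #   WITH n AS (INSERT INTO metric_series ... RETURNING 1),
+            #        u AS (UPDATE metric_series ... RETURNING 1),
+            #        d AS (DELETE FROM metric_series ... RETURNING 1)
+            #   UPDATE metrics SET ... WHERE metric_id = ... RETURNING metric_id;
+            # added, edited and deleted series are thus applied in one round trip with the metric update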
sub_queries.append(f"""\ + n AS (INSERT INTO metric_series (metric_id, index, name, filter) + VALUES {",".join([f"(%(metric_id)s, %(n_index_{s['i']})s, %(n_name_{s['i']})s, %(n_filter_{s['i']})s::jsonb)" + for s in n_series])} + RETURNING 1)""") + if len(u_series) > 0: + sub_queries.append(f"""\ + u AS (UPDATE metric_series + SET name=series.name, + filter=series.filter, + index=series.filter.index + FROM (VALUES {",".join([f"(%(u_series_id_{s['i']})s,%(u_index_{s['i']})s,%(u_name_{s['i']})s,%(u_filter_{s['i']})s::jsonb)" + for s in n_series])}) AS series(series_id, index, name, filter) + WHERE metric_id =%(metric_id)s AND series_id=series.series_id + RETURNING 1)""") + if len(d_series_ids) > 0: + sub_queries.append("""\ + d AS (DELETE FROM metric_series WHERE metric_id =%(metric_id)s AND series_id IN %(d_series_ids)s + RETURNING 1)""") + query = cur.mogrify(f"""\ + {"WITH " if len(sub_queries) > 0 else ""}{",".join(sub_queries)} + UPDATE metrics + SET name = %(name)s, is_public= %(is_public)s + WHERE metric_id = %(metric_id)s + AND project_id = %(project_id)s + AND (user_id = %(user_id)s OR is_public) + RETURNING metric_id;""", params) + cur.execute( + query + ) + r = cur.fetchone() + r = helper.dict_to_camel_case(r) + return r + + +def get_all(project_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT * + FROM metrics + LEFT JOIN LATERAL (SELECT jsonb_agg(metric_series.* ORDER BY index) AS series + FROM metric_series + WHERE metric_series.metric_id = metrics.metric_id + AND metric_series.deleted_at ISNULL + ) AS metric_series ON (TRUE) + WHERE metrics.project_id = %(project_id)s + AND metrics.deleted_at ISNULL + AND (user_id = %(user_id)s OR is_public) + ORDER BY created_at;""", + {"project_id": project_id, "user_id": user_id} + ) + ) + rows = cur.fetchall() + for r in rows: + r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) + rows = helper.list_to_camel_case(rows) + return rows + + +def delete(project_id, metric_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify("""\ + UPDATE public.metrics + SET deleted_at = timezone('utc'::text, now()) + WHERE project_id = %(project_id)s + AND metric_id = %(metric_id)s + AND (user_id = %(user_id)s OR is_public);""", + {"metric_id": metric_id, "project_id": project_id, "user_id": user_id}) + ) + + return {"state": "success"} + + +def get(metric_id, project_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT * + FROM metrics + LEFT JOIN LATERAL (SELECT jsonb_agg(metric_series.* ORDER BY index) AS series + FROM metric_series + WHERE metric_series.metric_id = metrics.metric_id + AND metric_series.deleted_at ISNULL + ) AS metric_series ON (TRUE) + WHERE metrics.project_id = %(project_id)s + AND metrics.deleted_at ISNULL + AND (metrics.user_id = %(user_id)s OR metrics.is_public) + AND metrics.metric_id = %(metric_id)s + ORDER BY created_at;""", + {"metric_id": metric_id, "project_id": project_id, "user_id": user_id} + ) + ) + row = cur.fetchone() + row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"]) + return helper.dict_to_camel_case(row) + + +def get_series_for_alert(project_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT metric_id, + series_id, + metrics.name AS metric_name, + metric_series.name AS series_name, + index AS series_index + FROM metric_series + INNER JOIN metrics USING (metric_id) + WHERE metrics.deleted_at ISNULL + AND 
metrics.project_id = %(project_id)s + AND (user_id = %(user_id)s OR is_public) + ORDER BY metric_name, series_index, series_name;""", + {"project_id": project_id, "user_id": user_id} + ) + ) + rows = cur.fetchall() + return helper.list_to_camel_case(rows) diff --git a/api/chalicelib/core/dashboard.py b/api/chalicelib/core/dashboard.py index 919d6aa5a..9cd88eb6a 100644 --- a/api/chalicelib/core/dashboard.py +++ b/api/chalicelib/core/dashboard.py @@ -1,3 +1,4 @@ +import schemas from chalicelib.core import metadata from chalicelib.utils import args_transformer from chalicelib.utils import helper, dev @@ -94,25 +95,25 @@ def __get_meta_constraint(project_id, data): else: filter_type = f["key"].upper() filter_type = [filter_type, "USER" + filter_type, filter_type[4:]] - if any(item in [sessions_metas.meta_type.USERBROWSER] \ + if any(item in [schemas.FilterType.user_browser] \ for item in filter_type): constraints.append(f"sessions.user_browser = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USEROS, sessions_metas.meta_type.USEROS_IOS] \ + elif any(item in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios] \ for item in filter_type): constraints.append(f"sessions.user_os = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USERDEVICE, sessions_metas.meta_type.USERDEVICE_IOS] \ + elif any(item in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios] \ for item in filter_type): constraints.append(f"sessions.user_device = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USERCOUNTRY, sessions_metas.meta_type.USERCOUNTRY_IOS] \ + elif any(item in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios] \ for item in filter_type): constraints.append(f"sessions.user_country = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS] \ + elif any(item in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios] \ for item in filter_type): constraints.append(f"sessions.user_id = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.USERANONYMOUSID, sessions_metas.meta_type.USERANONYMOUSID_IOS] \ + elif any(item in [schemas.FilterType.user_anonymous_id, schemas.FilterType.user_anonymous_id_ios] \ for item in filter_type): constraints.append(f"sessions.user_anonymous_id = %({f['key']}_{i})s") - elif any(item in [sessions_metas.meta_type.REVID, sessions_metas.meta_type.REVID_IOS] \ + elif any(item in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios] \ for item in filter_type): constraints.append(f"sessions.rev_id = %({f['key']}_{i})s") return constraints diff --git a/api/chalicelib/core/errors.py b/api/chalicelib/core/errors.py index 642501246..2bd4a8d80 100644 --- a/api/chalicelib/core/errors.py +++ b/api/chalicelib/core/errors.py @@ -777,4 +777,4 @@ def stats(project_id, user_id, startTimestamp=TimeUTC.now(delta_days=-7), endTim return { "data": helper.dict_to_camel_case(row) - } + } \ No newline at end of file diff --git a/api/chalicelib/core/events.py b/api/chalicelib/core/events.py index 69213a079..0a330d625 100644 --- a/api/chalicelib/core/events.py +++ b/api/chalicelib/core/events.py @@ -1,6 +1,7 @@ -from chalicelib.utils import pg_client, helper -from chalicelib.core import sessions_metas, metadata +import schemas from chalicelib.core import issues +from chalicelib.core import sessions_metas, metadata +from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC from 
chalicelib.utils.event_filter_definition import SupportedFilter, Event @@ -235,23 +236,23 @@ def __generic_autocomplete(event: Event): class event_type: - CLICK = Event(ui_type="CLICK", table="events.clicks", column="label") - INPUT = Event(ui_type="INPUT", table="events.inputs", column="label") - LOCATION = Event(ui_type="LOCATION", table="events.pages", column="base_path") - CUSTOM = Event(ui_type="CUSTOM", table="events_common.customs", column="name") - REQUEST = Event(ui_type="REQUEST", table="events_common.requests", column="url") - GRAPHQL = Event(ui_type="GRAPHQL", table="events.graphql", column="name") - STATEACTION = Event(ui_type="STATEACTION", table="events.state_actions", column="name") - ERROR = Event(ui_type="ERROR", table="events.errors", + CLICK = Event(ui_type=schemas.EventType.click, table="events.clicks", column="label") + INPUT = Event(ui_type=schemas.EventType.input, table="events.inputs", column="label") + LOCATION = Event(ui_type=schemas.EventType.location, table="events.pages", column="base_path") + CUSTOM = Event(ui_type=schemas.EventType.custom, table="events_common.customs", column="name") + REQUEST = Event(ui_type=schemas.EventType.request, table="events_common.requests", column="url") + GRAPHQL = Event(ui_type=schemas.EventType.graphql, table="events.graphql", column="name") + STATEACTION = Event(ui_type=schemas.EventType.state_action, table="events.state_actions", column="name") + ERROR = Event(ui_type=schemas.EventType.error, table="events.errors", column=None) # column=None because errors are searched by name or message - METADATA = Event(ui_type="METADATA", table="public.sessions", column=None) + METADATA = Event(ui_type=schemas.EventType.metadata, table="public.sessions", column=None) # IOS - CLICK_IOS = Event(ui_type="CLICK_IOS", table="events_ios.clicks", column="label") - INPUT_IOS = Event(ui_type="INPUT_IOS", table="events_ios.inputs", column="label") - VIEW_IOS = Event(ui_type="VIEW_IOS", table="events_ios.views", column="name") - CUSTOM_IOS = Event(ui_type="CUSTOM_IOS", table="events_common.customs", column="name") - REQUEST_IOS = Event(ui_type="REQUEST_IOS", table="events_common.requests", column="url") - ERROR_IOS = Event(ui_type="ERROR_IOS", table="events_ios.crashes", + CLICK_IOS = Event(ui_type=schemas.EventType.click_ios, table="events_ios.clicks", column="label") + INPUT_IOS = Event(ui_type=schemas.EventType.input_ios, table="events_ios.inputs", column="label") + VIEW_IOS = Event(ui_type=schemas.EventType.view_ios, table="events_ios.views", column="name") + CUSTOM_IOS = Event(ui_type=schemas.EventType.custom_ios, table="events_common.customs", column="name") + REQUEST_IOS = Event(ui_type=schemas.EventType.request_ios, table="events_common.requests", column="url") + ERROR_IOS = Event(ui_type=schemas.EventType.error_ios, table="events_ios.crashes", column=None) # column=None because errors are searched by name or message @@ -389,18 +390,18 @@ def search_pg2(text, event_type, project_id, source, key): if not event_type: return {"data": __get_autocomplete_table(text, project_id)} - if event_type.upper() in SUPPORTED_TYPES.keys(): - rows = SUPPORTED_TYPES[event_type.upper()].get(project_id=project_id, value=text, key=key, source=source) - if event_type.upper() + "_IOS" in SUPPORTED_TYPES.keys(): - rows += SUPPORTED_TYPES[event_type.upper() + "_IOS"].get(project_id=project_id, value=text, key=key, - source=source) - elif event_type.upper() + "_IOS" in SUPPORTED_TYPES.keys(): - rows = SUPPORTED_TYPES[event_type.upper() + 
"_IOS"].get(project_id=project_id, value=text, key=key, - source=source) - elif event_type.upper() in sessions_metas.SUPPORTED_TYPES.keys(): + if event_type in SUPPORTED_TYPES.keys(): + rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source) + if event_type + "_IOS" in SUPPORTED_TYPES.keys(): + rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key, + source=source) + elif event_type + "_IOS" in SUPPORTED_TYPES.keys(): + rows = SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key, + source=source) + elif event_type in sessions_metas.SUPPORTED_TYPES.keys(): return sessions_metas.search(text, event_type, project_id) - elif event_type.upper().endswith("_IOS") \ - and event_type.upper()[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys(): + elif event_type.endswith("_IOS") \ + and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys(): return sessions_metas.search(text, event_type, project_id) else: return {"errors": ["unsupported event"]} diff --git a/api/chalicelib/core/funnels.py b/api/chalicelib/core/funnels.py index 24326902a..d73204c77 100644 --- a/api/chalicelib/core/funnels.py +++ b/api/chalicelib/core/funnels.py @@ -1,10 +1,10 @@ +import json + import chalicelib.utils.helper from chalicelib.core import events, significance, sessions -from chalicelib.utils.TimeUTC import TimeUTC - -from chalicelib.utils import helper, pg_client from chalicelib.utils import dev -import json +from chalicelib.utils import helper, pg_client +from chalicelib.utils.TimeUTC import TimeUTC REMOVE_KEYS = ["key", "_key", "startDate", "endDate"] @@ -40,7 +40,7 @@ def create(project_id, user_id, name, filter, is_public): return {"data": r} -def update(funnel_id, user_id, name=None, filter=None, is_public=None): +def update(funnel_id, user_id, project_id, name=None, filter=None, is_public=None): s_query = [] if filter is not None: helper.delete_keys_from_dict(filter, REMOVE_KEYS) @@ -56,9 +56,10 @@ def update(funnel_id, user_id, name=None, filter=None, is_public=None): UPDATE public.funnels SET {" , ".join(s_query)} WHERE funnel_id=%(funnel_id)s - RETURNING *;""", - {"user_id": user_id, "funnel_id": funnel_id, "name": name, - "filter": json.dumps(filter) if filter is not None else None, "is_public": is_public}) + AND project_id = %(project_id)s + AND (user_id = %(user_id)s OR is_public) + RETURNING *;""", {"user_id": user_id, "funnel_id": funnel_id, "name": name, + "filter": json.dumps(filter) if filter is not None else None, "is_public": is_public}) # print("--------------------") # print(query) # print("--------------------") @@ -74,13 +75,12 @@ def update(funnel_id, user_id, name=None, filter=None, is_public=None): def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date=None, details=False): with pg_client.PostgresClient() as cur: - team_query = "" cur.execute( cur.mogrify( f"""\ - SELECT DISTINCT ON (funnels.funnel_id) funnel_id,project_id, user_id, name, created_at, deleted_at, is_public + SELECT funnel_id, project_id, user_id, name, created_at, deleted_at, is_public {",filter" if details else ""} - FROM public.funnels {team_query} + FROM public.funnels WHERE project_id = %(project_id)s AND funnels.deleted_at IS NULL AND (funnels.user_id = %(user_id)s OR funnels.is_public);""", @@ -135,7 +135,8 @@ def delete(project_id, funnel_id, user_id): UPDATE public.funnels SET deleted_at = timezone('utc'::text, now()) WHERE project_id = %(project_id)s - AND funnel_id = 
%(funnel_id)s;""", + AND funnel_id = %(funnel_id)s + AND (user_id = %(user_id)s OR is_public);""", {"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id}) ) @@ -220,7 +221,7 @@ def get_issues_on_the_fly(funnel_id, project_id, data): last_stage=last_stage))} -def get(funnel_id, project_id): +def get(funnel_id, project_id, user_id): with pg_client.PostgresClient() as cur: cur.execute( cur.mogrify( @@ -230,8 +231,9 @@ def get(funnel_id, project_id): FROM public.funnels WHERE project_id = %(project_id)s AND deleted_at IS NULL - AND funnel_id = %(funnel_id)s;""", - {"funnel_id": funnel_id, "project_id": project_id} + AND funnel_id = %(funnel_id)s + AND (user_id = %(user_id)s OR is_public);""", + {"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id} ) ) @@ -247,7 +249,7 @@ def get(funnel_id, project_id): @dev.timed def search_by_issue(user_id, project_id, funnel_id, issue_id, data, range_value=None, start_date=None, end_date=None): if len(data.get("events", [])) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) + f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id) if f is None: return {"errors": ["funnel not found"]} get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=data.get('startDate', start_date), diff --git a/api/chalicelib/core/insights.py b/api/chalicelib/core/insights.py new file mode 100644 index 000000000..08adfd3ca --- /dev/null +++ b/api/chalicelib/core/insights.py @@ -0,0 +1,933 @@ +import schemas +from chalicelib.core import sessions_metas +from chalicelib.utils import helper, dev +from chalicelib.utils import pg_client +from chalicelib.utils.TimeUTC import TimeUTC +from chalicelib.utils.metrics_helper import __get_step_size +import math +from chalicelib.core.dashboard import __get_constraints, __get_constraint_values + + +def __transform_journey(rows): + nodes = [] + links = [] + for r in rows: + source = r["source_event"][r["source_event"].index("_") + 1:] + target = r["target_event"][r["target_event"].index("_") + 1:] + if source not in nodes: + nodes.append(source) + if target not in nodes: + nodes.append(target) + links.append({"source": nodes.index(source), "target": nodes.index(target), "value": r["value"]}) + return {"nodes": nodes, "links": sorted(links, key=lambda x: x["value"], reverse=True)} + + +JOURNEY_DEPTH = 5 +JOURNEY_TYPES = { + "PAGES": {"table": "events.pages", "column": "base_path", "table_id": "message_id"}, + "CLICK": {"table": "events.clicks", "column": "label", "table_id": "message_id"}, + # "VIEW": {"table": "events_ios.views", "column": "name", "table_id": "seq_index"}, TODO: enable this for SAAS only + "EVENT": {"table": "events_common.customs", "column": "name", "table_id": "seq_index"} +} + + +@dev.timed +def journey(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(), filters=[], **args): + pg_sub_query_subset = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + event_start = None + event_table = JOURNEY_TYPES["PAGES"]["table"] + event_column = JOURNEY_TYPES["PAGES"]["column"] + event_table_id = JOURNEY_TYPES["PAGES"]["table_id"] + extra_values = {} + for f in filters: + if f["type"] == "START_POINT": + event_start = f["value"] + elif f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_table = JOURNEY_TYPES[f["value"]]["table"] + event_column = JOURNEY_TYPES[f["value"]]["column"] + elif f["type"] in [schemas.FilterType.user_id, 
schemas.FilterType.user_id_ios]: + pg_sub_query_subset.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT source_event, + target_event, + count(*) AS value + + FROM (SELECT event_number || '_' || value as target_event, + LAG(event_number || '_' || value, 1) OVER ( PARTITION BY session_rank ) AS source_event + FROM (SELECT value, + session_rank, + message_id, + ROW_NUMBER() OVER ( PARTITION BY session_rank ORDER BY timestamp ) AS event_number + + {f"FROM (SELECT * FROM (SELECT *, MIN(mark) OVER ( PARTITION BY session_id , session_rank ORDER BY timestamp ) AS max FROM (SELECT *, CASE WHEN value = %(event_start)s THEN timestamp ELSE NULL END as mark" + if event_start else ""} + + FROM (SELECT session_id, + message_id, + timestamp, + value, + SUM(new_session) OVER (ORDER BY session_id, timestamp) AS session_rank + FROM (SELECT *, + CASE + WHEN source_timestamp IS NULL THEN 1 + ELSE 0 END AS new_session + FROM (SELECT session_id, + {event_table_id} AS message_id, + timestamp, + {event_column} AS value, + LAG(timestamp) + OVER (PARTITION BY session_id ORDER BY timestamp) AS source_timestamp + FROM {event_table} INNER JOIN public.sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query_subset)} + ) AS related_events) AS ranked_events) AS processed + {") AS marked) AS maxed WHERE timestamp >= max) AS filtered" if event_start else ""} + ) AS sorted_events + WHERE event_number <= %(JOURNEY_DEPTH)s) AS final + WHERE source_event IS NOT NULL + and target_event IS NOT NULL + GROUP BY source_event, target_event + ORDER BY value DESC + LIMIT 20;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, "event_start": event_start, "JOURNEY_DEPTH": JOURNEY_DEPTH, + **__get_constraint_values(args), **extra_values} + # print(cur.mogrify(pg_query, params)) + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + + return __transform_journey(rows) + + +def __compute_weekly_percentage(rows): + if rows is None or len(rows) == 0: + return rows + t = -1 + for r in rows: + if r["week"] == 0: + t = r["usersCount"] + r["percentage"] = r["usersCount"] / t + return rows + + +def __complete_retention(rows, start_date, end_date=None): + if rows is None: + return [] + max_week = 10 + for i in range(max_week): + if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date: + break + neutral = { + "firstConnexionWeek": start_date, + "week": i, + "usersCount": 0, + "connectedUsers": [], + "percentage": 0 + } + if i < len(rows) \ + and i != rows[i]["week"]: + rows.insert(i, neutral) + elif i >= len(rows): + rows.append(neutral) + return rows + + +def __complete_acquisition(rows, start_date, end_date=None): + if rows is None: + return [] + max_week = 10 + week = 0 + delta_date = 0 + while max_week > 0: + start_date += TimeUTC.MS_WEEK + if end_date is not None and start_date >= end_date: + break + delta = 0 + if delta_date + week >= len(rows) \ + or delta_date + week < len(rows) and rows[delta_date + week]["firstConnexionWeek"] > start_date: + for i in range(max_week): + if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date: + break + + neutral = { + "firstConnexionWeek": start_date, + "week": i, + "usersCount": 0, + "connectedUsers": [], + "percentage": 0 + } + rows.insert(delta_date + week + i, neutral) + delta = i + else: + for i in range(max_week): + if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date: + 
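+                    # this week falls outside the requested window, so stop padding this cohort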
break + + neutral = { + "firstConnexionWeek": start_date, + "week": i, + "usersCount": 0, + "connectedUsers": [], + "percentage": 0 + } + if delta_date + week + i < len(rows) \ + and i != rows[delta_date + week + i]["week"]: + rows.insert(delta_date + week + i, neutral) + elif delta_date + week + i >= len(rows): + rows.append(neutral) + delta = i + week += delta + max_week -= 1 + delta_date += 1 + return rows + + +@dev.timed +def users_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), filters=[], + **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + pg_sub_query.append("user_id IS NOT NULL") + pg_sub_query.append("DATE_TRUNC('week', to_timestamp(start_ts / 1000)) = to_timestamp(%(startTimestamp)s / 1000)") + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT FLOOR(DATE_PART('day', connexion_week - DATE_TRUNC('week', to_timestamp(%(startTimestamp)s / 1000)::timestamp)) / 7)::integer AS week, + COUNT(DISTINCT connexions_list.user_id) AS users_count, + ARRAY_AGG(DISTINCT connexions_list.user_id) AS connected_users + FROM (SELECT DISTINCT user_id + FROM sessions + WHERE {" AND ".join(pg_sub_query)} + AND DATE_PART('week', to_timestamp((sessions.start_ts - %(startTimestamp)s)/1000)) = 1 + AND NOT EXISTS((SELECT 1 + FROM sessions AS bsess + WHERE bsess.start_ts < %(startTimestamp)s + AND project_id = %(project_id)s + AND bsess.user_id = sessions.user_id + LIMIT 1)) + ) AS users_list + LEFT JOIN LATERAL (SELECT DATE_TRUNC('week', to_timestamp(start_ts / 1000)::timestamp) AS connexion_week, + user_id + FROM sessions + WHERE users_list.user_id = sessions.user_id + AND %(startTimestamp)s <=sessions.start_ts + AND sessions.project_id = %(project_id)s + AND sessions.start_ts < (%(endTimestamp)s - 1) + GROUP BY connexion_week, user_id + ) AS connexions_list ON (TRUE) + GROUP BY week + ORDER BY week;""" + + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args)} + print(cur.mogrify(pg_query, params)) + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + rows = __compute_weekly_percentage(helper.list_to_camel_case(rows)) + return { + "startTimestamp": startTimestamp, + "chart": __complete_retention(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now()) + } + + +@dev.timed +def users_acquisition(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], + **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + pg_sub_query.append("user_id IS NOT NULL") + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT EXTRACT(EPOCH FROM first_connexion_week::date)::bigint*1000 AS first_connexion_week, + FLOOR(DATE_PART('day', connexion_week - first_connexion_week) / 7)::integer AS week, + COUNT(DISTINCT connexions_list.user_id) AS users_count, + ARRAY_AGG(DISTINCT connexions_list.user_id) AS connected_users + FROM (SELECT user_id, MIN(DATE_TRUNC('week', to_timestamp(start_ts / 1000))) AS first_connexion_week + FROM sessions + WHERE {" AND ".join(pg_sub_query)} + AND NOT EXISTS((SELECT 1 + FROM sessions AS bsess + WHERE 
bsess.start_ts<%(startTimestamp)s + AND project_id = %(project_id)s + AND bsess.user_id = sessions.user_id + LIMIT 1)) + GROUP BY user_id) AS users_list + LEFT JOIN LATERAL (SELECT DATE_TRUNC('week', to_timestamp(start_ts / 1000)::timestamp) AS connexion_week, + user_id + FROM sessions + WHERE users_list.user_id = sessions.user_id + AND first_connexion_week <= + DATE_TRUNC('week', to_timestamp(sessions.start_ts / 1000)::timestamp) + AND sessions.project_id = %(project_id)s + AND sessions.start_ts < (%(endTimestamp)s - 1) + GROUP BY connexion_week, user_id) AS connexions_list ON (TRUE) + GROUP BY first_connexion_week, week + ORDER BY first_connexion_week, week;""" + + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args)} + print(cur.mogrify(pg_query, params)) + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + rows = __compute_weekly_percentage(helper.list_to_camel_case(rows)) + return { + "startTimestamp": startTimestamp, + "chart": __complete_acquisition(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now()) + } + + +@dev.timed +def feature_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], + **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + pg_sub_query.append("user_id IS NOT NULL") + pg_sub_query.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query.append("feature.timestamp < %(endTimestamp)s") + event_type = "PAGES" + event_value = "/" + extra_values = {} + default = True + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + pg_sub_query.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + pg_sub_query.append(f"feature.{event_column} = %(value)s") + + with pg_client.PostgresClient() as cur: + if default: + # get most used value + pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query[:-1])} + AND length({event_column}) > 2 + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + cur.execute(cur.mogrify(pg_query, params)) + row = cur.fetchone() + if row is not None: + event_value = row["value"] + extra_values["value"] = event_value + if len(event_value) > 2: + pg_sub_query.append(f"length({event_column})>2") + pg_query = f"""SELECT FLOOR(DATE_PART('day', connexion_week - to_timestamp(%(startTimestamp)s/1000)) / 7)::integer AS week, + COUNT(DISTINCT connexions_list.user_id) AS users_count, + ARRAY_AGG(DISTINCT connexions_list.user_id) AS connected_users + FROM (SELECT DISTINCT user_id + FROM sessions INNER JOIN {event_table} AS feature USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + AND DATE_PART('week', to_timestamp((sessions.start_ts - %(startTimestamp)s)/1000)) = 1 + AND NOT 
EXISTS((SELECT 1 + FROM sessions AS bsess INNER JOIN {event_table} AS bfeature USING (session_id) + WHERE bsess.start_ts<%(startTimestamp)s + AND project_id = %(project_id)s + AND bsess.user_id = sessions.user_id + AND bfeature.timestamp<%(startTimestamp)s + AND bfeature.{event_column}=%(value)s + LIMIT 1)) + GROUP BY user_id) AS users_list + LEFT JOIN LATERAL (SELECT DATE_TRUNC('week', to_timestamp(start_ts / 1000)::timestamp) AS connexion_week, + user_id + FROM sessions INNER JOIN {event_table} AS feature USING (session_id) + WHERE users_list.user_id = sessions.user_id + AND %(startTimestamp)s <= sessions.start_ts + AND sessions.project_id = %(project_id)s + AND sessions.start_ts < (%(endTimestamp)s - 1) + AND feature.timestamp >= %(startTimestamp)s + AND feature.timestamp < %(endTimestamp)s + AND feature.{event_column} = %(value)s + GROUP BY connexion_week, user_id) AS connexions_list ON (TRUE) + GROUP BY week + ORDER BY week;""" + + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + print(cur.mogrify(pg_query, params)) + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + rows = __compute_weekly_percentage(helper.list_to_camel_case(rows)) + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}], + "chart": __complete_retention(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now()) + } + + +@dev.timed +def feature_acquisition(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], + **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + pg_sub_query.append("user_id IS NOT NULL") + pg_sub_query.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query.append("feature.timestamp < %(endTimestamp)s") + event_type = "PAGES" + event_value = "/" + extra_values = {} + default = True + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + pg_sub_query.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + + pg_sub_query.append(f"feature.{event_column} = %(value)s") + + with pg_client.PostgresClient() as cur: + if default: + # get most used value + pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query[:-1])} + AND length({event_column}) > 2 + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + cur.execute(cur.mogrify(pg_query, params)) + row = cur.fetchone() + if row is not None: + event_value = row["value"] + extra_values["value"] = event_value + if len(event_value) > 2: + pg_sub_query.append(f"length({event_column})>2") + pg_query = f"""SELECT EXTRACT(EPOCH FROM first_connexion_week::date)::bigint*1000 AS 
first_connexion_week, + FLOOR(DATE_PART('day', connexion_week - first_connexion_week) / 7)::integer AS week, + COUNT(DISTINCT connexions_list.user_id) AS users_count, + ARRAY_AGG(DISTINCT connexions_list.user_id) AS connected_users + FROM (SELECT user_id, DATE_TRUNC('week', to_timestamp(first_connexion_week / 1000)) AS first_connexion_week + FROM(SELECT DISTINCT user_id, MIN(start_ts) AS first_connexion_week + FROM sessions INNER JOIN {event_table} AS feature USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + AND NOT EXISTS((SELECT 1 + FROM sessions AS bsess INNER JOIN {event_table} AS bfeature USING (session_id) + WHERE bsess.start_ts<%(startTimestamp)s + AND project_id = %(project_id)s + AND bsess.user_id = sessions.user_id + AND bfeature.timestamp<%(startTimestamp)s + AND bfeature.{event_column}=%(value)s + LIMIT 1)) + GROUP BY user_id) AS raw_users_list) AS users_list + LEFT JOIN LATERAL (SELECT DATE_TRUNC('week', to_timestamp(start_ts / 1000)::timestamp) AS connexion_week, + user_id + FROM sessions INNER JOIN {event_table} AS feature USING(session_id) + WHERE users_list.user_id = sessions.user_id + AND first_connexion_week <= + DATE_TRUNC('week', to_timestamp(sessions.start_ts / 1000)::timestamp) + AND sessions.project_id = %(project_id)s + AND sessions.start_ts < (%(endTimestamp)s - 1) + AND feature.timestamp >= %(startTimestamp)s + AND feature.timestamp < %(endTimestamp)s + AND feature.{event_column} = %(value)s + GROUP BY connexion_week, user_id) AS connexions_list ON (TRUE) + GROUP BY first_connexion_week, week + ORDER BY first_connexion_week, week;""" + + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + print(cur.mogrify(pg_query, params)) + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + rows = __compute_weekly_percentage(helper.list_to_camel_case(rows)) + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}], + "chart": __complete_acquisition(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now()) + } + + +@dev.timed +def feature_popularity_frequency(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], + **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + event_table = JOURNEY_TYPES["CLICK"]["table"] + event_column = JOURNEY_TYPES["CLICK"]["column"] + extra_values = {} + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_table = JOURNEY_TYPES[f["value"]]["table"] + event_column = JOURNEY_TYPES[f["value"]]["column"] + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + pg_sub_query.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT COUNT(DISTINCT user_id) AS count + FROM sessions + WHERE {" AND ".join(pg_sub_query)} + AND user_id IS NOT NULL;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(cur.mogrify(pg_query, params)) + # print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + all_user_count = 
cur.fetchone()["count"] + if all_user_count == 0: + return [] + pg_sub_query.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query.append("feature.timestamp < %(endTimestamp)s") + pg_sub_query.append(f"length({event_column})>2") + pg_query = f"""SELECT {event_column} AS value, COUNT(DISTINCT user_id) AS count + FROM {event_table} AS feature INNER JOIN sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + AND user_id IS NOT NULL + GROUP BY value + ORDER BY count DESC + LIMIT 7;""" + # TODO: solve full scan + print(cur.mogrify(pg_query, params)) + print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + popularity = cur.fetchall() + pg_query = f"""SELECT {event_column} AS value, COUNT(session_id) AS count + FROM {event_table} AS feature INNER JOIN sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + GROUP BY value;""" + # TODO: solve full scan + print(cur.mogrify(pg_query, params)) + print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + frequencies = cur.fetchall() + total_usage = sum([f["count"] for f in frequencies]) + frequencies = {f["value"]: f["count"] for f in frequencies} + for p in popularity: + p["popularity"] = p.pop("count") / all_user_count + p["frequency"] = frequencies[p["value"]] / total_usage + + return popularity + + +@dev.timed +def feature_adoption(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], + **args): + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + event_type = "CLICK" + event_value = '/' + extra_values = {} + default = True + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + pg_sub_query.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT COUNT(DISTINCT user_id) AS count + FROM sessions + WHERE {" AND ".join(pg_sub_query)} + AND user_id IS NOT NULL;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(cur.mogrify(pg_query, params)) + # print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + all_user_count = cur.fetchone()["count"] + if all_user_count == 0: + return {"adoption": 0, "target": 0, "filters": [{"type": "EVENT_TYPE", "value": event_type}, + {"type": "EVENT_VALUE", "value": event_value}], } + pg_sub_query.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query.append("feature.timestamp < %(endTimestamp)s") + if default: + # get most used value + pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query[:-1])} + AND length({event_column}) > 2 + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + cur.execute(cur.mogrify(pg_query, params)) + row = cur.fetchone() + if row is not None: + event_value = row["value"] + 
extra_values["value"] = event_value + if len(event_value) > 2: + pg_sub_query.append(f"length({event_column})>2") + pg_sub_query.append(f"feature.{event_column} = %(value)s") + pg_query = f"""SELECT COUNT(DISTINCT user_id) AS count + FROM {event_table} AS feature INNER JOIN sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + AND user_id IS NOT NULL;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(cur.mogrify(pg_query, params)) + # print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + adoption = cur.fetchone()["count"] / all_user_count + return {"target": all_user_count, "adoption": adoption, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]} + + +@dev.timed +def feature_adoption_top_users(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + pg_sub_query.append("user_id IS NOT NULL") + event_type = "CLICK" + event_value = '/' + extra_values = {} + default = True + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + pg_sub_query.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + with pg_client.PostgresClient() as cur: + pg_sub_query.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query.append("feature.timestamp < %(endTimestamp)s") + if default: + # get most used value + pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query[:-1])} + AND length({event_column}) > 2 + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + cur.execute(cur.mogrify(pg_query, params)) + row = cur.fetchone() + if row is not None: + event_value = row["value"] + extra_values["value"] = event_value + if len(event_value) > 2: + pg_sub_query.append(f"length({event_column})>2") + pg_sub_query.append(f"feature.{event_column} = %(value)s") + pg_query = f"""SELECT user_id, COUNT(DISTINCT session_id) AS count + FROM {event_table} AS feature + INNER JOIN sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + GROUP BY 1 + ORDER BY 2 DESC + LIMIT 10;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(cur.mogrify(pg_query, params)) + # print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + return {"users": helper.list_to_camel_case(rows), + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]} + + +@dev.timed +def feature_adoption_daily_usage(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + pg_sub_query = 
__get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True, + chart=True, data=args) + event_type = "CLICK" + event_value = '/' + extra_values = {} + default = True + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + pg_sub_query_chart.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + with pg_client.PostgresClient() as cur: + pg_sub_query_chart.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query_chart.append("feature.timestamp < %(endTimestamp)s") + pg_sub_query.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query.append("feature.timestamp < %(endTimestamp)s") + if default: + # get most used value + pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + AND length({event_column})>2 + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + cur.execute(cur.mogrify(pg_query, params)) + row = cur.fetchone() + if row is not None: + event_value = row["value"] + extra_values["value"] = event_value + if len(event_value) > 2: + pg_sub_query.append(f"length({event_column})>2") + pg_sub_query_chart.append(f"feature.{event_column} = %(value)s") + pg_query = f"""SELECT generated_timestamp AS timestamp, + COALESCE(COUNT(session_id), 0) AS count + FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp + LEFT JOIN LATERAL ( SELECT DISTINCT session_id + FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query_chart)} + ) AS users ON (TRUE) + GROUP BY generated_timestamp + ORDER BY generated_timestamp;""" + params = {"step_size": TimeUTC.MS_DAY, "project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + print(cur.mogrify(pg_query, params)) + print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + return {"chart": helper.list_to_camel_case(rows), + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]} + + +@dev.timed +def feature_intensity(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], + **args): + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + pg_sub_query.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query.append("feature.timestamp < %(endTimestamp)s") + event_table = JOURNEY_TYPES["CLICK"]["table"] + event_column = JOURNEY_TYPES["CLICK"]["column"] + extra_values = {} + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_table = JOURNEY_TYPES[f["value"]]["table"] + event_column = JOURNEY_TYPES[f["value"]]["column"] + elif f["type"] in [schemas.FilterType.user_id, 
schemas.FilterType.user_id_ios]: + pg_sub_query.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + pg_sub_query.append(f"length({event_column})>2") + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT {event_column} AS value, AVG(DISTINCT session_id) AS avg + FROM {event_table} AS feature INNER JOIN sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + GROUP BY value + ORDER BY avg DESC + LIMIT 7;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # TODO: solve full scan issue + print(cur.mogrify(pg_query, params)) + print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + + return rows + + +@dev.timed +def users_active(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], + **args): + pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True, + chart=True, data=args) + + pg_sub_query_chart.append("user_id IS NOT NULL") + period = "DAY" + extra_values = {} + for f in filters: + if f["type"] == "PERIOD" and f["value"] in ["DAY", "WEEK"]: + period = f["value"] + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + pg_sub_query_chart.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT AVG(count) AS avg, JSONB_AGG(chart) AS chart + FROM (SELECT generated_timestamp AS timestamp, + COALESCE(COUNT(users), 0) AS count + FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp + LEFT JOIN LATERAL ( SELECT DISTINCT user_id + FROM public.sessions + WHERE {" AND ".join(pg_sub_query_chart)} + ) AS users ON (TRUE) + GROUP BY generated_timestamp + ORDER BY generated_timestamp) AS chart;""" + params = {"step_size": TimeUTC.MS_DAY if period == "DAY" else TimeUTC.MS_WEEK, + "project_id": project_id, + "startTimestamp": TimeUTC.trunc_day(startTimestamp) if period == "DAY" else TimeUTC.trunc_week( + startTimestamp), + "endTimestamp": endTimestamp, **__get_constraint_values(args), + **extra_values} + # print(cur.mogrify(pg_query, params)) + # print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + row_users = cur.fetchone() + + return row_users + + +@dev.timed +def users_power(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + pg_sub_query = __get_constraints(project_id=project_id, time_constraint=True, chart=False, data=args) + pg_sub_query.append("user_id IS NOT NULL") + + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT AVG(count) AS avg, JSONB_AGG(day_users_partition) AS partition + FROM (SELECT number_of_days, COUNT(user_id) AS count + FROM (SELECT user_id, COUNT(DISTINCT DATE_TRUNC('day', to_timestamp(start_ts / 1000))) AS number_of_days + FROM sessions + WHERE {" AND ".join(pg_sub_query)} + GROUP BY 1) AS users_connexions + GROUP BY number_of_days + ORDER BY number_of_days) AS day_users_partition;""" + params = {"project_id": project_id, + "startTimestamp": startTimestamp, "endTimestamp": endTimestamp, **__get_constraint_values(args)} + # print(cur.mogrify(pg_query, params)) + # print("---------------------") + cur.execute(cur.mogrify(pg_query, params)) + row_users = cur.fetchone() + + return helper.dict_to_camel_case(row_users) + + +@dev.timed +def 
users_slipping(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + pg_sub_query = __get_constraints(project_id=project_id, data=args, duration=True, main_table="sessions", + time_constraint=True) + pg_sub_query.append("user_id IS NOT NULL") + pg_sub_query.append("feature.timestamp >= %(startTimestamp)s") + pg_sub_query.append("feature.timestamp < %(endTimestamp)s") + event_type = "PAGES" + event_value = "/" + extra_values = {} + default = True + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + pg_sub_query.append(f"sessions.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + pg_sub_query.append(f"feature.{event_column} = %(value)s") + + with pg_client.PostgresClient() as cur: + if default: + # get most used value + pg_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature INNER JOIN public.sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query[:-1])} + AND length({event_column}) > 2 + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + cur.execute(cur.mogrify(pg_query, params)) + row = cur.fetchone() + if row is not None: + event_value = row["value"] + extra_values["value"] = event_value + if len(event_value) > 2: + pg_sub_query.append(f"length({event_column})>2") + pg_query = f"""SELECT user_id, last_time, interactions_count, MIN(start_ts) AS first_seen, MAX(start_ts) AS last_seen + FROM (SELECT user_id, MAX(timestamp) AS last_time, COUNT(DISTINCT session_id) AS interactions_count + FROM {event_table} AS feature INNER JOIN sessions USING (session_id) + WHERE {" AND ".join(pg_sub_query)} + GROUP BY user_id) AS user_last_usage + INNER JOIN sessions USING (user_id) + WHERE EXTRACT(EPOCH FROM now()) * 1000 - last_time > 7 * 24 * 60 * 60 * 1000 + GROUP BY user_id, last_time,interactions_count;""" + + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(cur.mogrify(pg_query, params)) + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}], + "list": helper.list_to_camel_case(rows) + } + + +@dev.timed +def search(text, feature_type, project_id, platform=None): + if not feature_type: + resource_type = "ALL" + data = search(text=text, feature_type=resource_type, project_id=project_id, platform=platform) + return data + + pg_sub_query = __get_constraints(project_id=project_id, time_constraint=True, duration=True, + data={} if platform is None else {"platform": platform}) + + params = {"startTimestamp": TimeUTC.now() - 2 * TimeUTC.MS_MONTH, + "endTimestamp": TimeUTC.now(), + "project_id": project_id, + "value": helper.string_to_sql_like(text.lower()), + "platform_0": platform} + if feature_type == "ALL": + with pg_client.PostgresClient() as cur: + sub_queries = [] + for e in JOURNEY_TYPES: + sub_queries.append(f"""(SELECT DISTINCT 
{JOURNEY_TYPES[e]["column"]} AS value, '{e}' AS "type" + FROM {JOURNEY_TYPES[e]["table"]} INNER JOIN public.sessions USING(session_id) + WHERE {" AND ".join(pg_sub_query)} AND {JOURNEY_TYPES[e]["column"]} ILIKE %(value)s + LIMIT 10)""") + pg_query = "UNION ALL".join(sub_queries) + # print(cur.mogrify(pg_query, params)) + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + elif JOURNEY_TYPES.get(feature_type) is not None: + with pg_client.PostgresClient() as cur: + pg_query = f"""SELECT DISTINCT {JOURNEY_TYPES[feature_type]["column"]} AS value, '{feature_type}' AS "type" + FROM {JOURNEY_TYPES[feature_type]["table"]} INNER JOIN public.sessions USING(session_id) + WHERE {" AND ".join(pg_sub_query)} AND {JOURNEY_TYPES[feature_type]["column"]} ILIKE %(value)s + LIMIT 10;""" + # print(cur.mogrify(pg_query, params)) + cur.execute(cur.mogrify(pg_query, params)) + rows = cur.fetchall() + else: + return [] + return [helper.dict_to_camel_case(row) for row in rows] \ No newline at end of file diff --git a/api/chalicelib/core/integration_github_issue.py b/api/chalicelib/core/integration_github_issue.py index a9e5e7317..0c2b78720 100644 --- a/api/chalicelib/core/integration_github_issue.py +++ b/api/chalicelib/core/integration_github_issue.py @@ -39,7 +39,7 @@ class GithubIntegrationIssue(BaseIntegrationIssue): for a in assignees: for u in metas["users"]: if a == str(u["id"]): - real_assignees.append(u["login"]) + real_assignees.append(u["name"]) break real_labels = ["OpenReplay"] for l in labels: diff --git a/api/chalicelib/core/jobs.py b/api/chalicelib/core/jobs.py index 7ad4ae4a6..4b7ba85ee 100644 --- a/api/chalicelib/core/jobs.py +++ b/api/chalicelib/core/jobs.py @@ -132,7 +132,7 @@ def get_scheduled_jobs(): def execute_jobs(): jobs = get_scheduled_jobs() if len(jobs) == 0: - print('No jobs to execute.') + # No jobs to execute return for job in jobs: diff --git a/api/chalicelib/core/metadata.py b/api/chalicelib/core/metadata.py index f3f00e4e1..301503162 100644 --- a/api/chalicelib/core/metadata.py +++ b/api/chalicelib/core/metadata.py @@ -1,7 +1,7 @@ -from chalicelib.utils import pg_client, helper, dev +import re from chalicelib.core import projects -import re +from chalicelib.utils import pg_client, dev MAX_INDEXES = 10 @@ -30,6 +30,30 @@ def get(project_id): return results +def get_batch(project_ids): + if project_ids is None or len(project_ids) == 0: + return [] + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + f"""\ + SELECT + project_id, {",".join(_get_column_names())} + FROM public.projects + WHERE project_id IN %(project_ids)s + AND deleted_at ISNULL;""", {"project_ids": tuple(project_ids)}) + ) + full_metas = cur.fetchall() + results = {} + if full_metas is not None and len(full_metas) > 0: + for metas in full_metas: + results[str(metas["project_id"])] = [] + for i, k in enumerate(metas.keys()): + if metas[k] is not None and k != "project_id": + results[str(metas["project_id"])].append({"key": metas[k], "index": i + 1}) + return results + + regex = re.compile(r'^[a-z0-9_-]+$', re.IGNORECASE) @@ -90,7 +114,9 @@ def delete(tenant_id, project_id, index: int): cur.execute(query=query) query = cur.mogrify(f"""UPDATE public.sessions SET {colname}= NULL - WHERE project_id = %(project_id)s""", + WHERE project_id = %(project_id)s + AND {colname} IS NOT NULL + """, {"project_id": project_id}) cur.execute(query=query) @@ -251,12 +277,13 @@ def add_edit_delete(tenant_id, project_id, new_metas): def get_remaining_metadata_with_count(tenant_id): all_projects = 
projects.get_projects(tenant_id=tenant_id) results = [] + used_metas = get_batch([p["projectId"] for p in all_projects]) for p in all_projects: - used_metas = get(p["projectId"]) if MAX_INDEXES < 0: remaining = -1 else: - remaining = MAX_INDEXES - len(used_metas) - results.append({**p, "limit": MAX_INDEXES, "remaining": remaining, "count": len(used_metas)}) + remaining = MAX_INDEXES - len(used_metas[str(p["projectId"])]) + results.append( + {**p, "limit": MAX_INDEXES, "remaining": remaining, "count": len(used_metas[str(p["projectId"])])}) return results diff --git a/api/chalicelib/core/mobile.py b/api/chalicelib/core/mobile.py index 12a2d268d..3e60a0826 100644 --- a/api/chalicelib/core/mobile.py +++ b/api/chalicelib/core/mobile.py @@ -1,13 +1,13 @@ from chalicelib.core import projects from chalicelib.utils import s3 -from chalicelib.utils.helper import environ +from decouple import config def sign_keys(project_id, session_id, keys): result = [] project_key = projects.get_project_key(project_id) for k in keys: - result.append(s3.get_presigned_url_for_sharing(bucket=environ["iosBucket"], + result.append(s3.get_presigned_url_for_sharing(bucket=config("iosBucket"), key=f"{project_key}/{session_id}/{k}", expires_in=60 * 60)) return result diff --git a/api/chalicelib/core/performance_event.py b/api/chalicelib/core/performance_event.py new file mode 100644 index 000000000..76633ce40 --- /dev/null +++ b/api/chalicelib/core/performance_event.py @@ -0,0 +1,15 @@ +import schemas + + +def get_col(perf: schemas.PerformanceEventType): + return { + schemas.PerformanceEventType.location_dom_complete: {"column": "dom_building_time", "extraJoin": None}, + schemas.PerformanceEventType.location_ttfb: {"column": "ttfb", "extraJoin": None}, + schemas.PerformanceEventType.location_avg_cpu_load: {"column": "avg_cpu", "extraJoin": "events.performance"}, + schemas.PerformanceEventType.location_avg_memory_usage: {"column": "avg_used_js_heap_size", + "extraJoin": "events.performance"}, + schemas.PerformanceEventType.fetch_failed: {"column": "success", "extraJoin": None}, + # schemas.PerformanceEventType.fetch_duration: {"column": "duration", "extraJoin": None}, + schemas.PerformanceEventType.location_largest_contentful_paint_time: {"column": "first_contentful_paint_time", + "extraJoin": None} + }.get(perf) diff --git a/api/chalicelib/core/projects.py b/api/chalicelib/core/projects.py index a9e1cdf92..2536ee956 100644 --- a/api/chalicelib/core/projects.py +++ b/api/chalicelib/core/projects.py @@ -1,5 +1,6 @@ import json +import schemas from chalicelib.core import users from chalicelib.utils import pg_client, helper, dev from chalicelib.utils.TimeUTC import TimeUTC @@ -41,18 +42,29 @@ def __create(tenant_id, name): @dev.timed -def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, stack_integrations=False,version=False): +def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, stack_integrations=False, version=False, + last_tracker_version=None): with pg_client.PostgresClient() as cur: + tracker_query = "" + if last_tracker_version is not None and len(last_tracker_version) > 0: + tracker_query = cur.mogrify( + """,(SELECT tracker_version FROM public.sessions + WHERE sessions.project_id = s.project_id + AND tracker_version=%(version)s AND tracker_version IS NOT NULL LIMIT 1) AS tracker_version""", + {"version": last_tracker_version}).decode('UTF-8') + elif version: + tracker_query = ",(SELECT tracker_version FROM public.sessions WHERE sessions.project_id = s.project_id ORDER 
BY start_ts DESC LIMIT 1) AS tracker_version" + cur.execute(f"""\ SELECT s.project_id, s.name, s.project_key {',s.gdpr' if gdpr else ''} {',COALESCE((SELECT TRUE FROM public.sessions WHERE sessions.project_id = s.project_id LIMIT 1), FALSE) AS recorded' if recorded else ''} {',stack_integrations.count>0 AS stack_integrations' if stack_integrations else ''} - {',(SELECT tracker_version FROM public.sessions WHERE sessions.project_id = s.project_id ORDER BY start_ts DESC LIMIT 1) AS tracker_version' if version else ''} + {tracker_query} FROM public.projects AS s {'LEFT JOIN LATERAL (SELECT COUNT(*) AS count FROM public.integrations WHERE s.project_id = integrations.project_id LIMIT 1) AS stack_integrations ON TRUE' if stack_integrations else ''} - where s.deleted_at IS NULL + WHERE s.deleted_at IS NULL ORDER BY s.project_id;""" ) rows = cur.fetchall() @@ -75,8 +87,19 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st return helper.list_to_camel_case(rows) -def get_project(tenant_id, project_id, include_last_session=False, include_gdpr=None): +def get_project(tenant_id, project_id, include_last_session=False, include_gdpr=None, version=False, + last_tracker_version=None): with pg_client.PostgresClient() as cur: + tracker_query = "" + if last_tracker_version is not None and len(last_tracker_version) > 0: + tracker_query = cur.mogrify( + """,(SELECT tracker_version FROM public.sessions + WHERE sessions.project_id = s.project_id + AND tracker_version=%(version)s AND tracker_version IS NOT NULL LIMIT 1) AS tracker_version""", + {"version": last_tracker_version}).decode('UTF-8') + elif version: + tracker_query = ",(SELECT tracker_version FROM public.sessions WHERE sessions.project_id = s.project_id ORDER BY start_ts DESC LIMIT 1) AS tracker_version" + query = cur.mogrify(f"""\ SELECT s.project_id, @@ -84,6 +107,7 @@ def get_project(tenant_id, project_id, include_last_session=False, include_gdpr= s.name {",(SELECT max(ss.start_ts) FROM public.sessions AS ss WHERE ss.project_id = %(project_id)s) AS last_recorded_session_at" if include_last_session else ""} {',s.gdpr' if include_gdpr else ''} + {tracker_query} FROM public.projects AS s where s.project_id =%(project_id)s AND s.deleted_at IS NULL @@ -96,6 +120,7 @@ def get_project(tenant_id, project_id, include_last_session=False, include_gdpr= row = cur.fetchone() return helper.dict_to_camel_case(row) + def get_project_by_key(tenant_id, project_key, include_last_session=False, include_gdpr=None): with pg_client.PostgresClient() as cur: query = cur.mogrify(f"""\ @@ -117,20 +142,20 @@ def get_project_by_key(tenant_id, project_key, include_last_session=False, inclu return helper.dict_to_camel_case(row) -def create(tenant_id, user_id, data, skip_authorization=False): +def create(tenant_id, user_id, data: schemas.CreateProjectSchema, skip_authorization=False): if not skip_authorization: admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} - return {"data": __create(tenant_id=tenant_id, name=data.get("name", "my first project"))} + return {"data": __create(tenant_id=tenant_id, name=data.name)} -def edit(tenant_id, user_id, project_id, data): +def edit(tenant_id, user_id, project_id, data: schemas.CreateProjectSchema): admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} return {"data": __update(tenant_id=tenant_id, project_id=project_id, - 
changes={"name": data.get("name", "my first project")})} + changes={"name": data.name})} def delete(tenant_id, user_id, project_id): diff --git a/api/chalicelib/core/reset_password.py b/api/chalicelib/core/reset_password.py index a8dbabf31..1baaf82d8 100644 --- a/api/chalicelib/core/reset_password.py +++ b/api/chalicelib/core/reset_password.py @@ -1,26 +1,25 @@ +import schemas from chalicelib.core import users from chalicelib.utils import email_helper, captcha, helper -def reset(data): +def reset(data: schemas.ForgetPasswordPayloadSchema): print("====================== reset password ===============") print(data) - if helper.allow_captcha() and not captcha.is_valid(data["g-recaptcha-response"]): + if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response): print("error: Invalid captcha.") return {"errors": ["Invalid captcha."]} - if "email" not in data: - return {"errors": ["email not found in body"]} if not helper.has_smtp(): return {"errors": ["no SMTP configuration found, you can ask your admin to reset your password"]} - a_users = users.get_by_email_only(data["email"]) + a_users = users.get_by_email_only(data.email) if len(a_users) > 1: - print(f"multiple users found for [{data['email']}] please contact our support") + print(f"multiple users found for [{data.email}] please contact our support") return {"errors": ["multiple users, please contact our support"]} elif len(a_users) == 1: a_users = a_users[0] invitation_link = users.generate_new_invitation(user_id=a_users["id"]) - email_helper.send_forgot_password(recipient=data["email"], invitation_link=invitation_link) + email_helper.send_forgot_password(recipient=data.email, invitation_link=invitation_link) else: - print(f"invalid email address [{data['email']}]") + print(f"invalid email address [{data.email}]") return {"errors": ["invalid email address"]} return {"data": {"state": "success"}} diff --git a/api/chalicelib/core/saved_search.py b/api/chalicelib/core/saved_search.py new file mode 100644 index 000000000..dfa9a1dcf --- /dev/null +++ b/api/chalicelib/core/saved_search.py @@ -0,0 +1,115 @@ +import json + +import schemas +from chalicelib.utils import helper, pg_client +from chalicelib.utils.TimeUTC import TimeUTC + + +def create(project_id, user_id, data: schemas.SavedSearchSchema): + with pg_client.PostgresClient() as cur: + data = data.dict() + data["filter"] = json.dumps(data["filter"]) + query = cur.mogrify("""\ + INSERT INTO public.searches (project_id, user_id, name, filter,is_public) + VALUES (%(project_id)s, %(user_id)s, %(name)s, %(filter)s::jsonb,%(is_public)s) + RETURNING *;""", {"user_id": user_id, "project_id": project_id, **data}) + cur.execute( + query + ) + r = cur.fetchone() + r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) + r = helper.dict_to_camel_case(r) + return {"data": r} + + +def update(search_id, project_id, user_id, data: schemas.SavedSearchSchema): + with pg_client.PostgresClient() as cur: + data = data.dict() + data["filter"] = json.dumps(data["filter"]) + query = cur.mogrify(f"""\ + UPDATE public.searches + SET name = %(name)s, + filter = %(filter)s, + is_public = %(is_public)s + WHERE search_id=%(search_id)s + AND project_id= %(project_id)s + AND (user_id = %(user_id)s OR is_public) + RETURNING *;""", {"search_id": search_id, "project_id": project_id, "user_id": user_id, **data}) + cur.execute( + query + ) + r = cur.fetchone() + r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) + r = helper.dict_to_camel_case(r) + # r["filter"]["startDate"], 
r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"]) + return r + + +def get_all(project_id, user_id, details=False): + with pg_client.PostgresClient() as cur: + print(cur.mogrify( + f"""\ + SELECT search_id, project_id, user_id, name, created_at, deleted_at, is_public + {",filter" if details else ""} + FROM public.searches + WHERE project_id = %(project_id)s + AND deleted_at IS NULL + AND (user_id = %(user_id)s OR is_public);""", + {"project_id": project_id, "user_id": user_id} + )) + cur.execute( + cur.mogrify( + f"""\ + SELECT search_id, project_id, user_id, name, created_at, deleted_at, is_public + {",filter" if details else ""} + FROM public.searches + WHERE project_id = %(project_id)s + AND deleted_at IS NULL + AND (user_id = %(user_id)s OR is_public);""", + {"project_id": project_id, "user_id": user_id} + ) + ) + + rows = cur.fetchall() + rows = helper.list_to_camel_case(rows) + for row in rows: + row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"]) + return rows + + +def delete(project_id, search_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify("""\ + UPDATE public.searches + SET deleted_at = timezone('utc'::text, now()) + WHERE project_id = %(project_id)s + AND search_id = %(search_id)s + AND (user_id = %(user_id)s OR is_public);""", + {"search_id": search_id, "project_id": project_id, "user_id": user_id}) + ) + + return {"state": "success"} + + +def get(search_id, project_id, user_id): + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + """SELECT + * + FROM public.searches + WHERE project_id = %(project_id)s + AND deleted_at IS NULL + AND search_id = %(search_id)s + AND (user_id = %(user_id)s OR is_public);""", + {"search_id": search_id, "project_id": project_id, "user_id": user_id} + ) + ) + + f = helper.dict_to_camel_case(cur.fetchone()) + if f is None: + return None + + f["createdAt"] = TimeUTC.datetime_to_timestamp(f["createdAt"]) + return f diff --git a/api/chalicelib/core/sessions.py b/api/chalicelib/core/sessions.py index aa1ab3d58..8eb58a29d 100644 --- a/api/chalicelib/core/sessions.py +++ b/api/chalicelib/core/sessions.py @@ -1,6 +1,7 @@ -from chalicelib.utils import pg_client, helper, dev -from chalicelib.core import events, sessions_metas, socket_ios, metadata, events_ios, \ - sessions_mobs, issues, projects, errors, resources, assist +import schemas +from chalicelib.core import events, metadata, events_ios, \ + sessions_mobs, issues, projects, errors, resources, assist, performance_event +from chalicelib.utils import pg_client, helper, dev, metrics_helper SESSION_PROJECTION_COLS = """s.project_id, s.session_id::text AS session_id, @@ -103,368 +104,69 @@ def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_ return None -def sessions_args(args, params): - if params is not None: - for key in ['userOs', 'userBrowser', 'userCountry', 'path', 'path_in_order', 'after', 'minDuration', - 'maxDuration', 'sortSessions', 'eventsCount', 'consoleLogCount', 'startDate', 'endDate', - 'consoleLog', 'location']: - args[key] = params.get(key) - - -new_line = "\n" - - -def __get_sql_operator(op): - op = op.lower() +def __get_sql_operator(op: schemas.SearchEventOperator): return { - "is": "=", - "on": "=", - "isnot": "!=", - "noton": "!=", - "contains": "ILIKE", - "notcontains": "NOT ILIKE", + schemas.SearchEventOperator._is: "=", + schemas.SearchEventOperator._is_any: "IN", + schemas.SearchEventOperator._on: "=", + schemas.SearchEventOperator._on_any: "IN", + 
schemas.SearchEventOperator._is_not: "!=", + schemas.SearchEventOperator._not_on: "!=", + schemas.SearchEventOperator._contains: "ILIKE", + schemas.SearchEventOperator._not_contains: "NOT ILIKE", + schemas.SearchEventOperator._starts_with: "ILIKE", + schemas.SearchEventOperator._ends_with: "ILIKE", }.get(op, "=") -def __is_negation_operator(op): - return op in ("!=", "NOT ILIKE") +def __is_negation_operator(op: schemas.SearchEventOperator): + return op in [schemas.SearchEventOperator._is_not, + schemas.SearchEventOperator._not_on, + schemas.SearchEventOperator._not_contains] def __reverse_sql_operator(op): return "=" if op == "!=" else "!=" if op == "=" else "ILIKE" if op == "NOT ILIKE" else "NOT ILIKE" -def __get_sql_operator_multiple(op): - op = op.lower() - return " IN " if op == "is" else " NOT IN " - - -def __get_sql_operator_boolean(op): - op = op.lower() - return True if op == "true" else False +def __get_sql_operator_multiple(op: schemas.SearchEventOperator): + return " IN " if op not in [schemas.SearchEventOperator._is_not, schemas.SearchEventOperator._not_on, + schemas.SearchEventOperator._not_contains] else " NOT IN " def __get_sql_value_multiple(values): if isinstance(values, tuple): return values - return tuple([v for v in values]) + return tuple(values) if isinstance(values, list) else (values,) + + +def _multiple_conditions(condition, values, value_key="value", is_not=False): + query = [] + for i in range(len(values)): + k = f"{value_key}_{i}" + query.append(condition.replace(value_key, k)) + return "(" + (" AND " if is_not else " OR ").join(query) + ")" + + +def _multiple_values(values, value_key="value"): + query_values = {} + for i in range(len(values)): + k = f"{value_key}_{i}" + query_values[k] = values[i] + return query_values + + +def _isAny_opreator(op: schemas.SearchEventOperator): + return op in [schemas.SearchEventOperator._on_any, schemas.SearchEventOperator._is_any] @dev.timed -def search2_pg(data, project_id, user_id, favorite_only=False, errors_only=False, error_status="ALL", - count_only=False, issue=None): - sessions = [] - generic_args = {"startDate": data['startDate'], "endDate": data['endDate'], - "projectId": project_id, - "userId": user_id} +def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, favorite_only=False, errors_only=False, + error_status="ALL", count_only=False, issue=None): + full_args, query_part, sort = search_query_parts(data, error_status, errors_only, favorite_only, issue, project_id, + user_id) + with pg_client.PostgresClient() as cur: - ss_constraints = [] - extra_constraints = [ - cur.mogrify("s.project_id = %(project_id)s", {"project_id": project_id}), - cur.mogrify("s.duration IS NOT NULL", {}) - ] - extra_from = "" - fav_only_join = "" - if favorite_only and not errors_only: - fav_only_join = "LEFT JOIN public.user_favorite_sessions AS fs ON fs.session_id = s.session_id" - extra_constraints.append(cur.mogrify("fs.user_id = %(userId)s", {"userId": user_id})) - events_query_part = "" - - if "filters" in data: - meta_keys = metadata.get(project_id=project_id) - meta_keys = {m["key"]: m["index"] for m in meta_keys} - for f in data["filters"]: - if not isinstance(f.get("value"), list): - f["value"] = [f.get("value")] - if len(f["value"]) == 0 or f["value"][0] is None: - continue - filter_type = f["type"].upper() - f["value"] = __get_sql_value_multiple(f["value"]) - if filter_type == sessions_metas.meta_type.USERBROWSER: - op = __get_sql_operator_multiple(f["operator"]) - 
extra_constraints.append(cur.mogrify(f's.user_browser {op} %(value)s', {"value": f["value"]})) - ss_constraints.append(cur.mogrify(f'ms.user_browser {op} %(value)s', {"value": f["value"]})) - - elif filter_type in [sessions_metas.meta_type.USEROS, sessions_metas.meta_type.USEROS_IOS]: - op = __get_sql_operator_multiple(f["operator"]) - extra_constraints.append(cur.mogrify(f's.user_os {op} %(value)s', {"value": f["value"]})) - ss_constraints.append(cur.mogrify(f'ms.user_os {op} %(value)s', {"value": f["value"]})) - - elif filter_type in [sessions_metas.meta_type.USERDEVICE, sessions_metas.meta_type.USERDEVICE_IOS]: - op = __get_sql_operator_multiple(f["operator"]) - extra_constraints.append(cur.mogrify(f's.user_device {op} %(value)s', {"value": f["value"]})) - ss_constraints.append(cur.mogrify(f'ms.user_device {op} %(value)s', {"value": f["value"]})) - - elif filter_type in [sessions_metas.meta_type.USERCOUNTRY, sessions_metas.meta_type.USERCOUNTRY_IOS]: - op = __get_sql_operator_multiple(f["operator"]) - extra_constraints.append(cur.mogrify(f's.user_country {op} %(value)s', {"value": f["value"]})) - ss_constraints.append(cur.mogrify(f'ms.user_country {op} %(value)s', {"value": f["value"]})) - elif filter_type == "duration".upper(): - if len(f["value"]) > 0 and f["value"][0] is not None: - extra_constraints.append( - cur.mogrify("s.duration >= %(minDuration)s", {"minDuration": f["value"][0]})) - ss_constraints.append( - cur.mogrify("ms.duration >= %(minDuration)s", {"minDuration": f["value"][0]})) - if len(f["value"]) > 1 and f["value"][1] is not None and f["value"][1] > 0: - extra_constraints.append( - cur.mogrify("s.duration <= %(maxDuration)s", {"maxDuration": f["value"][1]})) - ss_constraints.append( - cur.mogrify("ms.duration <= %(maxDuration)s", {"maxDuration": f["value"][1]})) - elif filter_type == sessions_metas.meta_type.REFERRER: - # events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)" - extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)" - op = __get_sql_operator_multiple(f["operator"]) - extra_constraints.append( - cur.mogrify(f"p.base_referrer {op} %(referrer)s", {"referrer": f["value"]})) - elif filter_type == events.event_type.METADATA.ui_type: - op = __get_sql_operator(f["operator"]) - if f.get("key") in meta_keys.keys(): - extra_constraints.append( - cur.mogrify(f"s.{metadata.index_to_colname(meta_keys[f['key']])} {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f["value"][0], op)})) - ss_constraints.append( - cur.mogrify(f"ms.{metadata.index_to_colname(meta_keys[f['key']])} {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f["value"][0], op)})) - elif filter_type in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: - op = __get_sql_operator(f["operator"]) - extra_constraints.append( - cur.mogrify(f"s.user_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f["value"][0], op)}) - ) - ss_constraints.append( - cur.mogrify(f"ms.user_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f["value"][0], op)}) - ) - elif filter_type in [sessions_metas.meta_type.USERANONYMOUSID, - sessions_metas.meta_type.USERANONYMOUSID_IOS]: - op = __get_sql_operator(f["operator"]) - extra_constraints.append( - cur.mogrify(f"s.user_anonymous_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f["value"][0], op)}) - ) - ss_constraints.append( - cur.mogrify(f"ms.user_anonymous_id {op} %(value)s", - {"value": 
helper.string_to_sql_like_with_op(f["value"][0], op)}) - ) - elif filter_type in [sessions_metas.meta_type.REVID, sessions_metas.meta_type.REVID_IOS]: - op = __get_sql_operator(f["operator"]) - extra_constraints.append( - cur.mogrify(f"s.rev_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f["value"][0], op)}) - ) - ss_constraints.append( - cur.mogrify(f"ms.rev_id {op} %(value)s", - {"value": helper.string_to_sql_like_with_op(f["value"][0], op)}) - ) - - # --------------------------------------------------------------------------- - if len(data.get("events", [])) > 0: - ss_constraints = [s.decode('UTF-8') for s in ss_constraints] - events_query_from = [] - event_index = 0 - - for event in data["events"]: - event_type = event["type"].upper() - if event.get("operator") is None: - event["operator"] = "is" - op = __get_sql_operator(event["operator"]) - is_not = False - if __is_negation_operator(op): - is_not = True - op = __reverse_sql_operator(op) - if event_index == 0: - event_from = "%s INNER JOIN public.sessions AS ms USING (session_id)" - event_where = ["ms.project_id = %(projectId)s", "main.timestamp >= %(startDate)s", - "main.timestamp <= %(endDate)s", "ms.start_ts >= %(startDate)s", - "ms.start_ts <= %(endDate)s", "ms.duration IS NOT NULL"] - else: - event_from = "%s" - event_where = ["main.timestamp >= %(startDate)s", "main.timestamp <= %(endDate)s", - f"event_{event_index - 1}.timestamp <= main.timestamp", - "main.session_id=event_0.session_id"] - event_args = {"value": helper.string_to_sql_like_with_op(event['value'], op)} - if event_type not in list(events.SUPPORTED_TYPES.keys()) \ - or event.get("value") in [None, "", "*"] \ - and (event_type != events.event_type.ERROR.ui_type \ - or event_type != events.event_type.ERROR_IOS.ui_type): - continue - if event_type == events.event_type.CLICK.ui_type: - event_from = event_from % f"{events.event_type.CLICK.table} AS main " - event_where.append(f"main.{events.event_type.CLICK.column} {op} %(value)s") - - elif event_type == events.event_type.INPUT.ui_type: - event_from = event_from % f"{events.event_type.INPUT.table} AS main " - event_where.append(f"main.{events.event_type.INPUT.column} {op} %(value)s") - if len(event.get("custom", "")) > 0: - event_where.append("main.value ILIKE %(custom)s") - event_args["custom"] = helper.string_to_sql_like_with_op(event['custom'], "ILIKE") - elif event_type == events.event_type.LOCATION.ui_type: - event_from = event_from % f"{events.event_type.LOCATION.table} AS main " - event_where.append(f"main.{events.event_type.LOCATION.column} {op} %(value)s") - elif event_type == events.event_type.CUSTOM.ui_type: - event_from = event_from % f"{events.event_type.CUSTOM.table} AS main " - event_where.append(f"main.{events.event_type.CUSTOM.column} {op} %(value)s") - elif event_type == events.event_type.REQUEST.ui_type: - event_from = event_from % f"{events.event_type.REQUEST.table} AS main " - event_where.append(f"main.{events.event_type.REQUEST.column} {op} %(value)s") - elif event_type == events.event_type.GRAPHQL.ui_type: - event_from = event_from % f"{events.event_type.GRAPHQL.table} AS main " - event_where.append(f"main.{events.event_type.GRAPHQL.column} {op} %(value)s") - elif event_type == events.event_type.STATEACTION.ui_type: - event_from = event_from % f"{events.event_type.STATEACTION.table} AS main " - event_where.append(f"main.{events.event_type.STATEACTION.column} {op} %(value)s") - elif event_type == events.event_type.ERROR.ui_type: - if event.get("source") in [None, "*", ""]: - 
event["source"] = "js_exception" - event_from = event_from % f"{events.event_type.ERROR.table} AS main INNER JOIN public.errors AS main1 USING(error_id)" - if event.get("value") not in [None, "*", ""]: - event_where.append(f"(main1.message {op} %(value)s OR main1.name {op} %(value)s)") - if event.get("source") not in [None, "*", ""]: - event_where.append(f"main1.source = %(source)s") - event_args["source"] = event["source"] - elif event.get("source") not in [None, "*", ""]: - event_where.append(f"main1.source = %(source)s") - event_args["source"] = event["source"] - - # ----- IOS - elif event_type == events.event_type.CLICK_IOS.ui_type: - event_from = event_from % f"{events.event_type.CLICK_IOS.table} AS main " - event_where.append(f"main.{events.event_type.CLICK_IOS.column} {op} %(value)s") - - elif event_type == events.event_type.INPUT_IOS.ui_type: - event_from = event_from % f"{events.event_type.INPUT_IOS.table} AS main " - event_where.append(f"main.{events.event_type.INPUT_IOS.column} {op} %(value)s") - - if len(event.get("custom", "")) > 0: - event_where.append("main.value ILIKE %(custom)s") - event_args["custom"] = helper.string_to_sql_like_with_op(event['custom'], "ILIKE") - elif event_type == events.event_type.VIEW_IOS.ui_type: - event_from = event_from % f"{events.event_type.VIEW_IOS.table} AS main " - event_where.append(f"main.{events.event_type.VIEW_IOS.column} {op} %(value)s") - elif event_type == events.event_type.CUSTOM_IOS.ui_type: - event_from = event_from % f"{events.event_type.CUSTOM_IOS.table} AS main " - event_where.append(f"main.{events.event_type.CUSTOM_IOS.column} {op} %(value)s") - elif event_type == events.event_type.REQUEST_IOS.ui_type: - event_from = event_from % f"{events.event_type.REQUEST_IOS.table} AS main " - event_where.append(f"main.{events.event_type.REQUEST_IOS.column} {op} %(value)s") - elif event_type == events.event_type.ERROR_IOS.ui_type: - event_from = event_from % f"{events.event_type.ERROR_IOS.table} AS main INNER JOIN public.crashes_ios AS main1 USING(crash_id)" - if event.get("value") not in [None, "*", ""]: - event_where.append(f"(main1.reason {op} %(value)s OR main1.name {op} %(value)s)") - - else: - continue - if event_index == 0: - event_where += ss_constraints - if is_not: - if event_index == 0: - events_query_from.append(cur.mogrify(f"""\ - (SELECT - session_id, - 0 AS timestamp, - {event_index} AS funnel_step - FROM sessions - WHERE EXISTS(SELECT session_id - FROM {event_from} - WHERE {" AND ".join(event_where)} - AND sessions.session_id=ms.session_id) IS FALSE - AND project_id = %(projectId)s - AND start_ts >= %(startDate)s - AND start_ts <= %(endDate)s - AND duration IS NOT NULL - ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\ - """, {**generic_args, **event_args}).decode('UTF-8')) - else: - events_query_from.append(cur.mogrify(f"""\ - (SELECT - event_0.session_id, - event_{event_index - 1}.timestamp AS timestamp, - {event_index} AS funnel_step - WHERE EXISTS(SELECT session_id FROM {event_from} WHERE {" AND ".join(event_where)}) IS FALSE - ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\ - """, {**generic_args, **event_args}).decode('UTF-8')) - else: - events_query_from.append(cur.mogrify(f"""\ - (SELECT main.session_id, MIN(timestamp) AS timestamp,{event_index} AS funnel_step - FROM {event_from} - WHERE {" AND ".join(event_where)} - GROUP BY 1 - ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\ - """, {**generic_args, **event_args}).decode('UTF-8')) - event_index += 1 - if event_index 
> 0: - events_query_part = f"""SELECT - event_0.session_id, - MIN(event_0.timestamp) AS first_event_ts, - MAX(event_{event_index - 1}.timestamp) AS last_event_ts - FROM {(" INNER JOIN LATERAL ").join(events_query_from)} - GROUP BY 1 - {fav_only_join}""" - else: - data["events"] = [] - - # --------------------------------------------------------------------------- - - if data.get("startDate") is not None: - extra_constraints.append(cur.mogrify("s.start_ts >= %(startDate)s", {"startDate": data['startDate']})) - else: - data['startDate'] = None - if data.get("endDate") is not None: - extra_constraints.append(cur.mogrify("s.start_ts <= %(endDate)s", {"endDate": data['endDate']})) - else: - data['endDate'] = None - - if data.get('platform') is not None: - if data['platform'] == 'mobile': - extra_constraints.append(b"s.user_os in ('Android','BlackBerry OS','iOS','Tizen','Windows Phone')") - elif data['platform'] == 'desktop': - extra_constraints.append( - b"s.user_os in ('Chrome OS','Fedora','Firefox OS','Linux','Mac OS X','Ubuntu','Windows')") - - order = "DESC" - if data.get("order") is not None: - order = data["order"] - sort = 'session_id' - if data.get("sort") is not None and data["sort"] != "session_id": - sort += " " + order + "," + helper.key_to_snake_case(data["sort"]) - else: - sort = 'session_id' - - if errors_only: - extra_from += f" INNER JOIN {events.event_type.ERROR.table} AS er USING (session_id) INNER JOIN public.errors AS ser USING (error_id)" - extra_constraints.append(b"ser.source = 'js_exception'") - if error_status != "ALL": - extra_constraints.append(cur.mogrify("ser.status = %(status)s", {"status": error_status.lower()})) - if favorite_only: - extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)" - extra_constraints.append(cur.mogrify("ufe.user_id = %(user_id)s", {"user_id": user_id})) - - extra_constraints = [extra.decode('UTF-8') + "\n" for extra in extra_constraints] - if not favorite_only and not errors_only: - extra_from += """LEFT JOIN (SELECT user_id, session_id - FROM public.user_favorite_sessions - WHERE user_id = %(userId)s) AS favorite_sessions - USING (session_id)""" - extra_join = "" - if issue is not None: - extra_join = cur.mogrify(""" - INNER JOIN LATERAL(SELECT TRUE FROM events_common.issues INNER JOIN public.issues AS p_issues USING (issue_id) - WHERE issues.session_id=f.session_id - AND p_issues.type=%(type)s - AND p_issues.context_string=%(contextString)s - AND timestamp >= f.first_event_ts - AND timestamp <= f.last_event_ts) AS issues ON(TRUE) - """, {"contextString": issue["contextString"], "type": issue["type"]}).decode('UTF-8') - - query_part = f"""\ - FROM {f"({events_query_part}) AS f" if len(events_query_part) > 0 else "public.sessions AS s"} - {extra_join} - {"INNER JOIN public.sessions AS s USING(session_id)" if len(events_query_part) > 0 else ""} - {extra_from} - WHERE - - {" AND ".join(extra_constraints)}""" - if errors_only: main_query = cur.mogrify(f"""SELECT DISTINCT er.error_id, ser.status, ser.parent_error_id, ser.payload, COALESCE((SELECT TRUE @@ -475,50 +177,600 @@ def search2_pg(data, project_id, user_id, favorite_only=False, errors_only=False FROM public.user_viewed_errors AS ve WHERE er.error_id = ve.error_id AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed - {query_part};""", - generic_args) + {query_part};""", full_args) elif count_only: - main_query = cur.mogrify( - f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions, COUNT(DISTINCT s.user_uuid) AS count_users - {query_part};""", - 
generic_args)
+            main_query = cur.mogrify(f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions,
+                                                COUNT(DISTINCT s.user_uuid) AS count_users
+                                            {query_part};""", full_args)
         else:
-            main_query = cur.mogrify(f"""SELECT * FROM
-                                        (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
-                                        {query_part}
-                                        ORDER BY s.session_id desc) AS filtred_sessions
-                                        ORDER BY favorite DESC, issue_score DESC, {sort} {order};""",
-                                     generic_args)
+            main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count, COALESCE(JSONB_AGG(full_sessions) FILTER (WHERE rn <= 200), '[]'::JSONB) AS sessions
+                                            FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY favorite DESC, issue_score DESC, session_id desc, start_ts desc) AS rn FROM
+                                                (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
+                                                {query_part}
+                                                ORDER BY s.session_id desc) AS filtred_sessions
+                                            ORDER BY favorite DESC, issue_score DESC, {sort} {data.order}) AS full_sessions;""",
+                                     full_args)
         # print("--------------------")
         # print(main_query)
         cur.execute(main_query)
-
+        # print("--------------------")
         if count_only:
             return helper.dict_to_camel_case(cur.fetchone())
-        sessions = []
-        total = cur.rowcount
-        row = cur.fetchone()
-        limit = 200
-        while row is not None and len(sessions) < limit:
-            if row.get("favorite"):
-                limit += 1
-            sessions.append(row)
-            row = cur.fetchone()
+        sessions = cur.fetchone()
+        total = sessions["count"]
+        sessions = sessions["sessions"]
         if errors_only:
             return sessions
-        if data.get("sort") is not None and data["sort"] != "session_id":
-            sessions = sorted(sessions, key=lambda s: s[helper.key_to_snake_case(data["sort"])],
-                              reverse=data.get("order", "DESC").upper() == "DESC")
+        if data.sort is not None and data.sort != "session_id":
+            sessions = sorted(sessions, key=lambda s: s[helper.key_to_snake_case(data.sort)],
+                              reverse=data.order.upper() == "DESC")
         return {
             'total': total,
             'sessions': helper.list_to_camel_case(sessions)
         }
 
 
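The rewritten search2_pg above replaces the old fetch-rows-until-limit loop with a single query: ROW_NUMBER() numbers the ordered rows, JSONB_AGG(...) FILTER (WHERE rn <= 200) keeps only the first page, and COUNT() still sees every row, so the total and the page arrive in one round trip. A generic sketch of the same window-function trick, assuming a psycopg2 RealDictCursor (my_table and its ordering column are placeholders, not names from the patch):

def count_and_first_page(cur, page_size=200):
    # COUNT(t) counts all numbered rows; the FILTER clause restricts only
    # the JSONB_AGG aggregate, so pagination costs no second query.
    cur.execute("""SELECT COUNT(t) AS total,
                          COALESCE(JSONB_AGG(t) FILTER (WHERE rn <= %(page_size)s), '[]'::JSONB) AS page
                   FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY created_at DESC) AS rn
                         FROM my_table) AS t;""",
                {"page_size": page_size})
    r = cur.fetchone()
    return r["total"], r["page"]  # total row count + first page as a list of dicts

+@dev.timed
+def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, density: int,
+                   view_type: schemas.MetricViewType):
+    step_size = metrics_helper.__get_step_size(endTimestamp=data.endDate, startTimestamp=data.startDate,
+                                               density=density, factor=1)
+    full_args, query_part, sort = search_query_parts(data=data, error_status=None, errors_only=False,
+                                                     favorite_only=False, issue=None, project_id=project_id,
+                                                     user_id=None)
+    full_args["step_size"] = step_size
+    with pg_client.PostgresClient() as cur:
+        if view_type == schemas.MetricViewType.line_chart:
+            main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT DISTINCT ON(s.session_id) s.session_id, s.start_ts
+                                                                {query_part})
+                                         SELECT generated_timestamp AS timestamp,
+                                                COUNT(s) AS count
+                                         FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
+                                              LEFT JOIN LATERAL ( SELECT 1 AS s
+                                                                  FROM full_sessions
+                                                                  WHERE start_ts >= generated_timestamp
+                                                                    AND start_ts < generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
+                                         GROUP BY generated_timestamp
+                                         ORDER BY generated_timestamp;""", full_args)
+        else:
+            main_query = 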
cur.mogrify(f"""SELECT count(DISTINCT s.session_id) AS count + {query_part};""", full_args) + + # print("--------------------") + # print(main_query) + cur.execute(main_query) + # print("--------------------") + if view_type == schemas.MetricViewType.line_chart: + sessions = cur.fetchall() + else: + sessions = cur.fetchone()["count"] + return sessions + + +def search_query_parts(data, error_status, errors_only, favorite_only, issue, project_id, user_id): + ss_constraints = [] + full_args = {"project_id": project_id, "startDate": data.startDate, "endDate": data.endDate, + "projectId": project_id, "userId": user_id} + extra_constraints = [ + "s.project_id = %(project_id)s", + "s.duration IS NOT NULL" + ] + extra_from = "" + fav_only_join = "" + if favorite_only and not errors_only: + fav_only_join = "LEFT JOIN public.user_favorite_sessions AS fs ON fs.session_id = s.session_id" + extra_constraints.append("fs.user_id = %(userId)s") + full_args["userId"] = user_id + events_query_part = "" + if len(data.filters) > 0: + meta_keys = None + for i, f in enumerate(data.filters): + if not isinstance(f.value, list): + f.value = [f.value] + if len(f.value) == 0 or f.value[0] is None: + continue + filter_type = f.type + # f.value = __get_sql_value_multiple(f.value) + f.value = helper.values_for_operator(value=f.value, op=f.operator) + f_k = f"f_value{i}" + full_args = {**full_args, **_multiple_values(f.value, value_key=f_k)} + op = __get_sql_operator(f.operator) \ + if filter_type not in [schemas.FilterType.events_count] else f.operator + is_any = _isAny_opreator(f.operator) + is_not = False + if __is_negation_operator(f.operator): + is_not = True + # op = __reverse_sql_operator(op) + if filter_type == schemas.FilterType.user_browser: + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + + elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]: + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + + elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]: + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + + elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]: + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + + elif filter_type in [schemas.FilterType.utm_source]: + if is_any: + extra_constraints.append('s.utm_source IS NOT NULL') + ss_constraints.append('ms.utm_source IS NOT NULL') + else: + extra_constraints.append( + _multiple_conditions(f's.utm_source {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + 
_multiple_conditions(f'ms.utm_source {op} %({f_k})s', f.value, is_not=is_not, + value_key=f_k)) + elif filter_type in [schemas.FilterType.utm_medium]: + if is_any: + extra_constraints.append('s.utm_medium IS NOT NULL') + ss_constraints.append('ms.utm_medium IS NOT NULL') + else: + extra_constraints.append( + _multiple_conditions(f's.utm_medium {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.utm_medium {op} %({f_k})s', f.value, is_not=is_not, + value_key=f_k)) + elif filter_type in [schemas.FilterType.utm_campaign]: + if is_any: + extra_constraints.append('s.utm_campaign IS NOT NULL') + ss_constraints.append('ms.utm_campaign IS NOT NULL') + else: + extra_constraints.append( + _multiple_conditions(f's.utm_campaign {op} %({f_k})s', f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f'ms.utm_campaign {op} %({f_k})s', f.value, is_not=is_not, + value_key=f_k)) + + elif filter_type == schemas.FilterType.duration: + if len(f.value) > 0 and f.value[0] is not None: + extra_constraints.append("s.duration >= %(minDuration)s") + ss_constraints.append("ms.duration >= %(minDuration)s") + full_args["minDuration"] = f.value[0] + if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0: + extra_constraints.append("s.duration <= %(maxDuration)s") + ss_constraints.append("ms.duration <= %(maxDuration)s") + full_args["maxDuration"] = f.value[1] + elif filter_type == schemas.FilterType.referrer: + # events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)" + extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)" + # op = __get_sql_operator_multiple(f.operator) + extra_constraints.append( + _multiple_conditions(f"p.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + elif filter_type == events.event_type.METADATA.ui_type: + # get metadata list only if you need it + if meta_keys is None: + meta_keys = metadata.get(project_id=project_id) + meta_keys = {m["key"]: m["index"] for m in meta_keys} + # op = __get_sql_operator(f.operator) + if f.key in meta_keys.keys(): + extra_constraints.append( + _multiple_conditions(f"s.{metadata.index_to_colname(meta_keys[f.key])} {op} %({f_k})s", + f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.{metadata.index_to_colname(meta_keys[f.key])} {op} %({f_k})s", + f.value, is_not=is_not, value_key=f_k)) + elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + # op = __get_sql_operator(f.operator) + extra_constraints.append( + _multiple_conditions(f"s.user_id {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.user_id {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + elif filter_type in [schemas.FilterType.user_anonymous_id, + schemas.FilterType.user_anonymous_id_ios]: + # op = __get_sql_operator(f.operator) + extra_constraints.append( + _multiple_conditions(f"s.user_anonymous_id {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.user_anonymous_id {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]: + # op = __get_sql_operator(f.operator) + extra_constraints.append( + _multiple_conditions(f"s.rev_id {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + ss_constraints.append( + 
_multiple_conditions(f"ms.rev_id {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k)) + elif filter_type == schemas.FilterType.platform: + # op = __get_sql_operator(f.operator) + extra_constraints.append( + _multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.user_device_type {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + elif filter_type == schemas.FilterType.issue: + extra_constraints.append( + _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"%({f_k})s {op} ANY (ms.issue_types)", f.value, is_not=is_not, + value_key=f_k)) + elif filter_type == schemas.FilterType.events_count: + extra_constraints.append( + _multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + ss_constraints.append( + _multiple_conditions(f"ms.events_count {op} %({f_k})s", f.value, is_not=is_not, + value_key=f_k)) + # --------------------------------------------------------------------------- + if len(data.events) > 0: + # ss_constraints = [s.decode('UTF-8') for s in ss_constraints] + events_query_from = [] + event_index = 0 + or_events = data.events_order == schemas.SearchEventOrder._or + # events_joiner = " FULL JOIN " if or_events else " INNER JOIN LATERAL " + events_joiner = " UNION " if or_events else " INNER JOIN LATERAL " + for i, event in enumerate(data.events): + event_type = event.type + is_any = _isAny_opreator(event.operator) + if not isinstance(event.value, list): + event.value = [event.value] + op = __get_sql_operator(event.operator) + is_not = False + if __is_negation_operator(event.operator): + is_not = True + op = __reverse_sql_operator(op) + if event_index == 0 or or_events: + event_from = "%s INNER JOIN public.sessions AS ms USING (session_id)" + event_where = ["ms.project_id = %(projectId)s", "main.timestamp >= %(startDate)s", + "main.timestamp <= %(endDate)s", "ms.start_ts >= %(startDate)s", + "ms.start_ts <= %(endDate)s", "ms.duration IS NOT NULL"] + else: + event_from = "%s" + event_where = ["main.timestamp >= %(startDate)s", "main.timestamp <= %(endDate)s", + "main.session_id=event_0.session_id"] + if data.events_order == schemas.SearchEventOrder._then: + event_where.append(f"event_{event_index - 1}.timestamp <= main.timestamp") + e_k = f"e_value{i}" + if event.type != schemas.PerformanceEventType.time_between_events: + event.value = helper.values_for_operator(value=event.value, op=event.operator) + full_args = {**full_args, **_multiple_values(event.value, value_key=e_k)} + + # if event_type not in list(events.SUPPORTED_TYPES.keys()) \ + # or event.value in [None, "", "*"] \ + # and (event_type != events.event_type.ERROR.ui_type \ + # or event_type != events.event_type.ERROR_IOS.ui_type): + # continue + if event_type == events.event_type.CLICK.ui_type: + event_from = event_from % f"{events.event_type.CLICK.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.CLICK.column} {op} %({e_k})s", event.value, + value_key=e_k)) + + elif event_type == events.event_type.INPUT.ui_type: + event_from = event_from % f"{events.event_type.INPUT.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.INPUT.column} {op} %({e_k})s", event.value, + value_key=e_k)) + if event.custom is not None and len(event.custom) > 0: + 
event_where.append(_multiple_conditions(f"main.value ILIKE %(custom{i})s", event.custom,
+                                                                value_key=f"custom{i}"))
+                        full_args = {**full_args, **_multiple_values(event.custom, value_key=f"custom{i}")}
+
+                elif event_type == events.event_type.LOCATION.ui_type:
+                    event_from = event_from % f"{events.event_type.LOCATION.table} AS main "
+                    if not is_any:
+                        event_where.append(
+                            _multiple_conditions(f"main.{events.event_type.LOCATION.column} {op} %({e_k})s",
+                                                 event.value, value_key=e_k))
+                elif event_type == events.event_type.CUSTOM.ui_type:
+                    event_from = event_from % f"{events.event_type.CUSTOM.table} AS main "
+                    if not is_any:
+                        event_where.append(
+                            _multiple_conditions(f"main.{events.event_type.CUSTOM.column} {op} %({e_k})s", event.value,
+                                                 value_key=e_k))
+                elif event_type == events.event_type.REQUEST.ui_type:
+                    event_from = event_from % f"{events.event_type.REQUEST.table} AS main "
+                    if not is_any:
+                        event_where.append(
+                            _multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s",
+                                                 event.value, value_key=e_k))
+                elif event_type == events.event_type.GRAPHQL.ui_type:
+                    event_from = event_from % f"{events.event_type.GRAPHQL.table} AS main "
+                    if not is_any:
+                        event_where.append(
+                            _multiple_conditions(f"main.{events.event_type.GRAPHQL.column} {op} %({e_k})s",
+                                                 event.value, value_key=e_k))
+                elif event_type == events.event_type.STATEACTION.ui_type:
+                    event_from = event_from % f"{events.event_type.STATEACTION.table} AS main "
+                    if not is_any:
+                        event_where.append(
+                            _multiple_conditions(f"main.{events.event_type.STATEACTION.column} {op} %({e_k})s",
+                                                 event.value, value_key=e_k))
+                elif event_type == events.event_type.ERROR.ui_type:
+                    # if event.source in [None, "*", ""]:
+                    #     event.source = "js_exception"
+                    event_from = event_from % f"{events.event_type.ERROR.table} AS main INNER JOIN public.errors AS main1 USING(error_id)"
+                    if event.value not in [None, "*", ""]:
+                        if not is_any:
+                            # bind per-value keys like the ERROR_IOS branch; a raw %({e_k})s
+                            # placeholder has no matching key in full_args
+                            event_where.append(
+                                _multiple_conditions(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)",
+                                                     event.value, value_key=e_k))
+                        if event.source not in [None, "*", ""]:
+                            event_where.append(f"main1.source = %(source)s")
+                            full_args["source"] = event.source
+                    elif event.source not in [None, "*", ""]:
+                        event_where.append(f"main1.source = %(source)s")
+                        full_args["source"] = event.source
+
+                # ----- IOS
+                elif event_type == events.event_type.CLICK_IOS.ui_type:
+                    event_from = event_from % f"{events.event_type.CLICK_IOS.table} AS main "
+                    if not is_any:
+                        event_where.append(
+                            _multiple_conditions(f"main.{events.event_type.CLICK_IOS.column} {op} %({e_k})s",
+                                                 event.value, value_key=e_k))
+
+                elif event_type == events.event_type.INPUT_IOS.ui_type:
+                    event_from = event_from % f"{events.event_type.INPUT_IOS.table} AS main "
+                    if not is_any:
+                        event_where.append(
+                            _multiple_conditions(f"main.{events.event_type.INPUT_IOS.column} {op} %({e_k})s",
+                                                 event.value, value_key=e_k))
+                    if event.custom is not None and len(event.custom) > 0:
+                        event_where.append(_multiple_conditions(f"main.value ILIKE %(custom{i})s", event.custom,
+                                                                value_key=f"custom{i}"))
+                        full_args = {**full_args, **_multiple_values(event.custom, f"custom{i}")}
+                elif event_type == events.event_type.VIEW_IOS.ui_type:
+                    event_from = event_from % f"{events.event_type.VIEW_IOS.table} AS main "
+                    if not is_any:
+                        event_where.append(
+                            _multiple_conditions(f"main.{events.event_type.VIEW_IOS.column} {op} %({e_k})s",
+                                                 event.value, value_key=e_k))
+                elif event_type == events.event_type.CUSTOM_IOS.ui_type:
+                    event_from = event_from % f"{events.event_type.CUSTOM_IOS.table} AS main "
+                    if not 
is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.CUSTOM_IOS.column} {op} %({e_k})s", + event.value, value_key=e_k)) + elif event_type == events.event_type.REQUEST_IOS.ui_type: + event_from = event_from % f"{events.event_type.REQUEST_IOS.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.REQUEST_IOS.column} {op} %({e_k})s", + event.value, value_key=e_k)) + elif event_type == events.event_type.ERROR_IOS.ui_type: + event_from = event_from % f"{events.event_type.ERROR_IOS.table} AS main INNER JOIN public.crashes_ios AS main1 USING(crash_id)" + if not is_any and event.value not in [None, "*", ""]: + event_where.append( + _multiple_conditions(f"(main1.reason {op} %({e_k})s OR main1.name {op} %({e_k})s)", + event.value, value_key=e_k)) + elif event_type == schemas.PerformanceEventType.fetch_failed: + event_from = event_from % f"{events.event_type.REQUEST.table} AS main " + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s", + event.value, value_key=e_k)) + col = performance_event.get_col(event_type) + colname = col["column"] + event_where.append(f"main.{colname} = FALSE") + # elif event_type == schemas.PerformanceEventType.fetch_duration: + # event_from = event_from % f"{events.event_type.REQUEST.table} AS main " + # if not is_any: + # event_where.append( + # _multiple_conditions(f"main.{events.event_type.REQUEST.column} {op} %({e_k})s", + # event.value, value_key=e_k)) + # col = performance_event.get_col(event_type) + # colname = col["column"] + # tname = "main" + # e_k += "_custom" + # full_args = {**full_args, **_multiple_values(event.custom, value_key=e_k)} + # event_where.append(f"{tname}.{colname} IS NOT NULL AND {tname}.{colname}>0 AND " + + # _multiple_conditions(f"{tname}.{colname} {event.customOperator} %({e_k})s", + # event.custom, value_key=e_k)) + elif event_type in [schemas.PerformanceEventType.location_dom_complete, + schemas.PerformanceEventType.location_largest_contentful_paint_time, + schemas.PerformanceEventType.location_ttfb, + schemas.PerformanceEventType.location_avg_cpu_load, + schemas.PerformanceEventType.location_avg_memory_usage + ]: + event_from = event_from % f"{events.event_type.LOCATION.table} AS main " + col = performance_event.get_col(event_type) + colname = col["column"] + tname = "main" + if col.get("extraJoin") is not None: + tname = "ej" + event_from += f" INNER JOIN {col['extraJoin']} AS {tname} USING(session_id)" + event_where += [f"{tname}.timestamp >= main.timestamp", f"{tname}.timestamp >= %(startDate)s", + f"{tname}.timestamp <= %(endDate)s"] + if not is_any: + event_where.append( + _multiple_conditions(f"main.{events.event_type.LOCATION.column} {op} %({e_k})s", + event.value, value_key=e_k)) + e_k += "_custom" + full_args = {**full_args, **_multiple_values(event.custom, value_key=e_k)} + + event_where.append(f"{tname}.{colname} IS NOT NULL AND {tname}.{colname}>0 AND " + + _multiple_conditions(f"{tname}.{colname} {event.customOperator} %({e_k})s", + event.custom, value_key=e_k)) + elif event_type == schemas.PerformanceEventType.time_between_events: + event_from = event_from % f"{getattr(events.event_type, event.value[0].type).table} AS main INNER JOIN {getattr(events.event_type, event.value[1].type).table} AS main2 USING(session_id) " + if not isinstance(event.value[0].value, list): + event.value[0].value = [event.value[0].value] + if not isinstance(event.value[1].value, list): + event.value[1].value = 
[event.value[1].value] + event.value[0].value = helper.values_for_operator(value=event.value[0].value, + op=event.value[0].operator) + event.value[1].value = helper.values_for_operator(value=event.value[1].value, + op=event.value[1].operator) + e_k1 = e_k + "_e1" + e_k2 = e_k + "_e2" + full_args = {**full_args, + **_multiple_values(event.value[0].value, value_key=e_k1), + **_multiple_values(event.value[1].value, value_key=e_k2)} + s_op = __get_sql_operator(event.value[0].operator) + event_where += ["main2.timestamp >= %(startDate)s", "main2.timestamp <= %(endDate)s"] + if event_index > 0 and not or_events: + event_where.append("main2.session_id=event_0.session_id") + event_where.append( + _multiple_conditions( + f"main.{getattr(events.event_type, event.value[0].type).column} {s_op} %({e_k1})s", + event.value[0].value, value_key=e_k1)) + s_op = __get_sql_operator(event.value[1].operator) + event_where.append( + _multiple_conditions( + f"main2.{getattr(events.event_type, event.value[1].type).column} {s_op} %({e_k2})s", + event.value[1].value, value_key=e_k2)) + + e_k += "_custom" + full_args = {**full_args, **_multiple_values(event.custom, value_key=e_k)} + event_where.append( + _multiple_conditions(f"main2.timestamp - main.timestamp {event.customOperator} %({e_k})s", + event.custom, value_key=e_k)) + + + else: + continue + if event_index == 0 or or_events: + event_where += ss_constraints + if is_not: + if event_index == 0 or or_events: + events_query_from.append(f"""\ + (SELECT + session_id, + 0 AS timestamp + FROM sessions + WHERE EXISTS(SELECT session_id + FROM {event_from} + WHERE {" AND ".join(event_where)} + AND sessions.session_id=ms.session_id) IS FALSE + AND project_id = %(projectId)s + AND start_ts >= %(startDate)s + AND start_ts <= %(endDate)s + AND duration IS NOT NULL + ) {"" if or_events else (f"AS event_{event_index} " + ("ON(TRUE)" if event_index > 0 else ""))}\ + """) + else: + events_query_from.append(f"""\ + (SELECT + event_0.session_id, + event_{event_index - 1}.timestamp AS timestamp + WHERE EXISTS(SELECT session_id FROM {event_from} WHERE {" AND ".join(event_where)}) IS FALSE + ) AS event_{event_index} {"ON(TRUE)" if event_index > 0 else ""}\ + """) + else: + events_query_from.append(f"""\ + (SELECT main.session_id, MIN(main.timestamp) AS timestamp + FROM {event_from} + WHERE {" AND ".join(event_where)} + GROUP BY 1 + ) {"" if or_events else (f"AS event_{event_index} " + ("ON(TRUE)" if event_index > 0 else ""))}\ + """) + event_index += 1 + if event_index > 0: + if or_events: + events_query_part = f"""SELECT + session_id, + MIN(timestamp) AS first_event_ts, + MAX(timestamp) AS last_event_ts + FROM ({events_joiner.join(events_query_from)}) AS u + GROUP BY 1 + {fav_only_join}""" + else: + events_query_part = f"""SELECT + event_0.session_id, + MIN(event_0.timestamp) AS first_event_ts, + MAX(event_{event_index - 1}.timestamp) AS last_event_ts + FROM {events_joiner.join(events_query_from)} + GROUP BY 1 + {fav_only_join}""" + else: + data.events = []
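+ # Rough shape of the SQL assembled above for two ANDed events (an assumed
+ # CLICK-then-LOCATION search; actual table and column names come from
+ # events.event_type, and events_joiner is defined earlier in this module):
+ #   SELECT event_0.session_id,
+ #          MIN(event_0.timestamp) AS first_event_ts,
+ #          MAX(event_1.timestamp) AS last_event_ts
+ #   FROM (SELECT main.session_id, MIN(main.timestamp) AS timestamp
+ #         FROM events.clicks AS main WHERE ... GROUP BY 1) AS event_0
+ #        INNER JOIN LATERAL (...same shape over the pages table...) AS event_1 ON(TRUE)
+ #   GROUP BY 1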
# b"s.user_os in ('Chrome OS','Fedora','Firefox OS','Linux','Mac OS X','Ubuntu','Windows')") + if data.order is None: + data.order = "DESC" + sort = 'session_id' + if data.sort is not None and data.sort != "session_id": + sort += " " + data.order + "," + helper.key_to_snake_case(data.sort) + else: + sort = 'session_id' + if errors_only: + extra_from += f" INNER JOIN {events.event_type.ERROR.table} AS er USING (session_id) INNER JOIN public.errors AS ser USING (error_id)" + extra_constraints.append("ser.source = 'js_exception'") + if error_status != "ALL": + extra_constraints.append("ser.status = %(error_status)s") + full_args["status"] = error_status.lower() + if favorite_only: + extra_from += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)" + extra_constraints.append("ufe.user_id = %(user_id)s") + # extra_constraints = [extra.decode('UTF-8') + "\n" for extra in extra_constraints] + if not favorite_only and not errors_only and user_id is not None: + extra_from += """LEFT JOIN (SELECT user_id, session_id + FROM public.user_favorite_sessions + WHERE user_id = %(userId)s) AS favorite_sessions + USING (session_id)""" + extra_join = "" + if issue is not None: + extra_join = """ + INNER JOIN LATERAL(SELECT TRUE FROM events_common.issues INNER JOIN public.issues AS p_issues USING (issue_id) + WHERE issues.session_id=f.session_id + AND p_issues.type=%(issue_type)s + AND p_issues.context_string=%(issue_contextString)s + AND timestamp >= f.first_event_ts + AND timestamp <= f.last_event_ts) AS issues ON(TRUE) + """ + full_args["issue_contextString"] = issue["contextString"] + full_args["issue_type"] = issue["type"] + query_part = f"""\ + FROM {f"({events_query_part}) AS f" if len(events_query_part) > 0 else "public.sessions AS s"} + {extra_join} + {"INNER JOIN public.sessions AS s USING(session_id)" if len(events_query_part) > 0 else ""} + {extra_from} + WHERE + {" AND ".join(extra_constraints)}""" + return full_args, query_part, sort + + def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None): if project_id is None: all_projects = projects.get_projects(tenant_id=tenant_id, recording_state=False) @@ -532,8 +784,8 @@ def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None): available_keys = metadata.get_keys_by_projects(project_ids) for i in available_keys: - available_keys[i]["user_id"] = sessions_metas.meta_type.USERID - available_keys[i]["user_anonymous_id"] = sessions_metas.meta_type.USERANONYMOUSID + available_keys[i]["user_id"] = schemas.FilterType.user_id + available_keys[i]["user_anonymous_id"] = schemas.FilterType.user_anonymous_id results = {} for i in project_ids: if m_key not in available_keys[i].values(): @@ -736,7 +988,7 @@ def get_session_ids_by_user_ids(project_id, user_ids): def delete_sessions_by_session_ids(session_ids): - with pg_client.PostgresClient() as cur: + with pg_client.PostgresClient(long_query=True) as cur: query = cur.mogrify( """\ DELETE FROM public.sessions @@ -750,7 +1002,7 @@ def delete_sessions_by_session_ids(session_ids): def delete_sessions_by_user_ids(project_id, user_ids): - with pg_client.PostgresClient() as cur: + with pg_client.PostgresClient(long_query=True) as cur: query = cur.mogrify( """\ DELETE FROM public.sessions @@ -764,6 +1016,6 @@ def delete_sessions_by_user_ids(project_id, user_ids): def count_all(): - with pg_client.PostgresClient() as cur: + with pg_client.PostgresClient(long_query=True) as cur: row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions") return 
row.get("count", 0) diff --git a/api/chalicelib/core/sessions_assignments.py b/api/chalicelib/core/sessions_assignments.py index 3e0929dad..4491f62d0 100644 --- a/api/chalicelib/core/sessions_assignments.py +++ b/api/chalicelib/core/sessions_assignments.py @@ -1,4 +1,4 @@ -from chalicelib.utils.helper import environ as env +from decouple import config from chalicelib.utils import helper from chalicelib.utils.TimeUTC import TimeUTC from chalicelib.utils import pg_client @@ -32,7 +32,7 @@ def create_new_assignment(tenant_id, project_id, session_id, creator_id, assigne if i is None: return {"errors": [f"integration not found"]} - link = env["SITE_URL"] + f"/{project_id}/session/{session_id}" + link = config("SITE_URL") + f"/{project_id}/session/{session_id}" description += f"\n> {link}" try: issue = integration.issue_handler.create_new_assignment(title=title, assignee=assignee, description=description, diff --git a/api/chalicelib/core/sessions_metas.py b/api/chalicelib/core/sessions_metas.py index a21b78783..1d342d03f 100644 --- a/api/chalicelib/core/sessions_metas.py +++ b/api/chalicelib/core/sessions_metas.py @@ -1,3 +1,4 @@ +import schemas from chalicelib.utils import pg_client, helper from chalicelib.utils.event_filter_definition import SupportedFilter @@ -8,40 +9,47 @@ def get_key_values(project_id): cur.mogrify( f"""\ SELECT ARRAY_AGG(DISTINCT s.user_os - ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='web') AS {meta_type.USEROS}, + ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='web') AS {schemas.FilterType.user_os}, ARRAY_AGG(DISTINCT s.user_browser ORDER BY s.user_browser) - FILTER ( WHERE s.user_browser IS NOT NULL AND s.platform='web') AS {meta_type.USERBROWSER}, + FILTER ( WHERE s.user_browser IS NOT NULL AND s.platform='web') AS {schemas.FilterType.user_browser}, ARRAY_AGG(DISTINCT s.user_device ORDER BY s.user_device) - FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='web') AS {meta_type.USERDEVICE}, + FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='web') AS {schemas.FilterType.user_device}, ARRAY_AGG(DISTINCT s.user_country ORDER BY s.user_country) - FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='web')::text[] AS {meta_type.USERCOUNTRY}, + FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='web')::text[] AS {schemas.FilterType.user_country}, ARRAY_AGG(DISTINCT s.user_id - ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='web') AS {meta_type.USERID}, + ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='web') AS {schemas.FilterType.user_id}, ARRAY_AGG(DISTINCT s.user_anonymous_id - ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='web') AS {meta_type.USERANONYMOUSID}, + ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='web') AS {schemas.FilterType.user_anonymous_id}, ARRAY_AGG(DISTINCT s.rev_id - ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='web') AS {meta_type.REVID}, + ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='web') AS {schemas.FilterType.rev_id}, ARRAY_AGG(DISTINCT p.referrer ORDER BY p.referrer) - FILTER ( WHERE p.referrer != '' ) AS 
{meta_type.REFERRER}, + FILTER ( WHERE p.referrer != '' ) AS {schemas.FilterType.referrer}, + + ARRAY_AGG(DISTINCT s.utm_source + ORDER BY s.utm_source) FILTER ( WHERE s.utm_source IS NOT NULL AND s.utm_source != 'none' AND s.utm_source != '') AS {schemas.FilterType.utm_source}, + ARRAY_AGG(DISTINCT s.utm_medium + ORDER BY s.utm_medium) FILTER ( WHERE s.utm_medium IS NOT NULL AND s.utm_medium != 'none' AND s.utm_medium != '') AS {schemas.FilterType.utm_medium}, + ARRAY_AGG(DISTINCT s.utm_campaign + ORDER BY s.utm_campaign) FILTER ( WHERE s.utm_campaign IS NOT NULL AND s.utm_campaign != 'none' AND s.utm_campaign != '') AS {schemas.FilterType.utm_campaign}, ARRAY_AGG(DISTINCT s.user_os - ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='ios' ) AS {meta_type.USEROS_IOS}, + ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='ios' ) AS {schemas.FilterType.user_os_ios}, ARRAY_AGG(DISTINCT s.user_device ORDER BY s.user_device) - FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='ios') AS {meta_type.USERDEVICE}, + FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='ios') AS {schemas.FilterType.user_device_ios}, ARRAY_AGG(DISTINCT s.user_country ORDER BY s.user_country) - FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='ios')::text[] AS {meta_type.USERCOUNTRY_IOS}, + FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='ios')::text[] AS {schemas.FilterType.user_country_ios}, ARRAY_AGG(DISTINCT s.user_id - ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='ios') AS {meta_type.USERID_IOS}, + ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='ios') AS {schemas.FilterType.user_id_ios}, ARRAY_AGG(DISTINCT s.user_anonymous_id - ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='ios') AS {meta_type.USERANONYMOUSID_IOS}, + ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='ios') AS {schemas.FilterType.user_anonymous_id_ios}, ARRAY_AGG(DISTINCT s.rev_id - ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='ios') AS {meta_type.REVID_IOS} + ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='ios') AS {schemas.FilterType.rev_id_ios} FROM public.sessions AS s LEFT JOIN events.pages AS p USING (session_id) WHERE s.project_id = %(site_id)s;""", @@ -108,119 +116,137 @@ def __generic_autocomplete(typename): return f -class meta_type: - USEROS = "USEROS" - USERBROWSER = "USERBROWSER" - USERDEVICE = "USERDEVICE" - USERCOUNTRY = "USERCOUNTRY" - USERID = "USERID" - USERANONYMOUSID = "USERANONYMOUSID" - REFERRER = "REFERRER" - REVID = "REVID" - # IOS - USEROS_IOS = "USEROS_IOS" - USERDEVICE_IOS = "USERDEVICE_IOS" - USERCOUNTRY_IOS = "USERCOUNTRY_IOS" - USERID_IOS = "USERID_IOS" - USERANONYMOUSID_IOS = "USERANONYMOUSID_IOS" - REVID_IOS = "REVID_IOS" - - SUPPORTED_TYPES = { - meta_type.USEROS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USEROS), - query=__generic_query(typename=meta_type.USEROS), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), - meta_type.USERBROWSER: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERBROWSER), - 
query=__generic_query(typename=meta_type.USERBROWSER), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), - meta_type.USERDEVICE: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERDEVICE), - query=__generic_query(typename=meta_type.USERDEVICE), - value_limit=3, - starts_with="", - starts_limit=3, - ignore_if_starts_with=["/"]), - meta_type.USERCOUNTRY: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERCOUNTRY), - query=__generic_query(typename=meta_type.USERCOUNTRY), - value_limit=2, - starts_with="", - starts_limit=2, - ignore_if_starts_with=["/"]), - meta_type.USERID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERID), - query=__generic_query(typename=meta_type.USERID), - value_limit=2, - starts_with="", - starts_limit=2, - ignore_if_starts_with=["/"]), - meta_type.USERANONYMOUSID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERANONYMOUSID), - query=__generic_query(typename=meta_type.USERANONYMOUSID), - value_limit=3, - starts_with="", - starts_limit=3, - ignore_if_starts_with=["/"]), - meta_type.REVID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REVID), - query=__generic_query(typename=meta_type.REVID), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), - meta_type.REFERRER: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REFERRER), - query=__generic_query(typename=meta_type.REFERRER), - value_limit=5, - starts_with="/", - starts_limit=5, - ignore_if_starts_with=[]), + schemas.FilterType.user_os: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_os), + query=__generic_query(typename=schemas.FilterType.user_os), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_browser: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_browser), + query=__generic_query(typename=schemas.FilterType.user_browser), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_device: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_device), + query=__generic_query(typename=schemas.FilterType.user_device), + value_limit=3, + starts_with="", + starts_limit=3, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_country: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_country), + query=__generic_query(typename=schemas.FilterType.user_country), + value_limit=2, + starts_with="", + starts_limit=2, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_id: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_id), + query=__generic_query(typename=schemas.FilterType.user_id), + value_limit=2, + starts_with="", + starts_limit=2, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_anonymous_id: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_anonymous_id), + query=__generic_query(typename=schemas.FilterType.user_anonymous_id), + value_limit=3, + starts_with="", + starts_limit=3, + ignore_if_starts_with=["/"]), + schemas.FilterType.rev_id: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.rev_id), + query=__generic_query(typename=schemas.FilterType.rev_id), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.referrer: SupportedFilter( + 
get=__generic_autocomplete(typename=schemas.FilterType.referrer), + query=__generic_query(typename=schemas.FilterType.referrer), + value_limit=5, + starts_with="/", + starts_limit=5, + ignore_if_starts_with=[]), + schemas.FilterType.utm_campaign: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.utm_campaign), + query=__generic_query(typename=schemas.FilterType.utm_campaign), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.utm_medium: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.utm_medium), + query=__generic_query(typename=schemas.FilterType.utm_medium), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.utm_source: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.utm_source), + query=__generic_query(typename=schemas.FilterType.utm_source), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), # IOS - meta_type.USEROS_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USEROS_IOS), - query=__generic_query(typename=meta_type.USEROS_IOS), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), - meta_type.USERDEVICE_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERDEVICE_IOS), - query=__generic_query(typename=meta_type.USERDEVICE_IOS), - value_limit=3, - starts_with="", - starts_limit=3, - ignore_if_starts_with=["/"]), - meta_type.USERCOUNTRY_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERCOUNTRY_IOS), - query=__generic_query(typename=meta_type.USERCOUNTRY_IOS), - value_limit=2, - starts_with="", - starts_limit=2, - ignore_if_starts_with=["/"]), - meta_type.USERID_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERID_IOS), - query=__generic_query(typename=meta_type.USERID_IOS), - value_limit=2, - starts_with="", - starts_limit=2, - ignore_if_starts_with=["/"]), - meta_type.USERANONYMOUSID_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERANONYMOUSID_IOS), - query=__generic_query(typename=meta_type.USERANONYMOUSID_IOS), - value_limit=3, - starts_with="", - starts_limit=3, - ignore_if_starts_with=["/"]), - meta_type.REVID_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REVID_IOS), - query=__generic_query(typename=meta_type.REVID_IOS), - value_limit=0, - starts_with="", - starts_limit=0, - ignore_if_starts_with=["/"]), + schemas.FilterType.user_os_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_os_ios), + query=__generic_query(typename=schemas.FilterType.user_os_ios), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_device_ios: SupportedFilter( + get=__generic_autocomplete( + typename=schemas.FilterType.user_device_ios), + query=__generic_query(typename=schemas.FilterType.user_device_ios), + value_limit=3, + starts_with="", + starts_limit=3, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_country_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_country_ios), + query=__generic_query(typename=schemas.FilterType.user_country_ios), + value_limit=2, + starts_with="", + starts_limit=2, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_id_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_id_ios), + 
query=__generic_query(typename=schemas.FilterType.user_id_ios), + value_limit=2, + starts_with="", + starts_limit=2, + ignore_if_starts_with=["/"]), + schemas.FilterType.user_anonymous_id_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.user_anonymous_id_ios), + query=__generic_query(typename=schemas.FilterType.user_anonymous_id_ios), + value_limit=3, + starts_with="", + starts_limit=3, + ignore_if_starts_with=["/"]), + schemas.FilterType.rev_id_ios: SupportedFilter( + get=__generic_autocomplete(typename=schemas.FilterType.rev_id_ios), + query=__generic_query(typename=schemas.FilterType.rev_id_ios), + value_limit=0, + starts_with="", + starts_limit=0, + ignore_if_starts_with=["/"]), } def search(text, meta_type, project_id): rows = [] - if meta_type.upper() not in list(SUPPORTED_TYPES.keys()): + if meta_type not in list(SUPPORTED_TYPES.keys()): return {"errors": ["unsupported type"]} - rows += SUPPORTED_TYPES[meta_type.upper()].get(project_id=project_id, text=text) - if meta_type.upper() + "_IOS" in list(SUPPORTED_TYPES.keys()): - rows += SUPPORTED_TYPES[meta_type.upper() + "_IOS"].get(project_id=project_id, text=text) + rows += SUPPORTED_TYPES[meta_type].get(project_id=project_id, text=text) + if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()): + rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text) return {"data": rows} diff --git a/api/chalicelib/core/sessions_mobs.py b/api/chalicelib/core/sessions_mobs.py index 760813b5c..8f61d436b 100644 --- a/api/chalicelib/core/sessions_mobs.py +++ b/api/chalicelib/core/sessions_mobs.py @@ -1,14 +1,15 @@ -from chalicelib.utils.helper import environ -from chalicelib.utils.s3 import client +from decouple import config + from chalicelib.utils import s3 +from chalicelib.utils.s3 import client def get_web(sessionId): return client.generate_presigned_url( 'get_object', Params={ - 'Bucket': environ["sessions_bucket"], - 'Key': sessionId + 'Bucket': config("sessions_bucket"), + 'Key': str(sessionId) }, ExpiresIn=100000 ) @@ -18,8 +19,8 @@ def get_ios(sessionId): return client.generate_presigned_url( 'get_object', Params={ - 'Bucket': environ["ios_bucket"], - 'Key': sessionId + 'Bucket': config("ios_bucket"), + 'Key': str(sessionId) }, ExpiresIn=100000 ) @@ -27,4 +28,4 @@ def get_ios(sessionId): def delete_mobs(session_ids): for session_id in session_ids: - s3.schedule_for_deletion(environ["sessions_bucket"], session_id) + s3.schedule_for_deletion(config("sessions_bucket"), session_id) diff --git a/api/chalicelib/core/significance.py b/api/chalicelib/core/significance.py index 8bcda04a7..9fc55065f 100644 --- a/api/chalicelib/core/significance.py +++ b/api/chalicelib/core/significance.py @@ -1,6 +1,7 @@ __author__ = "AZNAUROV David" __maintainer__ = "KRAIEM Taha Yassine" +import schemas from chalicelib.core import events, sessions_metas, metadata, sessions from chalicelib.utils import dev @@ -30,87 +31,107 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]: :param filter_d: dict contains events&filters&... 
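:param project_id: id of the project whose sessions and events are scanned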
:return: """ - stages = filter_d["events"] - filters = filter_d.get("filters", []) + stages: [dict] = filter_d["events"] + filters: [dict] = filter_d.get("filters", []) filter_issues = filter_d.get("issueTypes") if filter_issues is None or len(filter_issues) == 0: filter_issues = [] stage_constraints = ["main.timestamp <= %(endTimestamp)s"] first_stage_extra_constraints = ["s.project_id=%(project_id)s", "s.start_ts >= %(startTimestamp)s", "s.start_ts <= %(endTimestamp)s"] - extra_from = "" + filter_extra_from = [] n_stages_query = [] values = {} if len(filters) > 0: - meta_keys = metadata.get(project_id=project_id) - meta_keys = {m["key"]: m["index"] for m in meta_keys} + meta_keys = None for i, f in enumerate(filters): - if not isinstance(f.get("value"), list): - if isinstance(f.get("value"), tuple): - f["value"] = list(f.get("value")) - else: - f["value"] = [f.get("value")] - if len(f["value"]) == 0 or f["value"][0] is None: + if not isinstance(f["value"], list): + f.value = [f["value"]] + if len(f["value"]) == 0 or f["value"] is None: continue - filter_type = f["type"].upper() - values[f"f_value_{i}"] = sessions.__get_sql_value_multiple(f["value"]) - if filter_type == sessions_metas.meta_type.USERBROWSER: - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f's.user_browser {op} %({f"f_value_{i}"})s') + f["value"] = helper.values_for_operator(value=f["value"], op=f["operator"]) + # filter_args = _multiple_values(f["value"]) + op = sessions.__get_sql_operator(f["operator"]) - elif filter_type in [sessions_metas.meta_type.USEROS, sessions_metas.meta_type.USEROS_IOS]: - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f's.user_os {op} %({f"f_value_{i}"})s') + filter_type = f["type"] + # values[f_k] = sessions.__get_sql_value_multiple(f["value"]) + f_k = f"f_value{i}" + values = {**values, + **sessions._multiple_values(helper.values_for_operator(value=f["value"], op=f["operator"]), + value_key=f_k)} + if filter_type == schemas.FilterType.user_browser: + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_browser {op} %({f_k})s', f["value"], value_key=f_k)) - elif filter_type in [sessions_metas.meta_type.USERDEVICE, sessions_metas.meta_type.USERDEVICE_IOS]: - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f's.user_device {op} %({f"f_value_{i}"})s') + elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]: + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_os {op} %({f_k})s', f["value"], value_key=f_k)) - elif filter_type in [sessions_metas.meta_type.USERCOUNTRY, sessions_metas.meta_type.USERCOUNTRY_IOS]: - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f's.user_country {op} %({f"f_value_{i}"})s') - elif filter_type == "duration".upper(): + elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]: + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_device {op} %({f_k})s', f["value"], value_key=f_k)) + + elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]: + # op = sessions.__get_sql_operator_multiple(f["operator"]) + 
first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_country {op} %({f_k})s', f["value"], value_key=f_k)) + elif filter_type == schemas.FilterType.duration: if len(f["value"]) > 0 and f["value"][0] is not None: - first_stage_extra_constraints.append(f's.duration >= %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = f["value"][0] - if len(f["value"]) > 1 and f["value"][1] is not None and f["value"][1] > 0: - first_stage_extra_constraints.append('s.duration <= %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = f["value"][1] - elif filter_type == sessions_metas.meta_type.REFERRER: + first_stage_extra_constraints.append(f's.duration >= %(minDuration)s') + values["minDuration"] = f["value"][0] + if len(f["value"]) > 1 and f["value"][1] is not None and int(f["value"][1]) > 0: + first_stage_extra_constraints.append('s.duration <= %(maxDuration)s') + values["maxDuration"] = f["value"][1] + elif filter_type == schemas.FilterType.referrer: # events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)" - extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)" - op = sessions.__get_sql_operator_multiple(f["operator"]) - first_stage_extra_constraints.append(f"p.base_referrer {op} %(referrer)s") + filter_extra_from = [f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"] + # op = sessions.__get_sql_operator_multiple(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f"p.base_referrer {op} %({f_k})s", f["value"], value_key=f_k)) elif filter_type == events.event_type.METADATA.ui_type: - op = sessions.__get_sql_operator(f["operator"]) + if meta_keys is None: + meta_keys = metadata.get(project_id=project_id) + meta_keys = {m["key"]: m["index"] for m in meta_keys} + # op = sessions.__get_sql_operator(f["operator"]) if f.get("key") in meta_keys.keys(): first_stage_extra_constraints.append( - f's.{metadata.index_to_colname(meta_keys[f["key"]])} {op} %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) - elif filter_type in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: - op = sessions.__get_sql_operator(f["operator"]) - first_stage_extra_constraints.append(f's.user_id {op} %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) - elif filter_type in [sessions_metas.meta_type.USERANONYMOUSID, - sessions_metas.meta_type.USERANONYMOUSID_IOS]: - op = sessions.__get_sql_operator(f["operator"]) - first_stage_extra_constraints.append(f's.user_anonymous_id {op} %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) - elif filter_type in [sessions_metas.meta_type.REVID, sessions_metas.meta_type.REVID_IOS]: - op = sessions.__get_sql_operator(f["operator"]) - first_stage_extra_constraints.append(f's.rev_id {op} %({f"f_value_{i}"})s') - values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op) + sessions._multiple_conditions( + f's.{metadata.index_to_colname(meta_keys[f["key"]])} {op} %({f_k})s', f["value"], + value_key=f_k)) + # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op) + elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]: + # op = sessions.__get_sql_operator(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_id {op} %({f_k})s', f["value"], value_key=f_k)) + # values[f_k] = 
helper.string_to_sql_like_with_op(f["value"][0], op) + elif filter_type in [schemas.FilterType.user_anonymous_id, + schemas.FilterType.user_anonymous_id_ios]: + # op = sessions.__get_sql_operator(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.user_anonymous_id {op} %({f_k})s', f["value"], value_key=f_k)) + # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op) + elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]: + # op = sessions.__get_sql_operator(f["operator"]) + first_stage_extra_constraints.append( + sessions._multiple_conditions(f's.rev_id {op} %({f_k})s', f["value"], value_key=f_k)) + # values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op) for i, s in enumerate(stages): if i == 0: - extra_from = ["INNER JOIN public.sessions AS s USING (session_id)"] + extra_from = filter_extra_from + ["INNER JOIN public.sessions AS s USING (session_id)"] else: extra_from = [] if s.get("operator") is None: s["operator"] = "is" + + if not isinstance(s["value"], list): + s["value"] = [s["value"]] + is_any = sessions._isAny_opreator(s["operator"]) op = sessions.__get_sql_operator(s["operator"]) event_type = s["type"].upper() - next_label = s["value"] if event_type == events.event_type.CLICK.ui_type: next_table = events.event_type.CLICK.table next_col_name = events.event_type.CLICK.column @@ -140,7 +161,8 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]: print("=================UNDEFINED") continue - values[f"value{i + 1}"] = helper.string_to_sql_like_with_op(next_label, op) + values = {**values, **sessions._multiple_values(helper.values_for_operator(value=s["value"], op=s["operator"]), + value_key=f"value{i + 1}")} if sessions.__is_negation_operator(op) and i > 0: op = sessions.__reverse_sql_operator(op) main_condition = "left_not.session_id ISNULL" @@ -150,7 +172,11 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]: AND s_main.timestamp >= T{i}.stage{i}_timestamp AND s_main.session_id = T1.session_id) AS left_not ON (TRUE)""") else: - main_condition = f"""main.{next_col_name} {op} %(value{i + 1})s""" + if is_any: + main_condition = "TRUE" + else: + main_condition = sessions._multiple_conditions(f"main.{next_col_name} {op} %(value{i + 1})s", + values=s["value"], value_key=f"value{i + 1}") n_stages_query.append(f""" (SELECT main.session_id, {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp, @@ -197,9 +223,9 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]: params = {"project_id": project_id, "startTimestamp": filter_d["startDate"], "endTimestamp": filter_d["endDate"], "issueTypes": tuple(filter_issues), **values} with pg_client.PostgresClient() as cur: - # print("---------------------------------------------------") - # print(cur.mogrify(n_stages_query, params)) - # print("---------------------------------------------------") + print("---------------------------------------------------") + print(cur.mogrify(n_stages_query, params)) + print("---------------------------------------------------") cur.execute(cur.mogrify(n_stages_query, params)) rows = cur.fetchall() return rows @@ -535,7 +561,8 @@ def get_top_insights(filter_d, project_id): "dropDueToIssues": 0 }] - counts = sessions.search2_pg(data=filter_d, project_id=project_id, user_id=None, count_only=True) + counts = sessions.search2_pg(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d), project_id=project_id, + user_id=None, 
count_only=True) output[0]["sessionsCount"] = counts["countSessions"] output[0]["usersCount"] = counts["countUsers"] return output, 0 diff --git a/api/chalicelib/core/signup.py b/api/chalicelib/core/signup.py index 50fc6e41a..b4f02f0b8 100644 --- a/api/chalicelib/core/signup.py +++ b/api/chalicelib/core/signup.py @@ -1,21 +1,24 @@ -from chalicelib.utils import helper -from chalicelib.utils import pg_client +import json + +from decouple import config + +import schemas from chalicelib.core import users, telemetry, tenants from chalicelib.utils import captcha -import json +from chalicelib.utils import helper +from chalicelib.utils import pg_client from chalicelib.utils.TimeUTC import TimeUTC -from chalicelib.utils.helper import environ -def create_step1(data): +def create_step1(data: schemas.UserSignupSchema): print(f"===================== SIGNUP STEP 1 AT {TimeUTC.to_human_readable(TimeUTC.now())} UTC") errors = [] if tenants.tenants_exists(): return {"errors": ["tenants already registered"]} - email = data.get("email") + email = data.email print(f"=====================> {email}") - password = data.get("password") + password = data.password print("Verifying email validity") if email is None or len(email) < 5 or not helper.is_valid_email(email): @@ -28,25 +31,25 @@ def create_step1(data): errors.append("Email address previously deleted.") print("Verifying captcha") - if helper.allow_captcha() and not captcha.is_valid(data["g-recaptcha-response"]): + if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response): errors.append("Invalid captcha.") print("Verifying password validity") - if len(data["password"]) < 6: + if len(password) < 6: errors.append("Password is too short, it must be at least 6 characters long.") print("Verifying fullname validity") - fullname = data.get("fullname") + fullname = data.fullname if fullname is None or len(fullname) < 1 or not helper.is_alphabet_space_dash(fullname): errors.append("Invalid full name.") print("Verifying company's name validity") - company_name = data.get("organizationName") + company_name = data.organizationName if company_name is None or len(company_name) < 1 or not helper.is_alphanumeric_space(company_name): errors.append("invalid organization's name") print("Verifying project's name validity") - project_name = data.get("projectName") + project_name = data.projectName if project_name is None or len(project_name) < 1: project_name = "my first project" @@ -61,7 +64,7 @@ def create_step1(data): "projectName": project_name, "data": json.dumps({"lastAnnouncementView": TimeUTC.now()}), "organizationName": company_name, - "versionNumber": environ["version_number"] + "versionNumber": config("version_number") } query = f"""\ WITH t AS ( diff --git a/api/chalicelib/core/slack.py b/api/chalicelib/core/slack.py index 411be0567..0bd715f5e 100644 --- a/api/chalicelib/core/slack.py +++ b/api/chalicelib/core/slack.py @@ -1,5 +1,5 @@ from datetime import datetime -from chalicelib.utils.helper import environ +from decouple import config from chalicelib.core.collaboration_slack import Slack @@ -10,7 +10,7 @@ def send(notification, destination): return Slack.send_text(tenant_id=notification["tenantId"], webhook_id=destination, text=notification["description"] \ - + f"\n<{environ['SITE_URL']}{notification['buttonUrl']}|{notification['buttonText']}>", + + f"\n<{config('SITE_URL')}{notification['buttonUrl']}|{notification['buttonText']}>", title=notification["title"], title_link=notification["buttonUrl"], ) @@ -23,7 +23,7 @@ def 
send_batch(notifications_list): if n.get("destination") not in webhookId_map: webhookId_map[n.get("destination")] = {"tenantId": n["notification"]["tenantId"], "batch": []} webhookId_map[n.get("destination")]["batch"].append({"text": n["notification"]["description"] \ - + f"\n<{environ['SITE_URL']}{n['notification']['buttonUrl']}|{n['notification']['buttonText']}>", + + f"\n<{config('SITE_URL')}{n['notification']['buttonUrl']}|{n['notification']['buttonText']}>", "title": n["notification"]["title"], "title_link": n["notification"]["buttonUrl"], "ts": datetime.now().timestamp()}) diff --git a/api/chalicelib/core/socket_ios.py b/api/chalicelib/core/socket_ios.py index d925797fe..50e4d025c 100644 --- a/api/chalicelib/core/socket_ios.py +++ b/api/chalicelib/core/socket_ios.py @@ -1,10 +1,10 @@ import requests -from chalicelib.utils.helper import environ +from decouple import config from chalicelib.core import projects def start_replay(project_id, session_id, device, os_version, mob_url): - r = requests.post(environ["IOS_MIDDLEWARE"] + "/replay", json={ + r = requests.post(config("IOS_MIDDLEWARE") + "/replay", json={ "projectId": project_id, "projectKey": projects.get_project_key(project_id), "sessionId": session_id, @@ -18,5 +18,5 @@ def start_replay(project_id, session_id, device, os_version, mob_url): print(r.text) return r.text result = r.json() - result["url"] = environ["IOS_MIDDLEWARE"] + result["url"] = config("IOS_MIDDLEWARE") return result diff --git a/api/chalicelib/core/sourcemaps.py b/api/chalicelib/core/sourcemaps.py index 01204847c..73341cb4d 100644 --- a/api/chalicelib/core/sourcemaps.py +++ b/api/chalicelib/core/sourcemaps.py @@ -1,4 +1,4 @@ -from chalicelib.utils.helper import environ +from decouple import config from chalicelib.utils import helper from chalicelib.utils import s3 @@ -17,7 +17,7 @@ def __get_key(project_id, url): def presign_share_urls(project_id, urls): results = [] for u in urls: - results.append(s3.get_presigned_url_for_sharing(bucket=environ['sourcemaps_bucket'], expires_in=120, + results.append(s3.get_presigned_url_for_sharing(bucket=config('sourcemaps_bucket'), expires_in=120, key=__get_key(project_id, u), check_exists=True)) return results @@ -26,7 +26,7 @@ def presign_share_urls(project_id, urls): def presign_upload_urls(project_id, urls): results = [] for u in urls: - results.append(s3.get_presigned_url_for_upload(bucket=environ['sourcemaps_bucket'], + results.append(s3.get_presigned_url_for_upload(bucket=config('sourcemaps_bucket'), expires_in=1800, key=__get_key(project_id, u))) return results @@ -87,7 +87,7 @@ def get_traces_group(project_id, payload): print(key) print("===============================") if key not in payloads: - file_exists = s3.exists(environ['sourcemaps_bucket'], key) + file_exists = s3.exists(config('sourcemaps_bucket'), key) all_exists = all_exists and file_exists if not file_exists: print(f"{u['absPath']} sourcemap (key '{key}') doesn't exist in S3") @@ -130,10 +130,10 @@ def fetch_missed_contexts(frames): if frames[i]["frame"]["absPath"] in source_cache: file = source_cache[frames[i]["frame"]["absPath"]] else: - file = s3.get_file(environ['js_cache_bucket'], get_js_cache_path(frames[i]["frame"]["absPath"])) + file = s3.get_file(config('js_cache_bucket'), get_js_cache_path(frames[i]["frame"]["absPath"])) if file is None: print( - f"File {get_js_cache_path(frames[i]['frame']['absPath'])} not found in {environ['js_cache_bucket']}") + f"File {get_js_cache_path(frames[i]['frame']['absPath'])} not found in 
{config('js_cache_bucket')}") source_cache[frames[i]["frame"]["absPath"]] = file if file is None: continue diff --git a/api/chalicelib/core/sourcemaps_parser.py b/api/chalicelib/core/sourcemaps_parser.py index d6e7414ba..83116aed7 100644 --- a/api/chalicelib/core/sourcemaps_parser.py +++ b/api/chalicelib/core/sourcemaps_parser.py @@ -1,6 +1,6 @@ import requests -from chalicelib.utils.helper import environ +from decouple import config def get_original_trace(key, positions): @@ -8,13 +8,13 @@ def get_original_trace(key, positions): "key": key, "positions": positions, "padding": 5, - "bucket": environ['sourcemaps_bucket'], - "S3_HOST": environ['S3_HOST'], - "S3_KEY": environ['S3_KEY'], - "S3_SECRET": environ['S3_SECRET'], - "region": environ['sessions_region'] + "bucket": config('sourcemaps_bucket'), + "S3_HOST": config('S3_HOST'), + "S3_KEY": config('S3_KEY'), + "S3_SECRET": config('S3_SECRET'), + "region": config('sessions_region') } - r = requests.post(environ["sourcemaps_reader"], json=payload) + r = requests.post(config("sourcemaps_reader"), json=payload) if r.status_code != 200: return {} diff --git a/api/chalicelib/core/tenants.py b/api/chalicelib/core/tenants.py index 054b3f5d5..db154525c 100644 --- a/api/chalicelib/core/tenants.py +++ b/api/chalicelib/core/tenants.py @@ -1,3 +1,4 @@ +import schemas from chalicelib.utils import pg_client from chalicelib.utils import helper from chalicelib.core import users @@ -62,18 +63,18 @@ def edit_client(tenant_id, changes): return helper.dict_to_camel_case(cur.fetchone()) -def update(tenant_id, user_id, data): +def update(tenant_id, user_id, data: schemas.UpdateTenantSchema): admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"error": "unauthorized"} - if "name" not in data and "optOut" not in data: + if data.name is None and data.opt_out is None: return {"errors": ["please provide 'name' of 'optOut' attribute for update"]} changes = {} - if "name" in data: - changes["name"] = data["name"] - if "optOut" in data: - changes["optOut"] = data["optOut"] + if data.name is not None and len(data.name) > 0: + changes["name"] = data.name + if data.opt_out is not None: + changes["optOut"] = data.opt_out return edit_client(tenant_id=tenant_id, changes=changes) diff --git a/api/chalicelib/core/users.py b/api/chalicelib/core/users.py index 2af50ce57..1461c6e14 100644 --- a/api/chalicelib/core/users.py +++ b/api/chalicelib/core/users.py @@ -1,16 +1,15 @@ import json import secrets -from chalicelib.core import authorizers, metadata, projects, assist -from chalicelib.core import tenants -from chalicelib.utils import dev +from decouple import config +from fastapi import BackgroundTasks + +from chalicelib.core import authorizers, metadata, projects +from chalicelib.core import tenants, assist +from chalicelib.utils import dev, email_helper from chalicelib.utils import helper from chalicelib.utils import pg_client from chalicelib.utils.TimeUTC import TimeUTC -from chalicelib.utils.helper import environ - -from chalicelib.core import tenants, assist -import secrets def __generate_invitation_token(): @@ -182,7 +181,7 @@ def update(tenant_id, user_id, changes): return helper.dict_to_camel_case(cur.fetchone()) -def create_member(tenant_id, user_id, data): +def create_member(tenant_id, user_id, data, background_tasks: BackgroundTasks): admin = get(tenant_id=tenant_id, user_id=user_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} @@ -205,18 +204,25 @@ def 
create_member(tenant_id, user_id, data): new_member = create_new_member(email=data["email"], invitation_token=invitation_token, admin=data.get("admin", False), name=name) new_member["invitationLink"] = __get_invitation_link(new_member.pop("invitationToken")) - helper.async_post(environ['email_basic'] % 'member_invitation', - { - "email": data["email"], - "invitationLink": new_member["invitationLink"], - "clientId": tenants.get_by_tenant_id(tenant_id)["name"], - "senderName": admin["name"] - }) + + # helper.async_post(config('email_basic') % 'member_invitation', + # { + # "email": data["email"], + # "invitationLink": new_member["invitationLink"], + # "clientId": tenants.get_by_tenant_id(tenant_id)["name"], + # "senderName": admin["name"] + # }) + background_tasks.add_task(email_helper.send_team_invitation, **{ + "recipient": data["email"], + "invitation_link": new_member["invitationLink"], + "client_id": tenants.get_by_tenant_id(tenant_id)["name"], + "sender_name": admin["name"] + }) return {"data": new_member} def __get_invitation_link(invitation_token): - return environ["SITE_URL"] + environ["invitation_link"] % invitation_token + return config("SITE_URL") + config("invitation_link") % invitation_token def allow_password_change(user_id, delta_min=10): @@ -282,12 +288,15 @@ def edit(user_id_to_update, tenant_id, changes, editor_id): admin = get(tenant_id=tenant_id, user_id=editor_id) if not admin["superAdmin"] and not admin["admin"]: return {"errors": ["unauthorized"]} - if user["superAdmin"]: - changes.pop("admin") + if editor_id == user_id_to_update: + if user["superAdmin"]: + changes.pop("admin") + elif user["admin"] != changes["admin"]: + return {"errors": ["cannot change your own role"]} keys = list(changes.keys()) for k in keys: - if k not in ALLOW_EDIT: + if k not in ALLOW_EDIT or changes[k] is None: changes.pop(k) keys = list(changes.keys()) @@ -441,7 +450,7 @@ def change_password(tenant_id, user_id, email, old_password, new_password): c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, stack_integrations=True) c["smtp"] = helper.has_smtp() - c["iceServers"]= assist.get_ice_servers() + c["iceServers"] = assist.get_ice_servers() return { 'jwt': r.pop('jwt'), 'data': { @@ -469,7 +478,7 @@ def set_password_invitation(user_id, new_password): c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, stack_integrations=True) c["smtp"] = helper.has_smtp() - c["iceServers"]= assist.get_ice_servers() + c["iceServers"] = assist.get_ice_servers() return { 'jwt': r.pop('jwt'), 'data': { diff --git a/api/chalicelib/core/webhook.py b/api/chalicelib/core/webhook.py index fff2d4e7e..d0b3e2adc 100644 --- a/api/chalicelib/core/webhook.py +++ b/api/chalicelib/core/webhook.py @@ -1,6 +1,9 @@ +import logging + +import requests + from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC -import requests def get_by_id(webhook_id): @@ -114,7 +117,7 @@ def add(tenant_id, endpoint, auth_header=None, webhook_type='webhook', name="", def add_edit(tenant_id, data, replace_none=None): - if "webhookId" in data: + if data.get("webhookId") is not None: return update(tenant_id=tenant_id, webhook_id=data["webhookId"], changes={"endpoint": data["endpoint"], "authHeader": None if "authHeader" not in data else data["authHeader"], @@ -144,28 +147,24 @@ def trigger_batch(data_list): for w in data_list: if w["destination"] not in webhooks_map: webhooks_map[w["destination"]] = 
get_by_id(webhook_id=w["destination"]) - __trigger(hook=webhooks_map[w["destination"]], data=w["data"]) + if webhooks_map[w["destination"]] is None: + logging.error(f"!!Error webhook not found: webhook_id={w['destination']}") + else: + __trigger(hook=webhooks_map[w["destination"]], data=w["data"]) def __trigger(hook, data): - if hook["type"] == 'webhook': + if hook is not None and hook["type"] == 'webhook': headers = {} if hook["authHeader"] is not None and len(hook["authHeader"]) > 0: headers = {"Authorization": hook["authHeader"]} - # body = { - # "webhookId": hook["id"], - # "createdAt": TimeUTC.now(), - # "event": event, - # "data": data - # } - r = requests.post(url=hook["endpoint"], json=data, headers=headers) if r.status_code != 200: - print("=======> webhook: something went wrong") - print(r) - print(r.status_code) - print(r.text) + logging.error("=======> webhook: something went wrong") + logging.error(r) + logging.error(r.status_code) + logging.error(r.text) return response = None try: @@ -174,5 +173,5 @@ def __trigger(hook, data): try: response = r.text except: - print("no response found") + logging.info("no response found") return response diff --git a/api/chalicelib/core/weekly_report.py b/api/chalicelib/core/weekly_report.py index e0e6e0fa5..3d857ccc0 100644 --- a/api/chalicelib/core/weekly_report.py +++ b/api/chalicelib/core/weekly_report.py @@ -1,6 +1,5 @@ -from chalicelib.utils import pg_client, helper +from chalicelib.utils import pg_client, helper, email_helper from chalicelib.utils.TimeUTC import TimeUTC -from chalicelib.utils.helper import environ from chalicelib.utils.helper import get_issue_title LOWEST_BAR_VALUE = 3 @@ -30,7 +29,7 @@ def edit_config(user_id, weekly_report): def cron(): - with pg_client.PostgresClient() as cur: + with pg_client.PostgresClient(long_query=True) as cur: params = {"3_days_ago": TimeUTC.midnight(delta_days=-3), "1_week_ago": TimeUTC.midnight(delta_days=-7), "2_week_ago": TimeUTC.midnight(delta_days=-14), @@ -83,6 +82,7 @@ def cron(): ) AS month_1_issues ON (TRUE) WHERE projects.deleted_at ISNULL;"""), params) projects_data = cur.fetchall() + emails_to_send = [] for p in projects_data: params["project_id"] = p["project_id"] print(f"checking {p['project_name']} : {p['project_id']}") @@ -227,13 +227,14 @@ def cron(): if j["type"] in keep_types: keep.append(j) i["partition"] = keep - helper.async_post(environ['email_funnel'] % "weekly_report2", - {"email": p.pop("emails"), - "data": { - **p, - "days_partition": days_partition, - "issues_by_type": issues_by_type, - "issues_breakdown_by_day": issues_breakdown_by_day, - "issues_breakdown_list": issues_breakdown_list - } - }) + emails_to_send.append({"email": p.pop("emails"), + "data": { + **p, + "days_partition": days_partition, + "issues_by_type": issues_by_type, + "issues_breakdown_by_day": issues_breakdown_by_day, + "issues_breakdown_list": issues_breakdown_list + }}) + print(f">>> Sending weekly report to {len(emails_to_send)} email-group") + for e in emails_to_send: + email_helper.weekly_report2(recipients=e["email"], data=e["data"]) diff --git a/api/chalicelib/utils/TimeUTC.py b/api/chalicelib/utils/TimeUTC.py index c95359a00..bac7a027f 100644 --- a/api/chalicelib/utils/TimeUTC.py +++ b/api/chalicelib/utils/TimeUTC.py @@ -1,6 +1,9 @@ -from datetime import datetime, timedelta from calendar import monthrange -import pytz +from datetime import datetime, timedelta + +import zoneinfo + +UTC_ZI = zoneinfo.ZoneInfo("UTC") class TimeUTC: @@ -9,20 +12,20 @@ class TimeUTC: MS_DAY = MS_HOUR * 24 
MS_WEEK = MS_DAY * 7 MS_MONTH = MS_DAY * 30 - MS_MONTH_TRUE = monthrange(datetime.now(pytz.utc).astimezone(pytz.utc).year, - datetime.now(pytz.utc).astimezone(pytz.utc).month)[1] * MS_DAY + MS_MONTH_TRUE = monthrange(datetime.now(UTC_ZI).astimezone(UTC_ZI).year, + datetime.now(UTC_ZI).astimezone(UTC_ZI).month)[1] * MS_DAY RANGE_VALUE = None @staticmethod def midnight(delta_days=0): - return int((datetime.now(pytz.utc) + timedelta(delta_days)) \ + return int((datetime.now(UTC_ZI) + timedelta(delta_days)) \ .replace(hour=0, minute=0, second=0, microsecond=0) \ - .astimezone(pytz.utc).timestamp() * 1000) + .astimezone(UTC_ZI).timestamp() * 1000) @staticmethod def __now(delta_days=0, delta_minutes=0, delta_seconds=0): - return (datetime.now(pytz.utc) + timedelta(days=delta_days, minutes=delta_minutes, seconds=delta_seconds)) \ - .astimezone(pytz.utc) + return (datetime.now(UTC_ZI) + timedelta(days=delta_days, minutes=delta_minutes, seconds=delta_seconds)) \ + .astimezone(UTC_ZI) @staticmethod def now(delta_days=0, delta_minutes=0, delta_seconds=0): @@ -32,28 +35,28 @@ class TimeUTC: @staticmethod def month_start(delta_month=0): month = TimeUTC.__now().month + delta_month - return int(datetime.now(pytz.utc) \ + return int(datetime.now(UTC_ZI) \ .replace(year=TimeUTC.__now().year + ((-12 + month) // 12 if month % 12 <= 0 else month // 12), month=12 + month % 12 if month % 12 <= 0 else month % 12 if month > 12 else month, day=1, hour=0, minute=0, second=0, microsecond=0) \ - .astimezone(pytz.utc).timestamp() * 1000) + .astimezone(UTC_ZI).timestamp() * 1000) @staticmethod def year_start(delta_year=0): - return int(datetime.now(pytz.utc) \ + return int(datetime.now(UTC_ZI) \ .replace(year=TimeUTC.__now().year + delta_year, month=1, day=1, hour=0, minute=0, second=0, microsecond=0) \ - .astimezone(pytz.utc).timestamp() * 1000) + .astimezone(UTC_ZI).timestamp() * 1000) @staticmethod def custom(year=None, month=None, day=None, hour=None, minute=None): args = locals() - return int(datetime.now(pytz.utc) \ + return int(datetime.now(UTC_ZI) \ .replace(**{key: args[key] for key in args if args[key] is not None}, second=0, microsecond=0) \ - .astimezone(pytz.utc).timestamp() * 1000) + .astimezone(UTC_ZI).timestamp() * 1000) @staticmethod def future(delta_day, delta_hour, delta_minute, minutes_period=None, start=None): @@ -78,7 +81,7 @@ class TimeUTC: @staticmethod def from_ms_timestamp(ts): - return datetime.fromtimestamp(ts // 1000, pytz.utc) + return datetime.fromtimestamp(ts // 1000, UTC_ZI) @staticmethod def to_human_readable(ts, fmt='%Y-%m-%d %H:%M:%S UTC'): @@ -113,14 +116,14 @@ class TimeUTC: @staticmethod def get_utc_offset(): - return int((datetime.now(pytz.utc).now() - datetime.now(pytz.utc).replace(tzinfo=None)).total_seconds() * 1000) + return int((datetime.now(UTC_ZI).now() - datetime.now(UTC_ZI).replace(tzinfo=None)).total_seconds() * 1000) @staticmethod def trunc_day(timestamp): dt = TimeUTC.from_ms_timestamp(timestamp) return TimeUTC.datetime_to_timestamp(dt .replace(hour=0, minute=0, second=0, microsecond=0) - .astimezone(pytz.utc)) + .astimezone(UTC_ZI)) @staticmethod def trunc_week(timestamp): @@ -128,4 +131,4 @@ class TimeUTC: start = dt - timedelta(days=dt.weekday()) return TimeUTC.datetime_to_timestamp(start .replace(hour=0, minute=0, second=0, microsecond=0) - .astimezone(pytz.utc)) + .astimezone(UTC_ZI)) diff --git a/api/chalicelib/utils/captcha.py b/api/chalicelib/utils/captcha.py index f499a6da9..741031071 100644 --- a/api/chalicelib/utils/captcha.py +++ 
b/api/chalicelib/utils/captcha.py @@ -1,10 +1,10 @@ -from chalicelib.utils.helper import environ as env +from decouple import config import requests from chalicelib.utils import helper def __get_captcha_config(): - return env["captcha_server"], env["captcha_key"] + return config("captcha_server"), config("captcha_key") def is_valid(response): diff --git a/api/chalicelib/utils/email_handler.py b/api/chalicelib/utils/email_handler.py index f7a7fd61b..66b8a3afd 100644 --- a/api/chalicelib/utils/email_handler.py +++ b/api/chalicelib/utils/email_handler.py @@ -6,7 +6,7 @@ from email.mime.multipart import MIMEMultipart from email.mime.text import MIMEText from chalicelib.utils import helper, smtp -from chalicelib.utils.helper import environ +from decouple import config def __get_subject(subject): @@ -16,7 +16,7 @@ def __get_subject(subject): def __get_html_from_file(source, formatting_variables): if formatting_variables is None: formatting_variables = {} - formatting_variables["frontend_url"] = environ["SITE_URL"] + formatting_variables["frontend_url"] = config("SITE_URL") with open(source, "r") as body: BODY_HTML = body.read() if formatting_variables is not None and len(formatting_variables.keys()) > 0: @@ -50,7 +50,7 @@ def send_html(BODY_HTML, SUBJECT, recipient, bcc=None): recipient = [recipient] msg = MIMEMultipart() msg['Subject'] = Header(__get_subject(SUBJECT), 'utf-8') - msg['From'] = environ["EMAIL_FROM"] + msg['From'] = config("EMAIL_FROM") msg['To'] = "" body = MIMEText(BODY_HTML.encode('utf-8'), 'html', "utf-8") msg.attach(body) @@ -75,7 +75,7 @@ def send_text(recipients, text, subject): with smtp.SMTPClient() as s: msg = MIMEMultipart() msg['Subject'] = Header(__get_subject(subject), 'utf-8') - msg['From'] = environ["EMAIL_FROM"] + msg['From'] = config("EMAIL_FROM") msg['To'] = ", ".join(recipients) body = MIMEText(text) msg.attach(body) diff --git a/api/chalicelib/utils/helper.py b/api/chalicelib/utils/helper.py index 1a743a57c..f8ce9fab5 100644 --- a/api/chalicelib/utils/helper.py +++ b/api/chalicelib/utils/helper.py @@ -1,22 +1,23 @@ import random import re import string +from typing import Union import math import requests -local_prefix = 'local-' -from os import environ, path +import schemas -import json +local_prefix = 'local-' +from decouple import config def get_version_number(): - return environ["version"] + return config("version") def get_stage_name(): - stage = environ["stage"] + stage = config("stage") return stage[len(local_prefix):] if stage.startswith(local_prefix) else stage @@ -33,7 +34,7 @@ def is_onprem(): def is_local(): - return environ["stage"].startswith(local_prefix) + return config("stage").startswith(local_prefix) def generate_salt(): @@ -135,16 +136,16 @@ def __sbool_to_bool(value): def allow_captcha(): - return environ.get("captcha_server") is not None and environ.get("captcha_key") is not None \ - and len(environ["captcha_server"]) > 0 and len(environ["captcha_key"]) > 0 + return config("captcha_server", default=None) is not None and config("captcha_key", default=None) is not None \ + and len(config("captcha_server")) > 0 and len(config("captcha_key")) > 0 def allow_sentry(): - return environ.get("sentryURL") is not None and len(environ["sentryURL"]) > 0 + return config("sentryURL", default=None) is not None and len(config("sentryURL")) > 0 def async_post(endpoint, data): - data["auth"] = environ["async_Token"] + data["auth"] = config("async_Token") try: requests.post(endpoint, timeout=1, json=data) except requests.exceptions.ReadTimeout: @@ -168,25 
+169,56 @@ def string_to_sql_like(value): def string_to_sql_like_with_op(value, op): - if isinstance(value, list) and len(value) > 0: - _value = value[0] + if isinstance(value, list): + r = [] + for v in value: + r.append(string_to_sql_like_with_op(v, op)) + return r else: _value = value - if _value is None: - return _value - if op.lower() != 'ilike': - return _value.replace("%", "%%") - _value = _value.replace("*", "%") - if _value.startswith("^"): - _value = _value[1:] - elif not _value.startswith("%"): - _value = '%' + _value + if _value is None: + return _value + if op.upper() != 'ILIKE': + return _value.replace("%", "%%") + _value = _value.replace("*", "%") + if _value.startswith("^"): + _value = _value[1:] + elif not _value.startswith("%"): + _value = '%' + _value - if _value.endswith("$"): - _value = _value[:-1] - elif not _value.endswith("%"): - _value = _value + '%' - return _value.replace("%", "%%") + if _value.endswith("$"): + _value = _value[:-1] + elif not _value.endswith("%"): + _value = _value + '%' + return _value.replace("%", "%%") + + +likable_operators = [schemas.SearchEventOperator._starts_with, schemas.SearchEventOperator._ends_with, + schemas.SearchEventOperator._contains, schemas.SearchEventOperator._not_contains] + + +def is_likable(op: schemas.SearchEventOperator): + return op in likable_operators + + +def values_for_operator(value: Union[str, list], op: schemas.SearchEventOperator): + if not is_likable(op): + return value + if isinstance(value, list): + r = [] + for v in value: + r.append(values_for_operator(v, op)) + return r + else: + if value is None: + return value + if op == schemas.SearchEventOperator._starts_with: + return value + '%' + elif op == schemas.SearchEventOperator._ends_with: + return '%' + value + elif op == schemas.SearchEventOperator._contains: + return '%' + value + '%' + return value def is_valid_email(email): @@ -328,46 +360,9 @@ def __decimal_limit(value, limit): return value / factor -def is_free_open_source_edition(): - return __sbool_to_bool(environ.get("isFOS")) - - -def is_enterprise_edition(): - return __sbool_to_bool(environ.get("isEE")) - - -stag_config_file = f"chalicelib/.configs/{environ['stage']}.json" -if not path.isfile(stag_config_file): - print("!! stage config file not found, using .chalice/config.json only") -else: - print("!! stage config file found, merging with priority to .chalice/config.json") - with open(stag_config_file) as json_file: - config = json.load(json_file) - environ = {**config, **environ} - -if (is_free_open_source_edition() or is_enterprise_edition()) and environ.get("config_file"): - if not path.isfile(environ.get("config_file")): - print("!! 
config file not found, using default environment") - else: - with open(environ.get("config_file")) as json_file: - config = json.load(json_file) - environ = {**environ, **config} - - -def get_internal_project_id(project_id64): - if project_id64 < 0x10000000000000 or project_id64 >= 0x20000000000000: - return None - - project_id64 = (project_id64 - 0x10000000000000) * 4212451012670231 & 0xfffffffffffff - if project_id64 > 0xffffffff: - return None - project_id = int(project_id64) - return project_id - - def has_smtp(): - return environ["EMAIL_HOST"] is not None and len(environ["EMAIL_HOST"]) > 0 + return config("EMAIL_HOST") is not None and len(config("EMAIL_HOST")) > 0 def get_edition(): - return "foss" if is_free_open_source_edition() else "ee" + return "ee" if "ee" in config("ENTERPRISE_BUILD", default="").lower() else "foss" diff --git a/api/chalicelib/utils/html/alert_notification.html b/api/chalicelib/utils/html/alert_notification.html index 881d6ffb0..2d63341f3 100644 --- a/api/chalicelib/utils/html/alert_notification.html +++ b/api/chalicelib/utils/html/alert_notification.html @@ -38,7 +38,7 @@

- Sent with ♡ from OpenReplay © 2021 - All rights reserved.

+ Sent with ♡ from OpenReplay © 2022 - All rights reserved.

https://openreplay.com/

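One pattern repeats across this PR: the chalice-era `environ` dict (previously merged at startup by the now-deleted `env_handler.py`) is replaced everywhere with python-decouple's `config()`, which resolves each key from the process environment or a local `.env` file. A minimal sketch of the pattern, assuming only key names that already appear in these hunks (`pg_port`, `captcha_server`, `captcha_key`) and decouple's documented `config(name, default=..., cast=...)` signature:

from decouple import config

# Required key: config() raises decouple.UndefinedValueError when "pg_port"
# is missing from both the process environment and .env.
pg_port = config("pg_port", cast=int)


# Optional keys: default=None distinguishes "unset" from "empty string",
# mirroring how allow_captcha() in helper.py decides whether captcha is enabled.
def allow_captcha() -> bool:
    server = config("captcha_server", default=None)
    key = config("captcha_key", default=None)
    return server is not None and key is not None and len(server) > 0 and len(key) > 0

Casting at the read site (cast=int) also replaces the scattered int(environ[...]) conversions, so a malformed value fails when the module is first imported rather than in the middle of a request.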
diff --git a/api/chalicelib/utils/pg_client.py b/api/chalicelib/utils/pg_client.py index c54e514ec..c598d8971 100644 --- a/api/chalicelib/utils/pg_client.py +++ b/api/chalicelib/utils/pg_client.py @@ -1,15 +1,17 @@ +from threading import Semaphore + import psycopg2 import psycopg2.extras -from chalicelib.utils.helper import environ - -PG_CONFIG = {"host": environ["pg_host"], - "database": environ["pg_dbname"], - "user": environ["pg_user"], - "password": environ["pg_password"], - "port": int(environ["pg_port"])} - +from decouple import config from psycopg2 import pool -from threading import Semaphore + +PG_CONFIG = {"host": config("pg_host"), + "database": config("pg_dbname"), + "user": config("pg_user"), + "password": config("pg_password"), + "port": config("pg_port", cast=int)} +if config("pg_timeout", cast=int, default=0) > 0: + PG_CONFIG["options"] = f"-c statement_timeout={config('pg_timeout', cast=int) * 1000}" class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool): @@ -19,28 +21,51 @@ class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool): def getconn(self, *args, **kwargs): self._semaphore.acquire() - return super().getconn(*args, **kwargs) + try: + return super().getconn(*args, **kwargs) + except psycopg2.pool.PoolError as e: + if str(e) == "connection pool is closed": + make_pool() + raise e def putconn(self, *args, **kwargs): super().putconn(*args, **kwargs) self._semaphore.release() -try: - postgreSQL_pool = ORThreadedConnectionPool(50, 100, **PG_CONFIG) - if (postgreSQL_pool): - print("Connection pool created successfully") -except (Exception, psycopg2.DatabaseError) as error: - print("Error while connecting to PostgreSQL", error) - raise error +postgreSQL_pool: ORThreadedConnectionPool = None + + +def make_pool(): + global postgreSQL_pool + if postgreSQL_pool is not None: + try: + postgreSQL_pool.closeall() + except (Exception, psycopg2.DatabaseError) as error: + print("Error while closing all connections to PostgreSQL", error) + try: + postgreSQL_pool = ORThreadedConnectionPool(config("pg_minconn", cast=int, default=20), 100, **PG_CONFIG) + if (postgreSQL_pool): + print("Connection pool created successfully") + except (Exception, psycopg2.DatabaseError) as error: + print("Error while connecting to PostgreSQL", error) + raise error + + +make_pool() class PostgresClient: connection = None cursor = None + long_query = False - def __init__(self): - self.connection = postgreSQL_pool.getconn() + def __init__(self, long_query=False): + self.long_query = long_query + if long_query: + self.connection = psycopg2.connect(**PG_CONFIG) + else: + self.connection = postgreSQL_pool.getconn() def __enter__(self): if self.cursor is None: @@ -51,11 +76,18 @@ try: self.connection.commit() self.cursor.close() + if self.long_query: + self.connection.close() except Exception as error: print("Error while committing/closing PG-connection", error) - raise error + if str(error) == "connection already closed": + print("Recreating the connection pool") + make_pool() + else: + raise error finally: - postgreSQL_pool.putconn(self.connection) + if not self.long_query: + postgreSQL_pool.putconn(self.connection) def close(): diff --git a/api/chalicelib/utils/s3.py b/api/chalicelib/utils/s3.py index 176a19fa2..67e1eafd2 100644 --- a/api/chalicelib/utils/s3.py +++ b/api/chalicelib/utils/s3.py @@ -1,24 +1,24 @@ from botocore.exceptions import ClientError -from chalicelib.utils.helper import environ +from decouple import config from datetime import datetime,
timedelta import boto3 import botocore from botocore.client import Config -client = boto3.client('s3', endpoint_url=environ["S3_HOST"], - aws_access_key_id=environ["S3_KEY"], - aws_secret_access_key=environ["S3_SECRET"], +client = boto3.client('s3', endpoint_url=config("S3_HOST"), + aws_access_key_id=config("S3_KEY"), + aws_secret_access_key=config("S3_SECRET"), config=Config(signature_version='s3v4'), - region_name=environ["sessions_region"]) + region_name=config("sessions_region")) def exists(bucket, key): try: - boto3.resource('s3', endpoint_url=environ["S3_HOST"], - aws_access_key_id=environ["S3_KEY"], - aws_secret_access_key=environ["S3_SECRET"], + boto3.resource('s3', endpoint_url=config("S3_HOST"), + aws_access_key_id=config("S3_KEY"), + aws_secret_access_key=config("S3_SECRET"), config=Config(signature_version='s3v4'), - region_name=environ["sessions_region"]) \ + region_name=config("sessions_region")) \ .Object(bucket, key).load() except botocore.exceptions.ClientError as e: if e.response['Error']['Code'] == "404": @@ -73,21 +73,21 @@ def get_file(source_bucket, source_key): def rename(source_bucket, source_key, target_bucket, target_key): - s3 = boto3.resource('s3', endpoint_url=environ["S3_HOST"], - aws_access_key_id=environ["S3_KEY"], - aws_secret_access_key=environ["S3_SECRET"], + s3 = boto3.resource('s3', endpoint_url=config("S3_HOST"), + aws_access_key_id=config("S3_KEY"), + aws_secret_access_key=config("S3_SECRET"), config=Config(signature_version='s3v4'), - region_name=environ["sessions_region"]) + region_name=config("sessions_region")) s3.Object(target_bucket, target_key).copy_from(CopySource=f'{source_bucket}/{source_key}') s3.Object(source_bucket, source_key).delete() def schedule_for_deletion(bucket, key): - s3 = boto3.resource('s3', endpoint_url=environ["S3_HOST"], - aws_access_key_id=environ["S3_KEY"], - aws_secret_access_key=environ["S3_SECRET"], + s3 = boto3.resource('s3', endpoint_url=config("S3_HOST"), + aws_access_key_id=config("S3_KEY"), + aws_secret_access_key=config("S3_SECRET"), config=Config(signature_version='s3v4'), - region_name=environ["sessions_region"]) + region_name=config("sessions_region")) s3_object = s3.Object(bucket, key) s3_object.copy_from(CopySource={'Bucket': bucket, 'Key': key}, Expires=datetime.now() + timedelta(days=7), diff --git a/api/chalicelib/utils/s3urls.py b/api/chalicelib/utils/s3urls.py deleted file mode 100644 index bc0b39bea..000000000 --- a/api/chalicelib/utils/s3urls.py +++ /dev/null @@ -1,120 +0,0 @@ -import re -from urllib.parse import urlparse - - -def style(url): - """ Determine 'style' of a given S3 url - - >>> style("s3://my-bucket/my-key/") - 's3' - - >>> style("s3://user@my-bucket/my-key/") - 's3-credential' - - >>> style("https://my-bucket.s3.amazonaws.com/my-key/") - 'bucket-in-netloc' - - >>> style("https://s3.amazonaws.com/my-bucket/my-key/") - 'bucket-in-path' - """ - o = urlparse(url) - if o.scheme == 's3': - if '@' in o.netloc: - return 's3-credential' - else: - return 's3' - - if re.search(r'^s3[.-](\w{2}-\w{4,9}-\d\.)?amazonaws\.com', o.netloc): - return 'bucket-in-path' - - if re.search(r'\.s3[.-](\w{2}-\w{4,9}-\d\.)?amazonaws\.com', o.netloc): - return 'bucket-in-netloc' - - raise ValueError(f'Unknown url style: {url}') - - -def build_url(url_type, bucket, key=None, region=None, credential_name=None): - """ Construct an S3 URL - - Args: - url_type: one of 's3', 's3-credential', 'bucket-in-path', 'bucket-in-netloc' - bucket: S3 bucket name - key: Key within bucket (optional) - region: S3 region name 
(optional) - credential_name: user/credential name to use in S3 scheme url (optional) - - Returns - (string) S3 URL - """ - if url_type == 's3': - credential = f'{credential_name}@' if credential_name else "" - return f's3://{credential}{bucket}/{key or ""}' - - if url_type == 'bucket-in-path': - return f'https://s3{"-" if region else ""}{region or ""}.amazonaws.com/{bucket}/{key}' - - if url_type == 'bucket-in-netloc': - return f'https://{bucket}.s3.amazonaws.com/{key}' - - raise ValueError(f'Invalid url_type: {url_type}') - - -def parse_s3_credential_url(url): - """ Parse S3 scheme url containing a user/credential name - - >>> parse_s3_url("s3://user@my-bucket/my-key") - {'bucket': 'my-bucket', 'key': 'my-key/', 'credential_name': 'user'} - """ - o = urlparse(url) - cred_name, bucket = o.netloc.split('@') - key = o.path if o.path[0] != '/' else o.path[1:] - return {'bucket': bucket, 'key': key, 'credential_name': cred_name} - - -def parse_s3_url(url): - """ Parse S3 scheme url - - >>> parse_s3_url("s3://my-bucket/my-key") - {'bucket': 'my-bucket', 'key': 'my-key/'} - """ - o = urlparse(url) - bucket = o.netloc - key = o.path if o.path[0] != '/' else o.path[1:] - return {'bucket': bucket, 'key': key} - - -def parse_bucket_in_path_url(url): - """ Parse url with bucket name path - - >>> parse_bucket_in_path_url("https://s3-eu-west-1.amazonaws.com/my-bucket/my-key/") - {'bucket': 'my-bucket', 'key': 'my-key/'} - """ - path = urlparse(url).path - bucket = path.split('/')[1] - key = '/'.join(path.split('/')[2:]) - return {'bucket': bucket, 'key': key} - - -def parse_bucket_in_netloc_url(url): - """ Parse url with bucket name in host/netloc - - >>> parse_bucket_in_netloc_url("https://my-bucket.s3.amazonaws.com/my-key/") - {'bucket': 'my-bucket', 'key': 'my-key/'} - """ - o = urlparse(url) - bucket = o.netloc.split('.')[0] - key = o.path if o.path[0] != '/' else o.path[1:] - return {'bucket': bucket, 'key': key} - - -def parse_url(url): - url_style = style(url) - - if url_style == 's3-credential': - return parse_s3_credential_url(url) - if url_style == 's3': - return parse_s3_url(url) - if url_style == 'bucket-in-path': - return parse_bucket_in_path_url(url) - if url_style == 'bucket-in-netloc': - return parse_bucket_in_netloc_url(url) diff --git a/api/chalicelib/utils/smtp.py b/api/chalicelib/utils/smtp.py index 90c95693b..3615ca71a 100644 --- a/api/chalicelib/utils/smtp.py +++ b/api/chalicelib/utils/smtp.py @@ -1,5 +1,5 @@ import smtplib -from chalicelib.utils.helper import environ +from decouple import config class EmptySMTP: @@ -11,26 +11,26 @@ class SMTPClient: server = None def __init__(self): - if environ["EMAIL_HOST"] is None or len(environ["EMAIL_HOST"]) == 0: + if config("EMAIL_HOST") is None or len(config("EMAIL_HOST")) == 0: return - elif environ["EMAIL_USE_SSL"].lower() == "false": - self.server = smtplib.SMTP(host=environ["EMAIL_HOST"], port=int(environ["EMAIL_PORT"])) + elif config("EMAIL_USE_SSL").lower() == "false": + self.server = smtplib.SMTP(host=config("EMAIL_HOST"), port=int(config("EMAIL_PORT"))) else: - if len(environ["EMAIL_SSL_KEY"]) == 0 or len(environ["EMAIL_SSL_CERT"]) == 0: - self.server = smtplib.SMTP_SSL(host=environ["EMAIL_HOST"], port=int(environ["EMAIL_PORT"])) + if len(config("EMAIL_SSL_KEY")) == 0 or len(config("EMAIL_SSL_CERT")) == 0: + self.server = smtplib.SMTP_SSL(host=config("EMAIL_HOST"), port=int(config("EMAIL_PORT"))) else: - self.server = smtplib.SMTP_SSL(host=environ["EMAIL_HOST"], port=int(environ["EMAIL_PORT"]), - keyfile=environ["EMAIL_SSL_KEY"], 
certfile=environ["EMAIL_SSL_CERT"]) + self.server = smtplib.SMTP_SSL(host=config("EMAIL_HOST"), port=int(config("EMAIL_PORT")), + keyfile=config("EMAIL_SSL_KEY"), certfile=config("EMAIL_SSL_CERT")) def __enter__(self): if self.server is None: return EmptySMTP() self.server.ehlo() - if environ["EMAIL_USE_SSL"].lower() == "false" and environ["EMAIL_USE_TLS"].lower() == "true": + if config("EMAIL_USE_SSL").lower() == "false" and config("EMAIL_USE_TLS").lower() == "true": self.server.starttls() # smtplib docs recommend calling ehlo() before & after starttls() self.server.ehlo() - self.server.login(user=environ["EMAIL_USER"], password=environ["EMAIL_PASSWORD"]) + self.server.login(user=config("EMAIL_USER"), password=config("EMAIL_PASSWORD")) return self.server def __exit__(self, *args): diff --git a/api/entrypoint.sh b/api/entrypoint.sh index 4a8c790c8..60fefb5c0 100755 --- a/api/entrypoint.sh +++ b/api/entrypoint.sh @@ -1,3 +1,2 @@ #!/bin/bash -python env_handler.py -chalice local --no-autoreload --host 0.0.0.0 --stage ${ENTERPRISE_BUILD} +uvicorn app:app --host 0.0.0.0 diff --git a/api/env_handler.py b/api/env_handler.py deleted file mode 100644 index d56dd17c8..000000000 --- a/api/env_handler.py +++ /dev/null @@ -1,13 +0,0 @@ -from os import environ -import json - -with open('.chalice/config.json') as json_file: - data = json.load(json_file) - stages = data.get("stages", {}) - for s in stages.keys(): - if environ.get("SITE_URL") is None or environ["SITE_URL"] == '': - environ["SITE_URL"] = environ.get("S3_HOST", "") - data["stages"][s]["environment_variables"] = {**stages[s].get("environment_variables", {}), **environ} -with open('.chalice/config.json', 'w') as outfile: - json.dump(data, outfile, indent=2, sort_keys=True) - print("override config.json") diff --git a/api/or_dependencies.py b/api/or_dependencies.py new file mode 100644 index 000000000..7eee72c49 --- /dev/null +++ b/api/or_dependencies.py @@ -0,0 +1,43 @@ +import json +from typing import Callable + +from fastapi.routing import APIRoute +from starlette import status +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import Response, JSONResponse + +import schemas + + +async def OR_context(request: Request) -> schemas.CurrentContext: + if hasattr(request.state, "currentContext"): + return request.state.currentContext + else: + raise Exception("currentContext not found") + + +class ORRoute(APIRoute): + def get_route_handler(self) -> Callable: + original_route_handler = super().get_route_handler() + + async def custom_route_handler(request: Request) -> Response: + try: + response: Response = await original_route_handler(request) + except HTTPException as e: + if e.status_code // 100 == 4: + return JSONResponse(content={"errors": [e.detail]}, status_code=e.status_code) + else: + raise e + + if isinstance(response, JSONResponse): + response: JSONResponse = response + body = json.loads(response.body.decode('utf8')) + if response.status_code == 200 and body is not None and body.get("errors") is not None: + if "not found" in body["errors"][0]: + response.status_code = status.HTTP_404_NOT_FOUND + else: + response.status_code = status.HTTP_400_BAD_REQUEST + return response + + return custom_route_handler diff --git a/api/requirements.txt b/api/requirements.txt index f211cec45..0a239790c 100644 --- a/api/requirements.txt +++ b/api/requirements.txt @@ -3,9 +3,13 @@ urllib3==1.26.6 boto3==1.16.1 pyjwt==1.7.1 psycopg2-binary==2.8.6 -pytz==2020.1 -sentry-sdk==0.19.1
elasticsearch==7.9.1 jira==2.0.0 -schedule==1.1.0 -croniter==1.0.12 \ No newline at end of file + + + +fastapi==0.70.1 +uvicorn[standard]==0.16.0 +python-decouple==3.5 +pydantic[email]==1.8.2 +apscheduler==3.8.1 \ No newline at end of file diff --git a/api/chalicelib/blueprints/app/__init__.py b/api/routers/__init__.py similarity index 100% rename from api/chalicelib/blueprints/app/__init__.py rename to api/routers/__init__.py diff --git a/api/chalicelib/blueprints/subs/__init__.py b/api/routers/app/__init__.py similarity index 100% rename from api/chalicelib/blueprints/subs/__init__.py rename to api/routers/app/__init__.py diff --git a/api/routers/app/v1_api.py b/api/routers/app/v1_api.py new file mode 100644 index 000000000..1dcc7eb7d --- /dev/null +++ b/api/routers/app/v1_api.py @@ -0,0 +1,120 @@ +from fastapi import Depends, Body + +import schemas +from chalicelib.core import sessions, events, jobs, projects +from chalicelib.utils.TimeUTC import TimeUTC +from or_dependencies import OR_context +from routers.base import get_routers + +public_app, app, app_apikey = get_routers() + + +@app_apikey.get('/v1/{projectKey}/users/{userId}/sessions', tags=["api"]) +def get_user_sessions(projectKey: str, userId: str, start_date: int = None, end_date: int = None): + projectId = projects.get_internal_project_id(projectKey) + + return { + 'data': sessions.get_user_sessions( + project_id=projectId, + user_id=userId, + start_date=start_date, + end_date=end_date + ) + } + + +@app_apikey.get('/v1/{projectKey}/sessions/{sessionId}/events', tags=["api"]) +def get_session_events(projectKey: str, sessionId: int): + projectId = projects.get_internal_project_id(projectKey) + return { + 'data': events.get_by_sessionId2_pg( + project_id=projectId, + session_id=sessionId + ) + } + + +@app_apikey.get('/v1/{projectKey}/users/{userId}', tags=["api"]) +def get_user_details(projectKey: str, userId: str): + projectId = projects.get_internal_project_id(projectKey) + return { + 'data': sessions.get_session_user( + project_id=projectId, + user_id=userId + ) + } + + +@app_apikey.delete('/v1/{projectKey}/users/{userId}', tags=["api"]) +def schedule_to_delete_user_data(projectKey: str, userId: str): + projectId = projects.get_internal_project_id(projectKey) + data = {"action": "delete_user_data", + "reference_id": userId, + "description": f"Delete user sessions of userId = {userId}", + "start_at": TimeUTC.to_human_readable(TimeUTC.midnight(1))} + record = jobs.create(project_id=projectId, data=data) + return { + 'data': record + } + + +@app_apikey.get('/v1/{projectKey}/jobs', tags=["api"]) +def get_jobs(projectKey: str): + projectId = projects.get_internal_project_id(projectKey) + return { + 'data': jobs.get_all(project_id=projectId) + } + + +@app_apikey.get('/v1/{projectKey}/jobs/{jobId}', tags=["api"]) +def get_job(projectKey: str, jobId: int): + return { + 'data': jobs.get(job_id=jobId) + } + + +@app_apikey.delete('/v1/{projectKey}/jobs/{jobId}', tags=["api"]) +def cancel_job(projectKey: str, jobId: int): + job = jobs.get(job_id=jobId) + job_not_found = len(job.keys()) == 0 + + if job_not_found: + return {"errors": ["Job not found."]} + if job["status"] == jobs.JobStatus.COMPLETED or job["status"] == jobs.JobStatus.CANCELLED: + return {"errors": ["The requested job has already been canceled/completed."]} + + job["status"] = "cancelled" + return { + 'data': jobs.update(job_id=jobId, job=job) + } + + +@app_apikey.get('/v1/projects', tags=["api"]) +def get_projects(context: schemas.CurrentContext = Depends(OR_context)): +
records = projects.get_projects(tenant_id=context.tenant_id) + for record in records: + del record['projectId'] + + return { + 'data': records + } + + +@app_apikey.get('/v1/projects/{projectKey}', tags=["api"]) +def get_project(projectKey: str, context: schemas.CurrentContext = Depends(OR_context)): + return { + 'data': projects.get_project_by_key(tenant_id=context.tenant_id, project_key=projectKey) + } + + +@app_apikey.post('/v1/projects', tags=["api"]) +def create_project(data: schemas.CreateProjectSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + record = projects.create( + tenant_id=context.tenant_id, + user_id=None, + data=data, + skip_authorization=True + ) + del record['data']['projectId'] + return record diff --git a/api/routers/base.py b/api/routers/base.py new file mode 100644 index 000000000..ff7fe165f --- /dev/null +++ b/api/routers/base.py @@ -0,0 +1,12 @@ +from fastapi import APIRouter, Depends + +from auth.auth_apikey import APIKeyAuth +from auth.auth_jwt import JWTAuth +from or_dependencies import ORRoute + + +def get_routers() -> (APIRouter, APIRouter, APIRouter): + public_app = APIRouter(route_class=ORRoute) + app = APIRouter(dependencies=[Depends(JWTAuth())], route_class=ORRoute) + app_apikey = APIRouter(dependencies=[Depends(APIKeyAuth())], route_class=ORRoute) + return public_app, app, app_apikey diff --git a/api/routers/core.py b/api/routers/core.py new file mode 100644 index 000000000..792e4c739 --- /dev/null +++ b/api/routers/core.py @@ -0,0 +1,1144 @@ +from typing import Union + +from decouple import config +from fastapi import Depends, Body + +import schemas +from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, \ + sessions_metas, alerts, funnels, issues, integrations_manager, metadata, \ + log_tool_elasticsearch, log_tool_datadog, \ + log_tool_stackdriver, reset_password, sessions_favorite_viewed, \ + log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, errors, sessions, \ + log_tool_newrelic, announcements, log_tool_bugsnag, weekly_report, integration_jira_cloud, integration_github, \ + assist, heatmaps, mobile, signup, tenants, errors_favorite_viewed, boarding, notifications, webhook, users, \ + custom_metrics, saved_search +from chalicelib.core.collaboration_slack import Slack +from chalicelib.utils import email_helper +from chalicelib.utils.TimeUTC import TimeUTC +from or_dependencies import OR_context +from routers.base import get_routers + +public_app, app, app_apikey = get_routers() + + +@app.get('/{projectId}/sessions2/favorite', tags=["sessions"]) +def get_favorite_sessions(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return { + 'data': sessions.get_favorite_sessions(project_id=projectId, user_id=context.user_id, include_viewed=True) + } + + +@app.get('/{projectId}/sessions2/{sessionId}', tags=["sessions"]) +def get_session2(projectId: int, sessionId: int, context: schemas.CurrentContext = Depends(OR_context)): + data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True, user_id=context.user_id, + include_fav_viewed=True, group_metadata=True) + if data is None: + return {"errors": ["session not found"]} + + sessions_favorite_viewed.view_session(project_id=projectId, user_id=context.user_id, session_id=sessionId) + return { + 'data': data + } + + +@app.get('/{projectId}/sessions2/{sessionId}/favorite', tags=["sessions"]) +def add_remove_favorite_session2(projectId: int, sessionId: int, + context: 
schemas.CurrentContext = Depends(OR_context)): + return { + "data": sessions_favorite_viewed.favorite_session(project_id=projectId, user_id=context.user_id, + session_id=sessionId)} + + +@app.get('/{projectId}/sessions2/{sessionId}/assign', tags=["sessions"]) +def assign_session(projectId: int, sessionId, context: schemas.CurrentContext = Depends(OR_context)): + data = sessions_assignments.get_by_session(project_id=projectId, session_id=sessionId, + tenant_id=context.tenant_id, + user_id=context.user_id) + if "errors" in data: + return data + return { + 'data': data + } + + +@app.get('/{projectId}/sessions2/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"]) +def get_error_trace(projectId: int, sessionId: int, errorId: str, + context: schemas.CurrentContext = Depends(OR_context)): + data = errors.get_trace(project_id=projectId, error_id=errorId) + if "errors" in data: + return data + return { + 'data': data + } + + +@app.get('/{projectId}/sessions2/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"]) +def assign_session(projectId: int, sessionId: int, issueId: str, + context: schemas.CurrentContext = Depends(OR_context)): + data = sessions_assignments.get(project_id=projectId, session_id=sessionId, assignment_id=issueId, + tenant_id=context.tenant_id, user_id=context.user_id) + if "errors" in data: + return data + return { + 'data': data + } + + +@app.post('/{projectId}/sessions2/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"]) +@app.put('/{projectId}/sessions2/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"]) +def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schemas.CommentAssignmentSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + data = sessions_assignments.comment(tenant_id=context.tenant_id, project_id=projectId, + session_id=sessionId, assignment_id=issueId, + user_id=context.user_id, message=data.message) + if "errors" in data.keys(): + return data + return { + 'data': data + } + + +@app.get('/{projectId}/events/search', tags=["events"]) +def events_search(projectId: int, q: str, type: Union[schemas.FilterType, schemas.EventType] = None, key: str = None, + source: str = None, context: schemas.CurrentContext = Depends(OR_context)): + if len(q) == 0: + return {"data": []} + result = events.search_pg2(text=q, event_type=type, project_id=projectId, source=source, key=key) + return result + + +@app.post('/{projectId}/sessions/search2', tags=["sessions"]) +def sessions_search2(projectId: int, data: schemas.SessionsSearchPayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + data = sessions.search2_pg(data, projectId, user_id=context.user_id) + return {'data': data} + + +@app.get('/{projectId}/sessions/filters', tags=["sessions"]) +def session_filter_values(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {'data': sessions_metas.get_key_values(projectId)} + + +@app.get('/{projectId}/sessions/filters/top', tags=["sessions"]) +def session_top_filter_values(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {'data': sessions_metas.get_top_key_values(projectId)} + + +@app.get('/{projectId}/sessions/filters/search', tags=["sessions"]) +def get_session_filters_meta(projectId: int, q: str, type: str, + context: schemas.CurrentContext = Depends(OR_context)): + meta_type = type + if len(meta_type) == 0: + return {"data": []} + if len(q) == 0: + return {"data": []} 
+ return sessions_metas.search(project_id=projectId, meta_type=meta_type, text=q) + + +@app.post('/{projectId}/integrations/{integration}/notify/{integrationId}/{source}/{sourceId}', tags=["integrations"]) +@app.put('/{projectId}/integrations/{integration}/notify/{integrationId}/{source}/{sourceId}', tags=["integrations"]) +def integration_notify(projectId: int, integration: str, integrationId: int, source: str, sourceId: str, + data: schemas.IntegrationNotificationSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + comment = None + if data.comment: + comment = data.comment + if integration == "slack": + args = {"tenant_id": context.tenant_id, + "user": context.email, "comment": comment, "project_id": projectId, + "integration_id": integrationId} + if source == "sessions": + return Slack.share_session(session_id=sourceId, **args) + elif source == "errors": + return Slack.share_error(error_id=sourceId, **args) + return {"data": None} + + +@app.get('/integrations/sentry', tags=["integrations"]) +def get_all_sentry(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sentry.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/sentry', tags=["integrations"]) +def get_sentry(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sentry.get(project_id=projectId)} + + +@app.post('/{projectId}/integrations/sentry', tags=["integrations"]) +@app.put('/{projectId}/integrations/sentry', tags=["integrations"]) +def add_edit_sentry(projectId: int, data: schemas.SentrySchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sentry.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/sentry', tags=["integrations"]) +def delete_sentry(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sentry.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.get('/{projectId}/integrations/sentry/events/{eventId}', tags=["integrations"]) +def proxy_sentry(projectId: int, eventId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sentry.proxy_get(tenant_id=context.tenant_id, project_id=projectId, event_id=eventId)} + + +@app.get('/integrations/datadog', tags=["integrations"]) +def get_all_datadog(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_datadog.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/datadog', tags=["integrations"]) +def get_datadog(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_datadog.get(project_id=projectId)} + + +@app.post('/{projectId}/integrations/datadog', tags=["integrations"]) +@app.put('/{projectId}/integrations/datadog', tags=["integrations"]) +def add_edit_datadog(projectId: int, data: schemas.DatadogSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_datadog.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/datadog', tags=["integrations"]) +def delete_datadog(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_datadog.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.get('/integrations/stackdriver', tags=["integrations"]) +def 
get_all_stackdriver(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_stackdriver.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/stackdriver', tags=["integrations"]) +def get_stackdriver(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_stackdriver.get(project_id=projectId)} + + +@app.post('/{projectId}/integrations/stackdriver', tags=["integrations"]) +@app.put('/{projectId}/integrations/stackdriver', tags=["integrations"]) +def add_edit_stackdriver(projectId: int, data: schemas.StackdriverSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_stackdriver.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/stackdriver', tags=["integrations"]) +def delete_stackdriver(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_stackdriver.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.get('/integrations/newrelic', tags=["integrations"]) +def get_all_newrelic(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_newrelic.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/newrelic', tags=["integrations"]) +def get_newrelic(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_newrelic.get(project_id=projectId)} + + +@app.post('/{projectId}/integrations/newrelic', tags=["integrations"]) +@app.put('/{projectId}/integrations/newrelic', tags=["integrations"]) +def add_edit_newrelic(projectId: int, data: schemas.NewrelicSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_newrelic.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/newrelic', tags=["integrations"]) +def delete_newrelic(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_newrelic.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.get('/integrations/rollbar', tags=["integrations"]) +def get_all_rollbar(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_rollbar.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/rollbar', tags=["integrations"]) +def get_rollbar(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_rollbar.get(project_id=projectId)} + + +@app.post('/{projectId}/integrations/rollbar', tags=["integrations"]) +@app.put('/{projectId}/integrations/rollbar', tags=["integrations"]) +def add_edit_rollbar(projectId: int, data: schemas.RollbarSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_rollbar.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/rollbar', tags=["integrations"]) +def delete_rollbar(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_rollbar.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.post('/integrations/bugsnag/list_projects', tags=["integrations"]) +def list_projects_bugsnag(data: schemas.BugsnagBasicSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data":
log_tool_bugsnag.list_projects(auth_token=data.authorizationToken)} + + +@app.get('/integrations/bugsnag', tags=["integrations"]) +def get_all_bugsnag(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_bugsnag.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/bugsnag', tags=["integrations"]) +def get_bugsnag(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_bugsnag.get(project_id=projectId)} + + +@app.post('/{projectId}/integrations/bugsnag', tags=["integrations"]) +@app.put('/{projectId}/integrations/bugsnag', tags=["integrations"]) +def add_edit_bugsnag(projectId: int, data: schemas.BugsnagSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_bugsnag.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/bugsnag', tags=["integrations"]) +def delete_bugsnag(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_bugsnag.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.post('/integrations/cloudwatch/list_groups', tags=["integrations"]) +def list_groups_cloudwatch(data: schemas.CloudwatchBasicSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_cloudwatch.list_log_groups(aws_access_key_id=data.awsAccessKeyId, + aws_secret_access_key=data.awsSecretAccessKey, + region=data.region)} + + +@app.get('/integrations/cloudwatch', tags=["integrations"]) +def get_all_cloudwatch(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_cloudwatch.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/cloudwatch', tags=["integrations"]) +def get_cloudwatch(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_cloudwatch.get(project_id=projectId)} + + +@app.post('/{projectId}/integrations/cloudwatch', tags=["integrations"]) +@app.put('/{projectId}/integrations/cloudwatch', tags=["integrations"]) +def add_edit_cloudwatch(projectId: int, data: schemas.CloudwatchSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_cloudwatch.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/cloudwatch', tags=["integrations"]) +def delete_cloudwatch(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_cloudwatch.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.get('/integrations/elasticsearch', tags=["integrations"]) +def get_all_elasticsearch(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_elasticsearch.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/elasticsearch', tags=["integrations"]) +def get_elasticsearch(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_elasticsearch.get(project_id=projectId)} + + +@app.post('/integrations/elasticsearch/test', tags=["integrations"]) +def test_elasticsearch_connection(data: schemas.ElasticsearchBasicSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_elasticsearch.ping(tenant_id=context.tenant_id, **data.dict())} + + +@app.post('/{projectId}/integrations/elasticsearch', tags=["integrations"]) 
+@app.put('/{projectId}/integrations/elasticsearch', tags=["integrations"]) +def add_edit_elasticsearch(projectId: int, data: schemas.ElasticsearchSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return { + "data": log_tool_elasticsearch.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/elasticsearch', tags=["integrations"]) +def delete_elasticsearch(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_elasticsearch.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.get('/integrations/sumologic', tags=["integrations"]) +def get_all_sumologic(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sumologic.get_all(tenant_id=context.tenant_id)} + + +@app.get('/{projectId}/integrations/sumologic', tags=["integrations"]) +def get_sumologic(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sumologic.get(project_id=projectId)} + + +@app.post('/{projectId}/integrations/sumologic', tags=["integrations"]) +@app.put('/{projectId}/integrations/sumologic', tags=["integrations"]) +def add_edit_sumologic(projectId: int, data: schemas.SumologicSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sumologic.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())} + + +@app.delete('/{projectId}/integrations/sumologic', tags=["integrations"]) +def delete_sumologic(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": log_tool_sumologic.delete(tenant_id=context.tenant_id, project_id=projectId)} + + +@app.get('/integrations/issues', tags=["integrations"]) +def get_integration_status(context: schemas.CurrentContext = Depends(OR_context)): + error, integration = integrations_manager.get_integration(tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return {"data": {}} + return {"data": integration.get_obfuscated()} + + +@app.post('/integrations/jira', tags=["integrations"]) +@app.put('/integrations/jira', tags=["integrations"]) +def add_edit_jira_cloud(data: schemas.JiraGithubSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + error, integration = integrations_manager.get_integration(tool=integration_jira_cloud.PROVIDER, + tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + data.provider = integration_jira_cloud.PROVIDER + return {"data": integration.add_edit(data=data.dict())} + + +@app.post('/integrations/github', tags=["integrations"]) +@app.put('/integrations/github', tags=["integrations"]) +def add_edit_github(data: schemas.JiraGithubSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + error, integration = integrations_manager.get_integration(tool=integration_github.PROVIDER, + tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + data.provider = integration_github.PROVIDER + return {"data": integration.add_edit(data=data.dict())} + + +@app.delete('/integrations/issues', tags=["integrations"]) +def delete_default_issue_tracking_tool(context: schemas.CurrentContext = Depends(OR_context)): + error, integration = integrations_manager.get_integration(tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + return {"data": integration.delete()} + + 
+@app.delete('/integrations/jira', tags=["integrations"]) +def delete_jira_cloud(context: schemas.CurrentContext = Depends(OR_context)): + error, integration = integrations_manager.get_integration(tool=integration_jira_cloud.PROVIDER, + tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + return {"data": integration.delete()} + + +@app.delete('/integrations/github', tags=["integrations"]) +def delete_github(context: schemas.CurrentContext = Depends(OR_context)): + error, integration = integrations_manager.get_integration(tool=integration_github.PROVIDER, + tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + return {"data": integration.delete()} + + +@app.get('/integrations/issues/list_projects', tags=["integrations"]) +def get_all_issue_tracking_projects(context: schemas.CurrentContext = Depends(OR_context)): + error, integration = integrations_manager.get_integration(tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + data = integration.issue_handler.get_projects() + if "errors" in data: + return data + return {"data": data} + + +@app.get('/integrations/issues/{integrationProjectId}', tags=["integrations"]) +def get_integration_metadata(integrationProjectId: int, context: schemas.CurrentContext = Depends(OR_context)): + error, integration = integrations_manager.get_integration(tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + data = integration.issue_handler.get_metas(integrationProjectId) + if "errors" in data.keys(): + return data + return {"data": data} + + +@app.get('/{projectId}/assignments', tags=["assignment"]) +def get_all_assignments(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + data = sessions_assignments.get_all(project_id=projectId, user_id=context.user_id) + return { + 'data': data + } + + +@app.post('/{projectId}/sessions2/{sessionId}/assign/projects/{integrationProjectId}', tags=["assignment"]) +@app.put('/{projectId}/sessions2/{sessionId}/assign/projects/{integrationProjectId}', tags=["assignment"]) +def create_issue_assignment(projectId: int, sessionId: int, integrationProjectId, + data: schemas.AssignmentSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + data = sessions_assignments.create_new_assignment(tenant_id=context.tenant_id, project_id=projectId, + session_id=sessionId, + creator_id=context.user_id, assignee=data.assignee, + description=data.description, title=data.title, + issue_type=data.issue_type, + integration_project_id=integrationProjectId) + if "errors" in data.keys(): + return data + return { + 'data': data + } + + +@app.get('/{projectId}/gdpr', tags=["projects", "gdpr"]) +def get_gdpr(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": projects.get_gdpr(project_id=projectId)} + + +@app.post('/{projectId}/gdpr', tags=["projects", "gdpr"]) +@app.put('/{projectId}/gdpr', tags=["projects", "gdpr"]) +def edit_gdpr(projectId: int, data: schemas.GdprSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": projects.edit_gdpr(project_id=projectId, gdpr=data.dict())} + + +@public_app.post('/password/reset-link', tags=["reset password"]) +@public_app.put('/password/reset-link', tags=["reset password"]) +def reset_password_handler(data: schemas.ForgetPasswordPayloadSchema = Body(...)): + if len(data.email) < 5: + return {"errors": ["please provide a valid 
email address"]} + return reset_password.reset(data) + + +@app.get('/{projectId}/metadata', tags=["metadata"]) +def get_metadata(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": metadata.get(project_id=projectId)} + + +@app.post('/{projectId}/metadata/list', tags=["metadata"]) +@app.put('/{projectId}/metadata/list', tags=["metadata"]) +def add_edit_delete_metadata(projectId: int, data: schemas.MetadataListSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return metadata.add_edit_delete(tenant_id=context.tenant_id, project_id=projectId, new_metas=data.list) + + +@app.post('/{projectId}/metadata', tags=["metadata"]) +@app.put('/{projectId}/metadata', tags=["metadata"]) +def add_metadata(projectId: int, data: schemas.MetadataBasicSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return metadata.add(tenant_id=context.tenant_id, project_id=projectId, new_name=data.key) + + +@app.post('/{projectId}/metadata/{index}', tags=["metadata"]) +@app.put('/{projectId}/metadata/{index}', tags=["metadata"]) +def edit_metadata(projectId: int, index: int, data: schemas.MetadataBasicSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return metadata.edit(tenant_id=context.tenant_id, project_id=projectId, index=int(index), + new_name=data.key) + + +@app.delete('/{projectId}/metadata/{index}', tags=["metadata"]) +def delete_metadata(projectId: int, index: int, context: schemas.CurrentContext = Depends(OR_context)): + return metadata.delete(tenant_id=context.tenant_id, project_id=projectId, index=index) + + +@app.get('/{projectId}/metadata/search', tags=["metadata"]) +def search_metadata(projectId: int, q: str, key: str, context: schemas.CurrentContext = Depends(OR_context)): + if len(q) == 0 and len(key) == 0: + return {"data": []} + if len(q) == 0: + return {"errors": ["please provide a value for search"]} + if len(key) == 0: + return {"errors": ["please provide a key for search"]} + return metadata.search(tenant_id=context.tenant_id, project_id=projectId, value=q, key=key) + + +@app.get('/{projectId}/integration/sources', tags=["integrations"]) +def search_integrations(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return log_tools.search(project_id=projectId) + + +@public_app.post('/async/email_assignment', tags=["async mail"]) +def async_send_signup_emails(data: schemas.EmailPayloadSchema = Body(...)): + if data.auth != config("async_Token"): + return {} + email_helper.send_assign_session(recipient=data.email, link=data.link, message=data.message) + + +# @public_app.post('/async/basic/member_invitation', tags=["async mail"]) +# def async_basic_emails(data: schemas.MemberInvitationPayloadSchema = Body(...)): +# if data.auth != config("async_Token"): +# return {} +# email_helper.send_team_invitation(recipient=data.email, invitation_link=data.invitation_link, +# client_id=data.client_id, sender_name=data.sender_name) + + +@app.get('/{projectId}/sample_rate', tags=["projects"]) +def get_capture_status(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": projects.get_capture_status(project_id=projectId)} + + +@app.post('/{projectId}/sample_rate', tags=["projects"]) +@app.put('/{projectId}/sample_rate', tags=["projects"]) +def update_capture_status(projectId: int, data: schemas.SampleRateSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": 
projects.update_capture_status(project_id=projectId, changes=data.dict())} + + +@app.get('/announcements', tags=["announcements"]) +def get_all_announcements(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": announcements.get_all(context.user_id)} + + +@app.get('/announcements/view', tags=["announcements"]) +def get_all_announcements(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": announcements.view(user_id=context.user_id)} + + +@app.post('/{projectId}/errors/merge', tags=["errors"]) +def errors_merge(projectId: int, data: schemas.ErrorIdsPayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + data = errors.merge(error_ids=data.errors) + return data + + +@app.get('/show_banner', tags=["banner"]) +def errors_merge(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": False} + + +@app.post('/{projectId}/alerts', tags=["alerts"]) +@app.put('/{projectId}/alerts', tags=["alerts"]) +def create_alert(projectId: int, data: schemas.AlertSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return alerts.create(projectId, data) + + +@app.get('/{projectId}/alerts', tags=["alerts"]) +def get_all_alerts(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": alerts.get_all(projectId)} + + +@app.get('/{projectId}/alerts/{alertId}', tags=["alerts"]) +def get_alert(projectId: int, alertId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": alerts.get(alertId)} + + +@app.post('/{projectId}/alerts/{alertId}', tags=["alerts"]) +@app.put('/{projectId}/alerts/{alertId}', tags=["alerts"]) +def update_alert(projectId: int, alertId: int, data: schemas.AlertSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return alerts.update(alertId, data) + + +@app.delete('/{projectId}/alerts/{alertId}', tags=["alerts"]) +def delete_alert(projectId: int, alertId: int, context: schemas.CurrentContext = Depends(OR_context)): + return alerts.delete(projectId, alertId) + + +@app.post('/{projectId}/funnels', tags=["funnels"]) +@app.put('/{projectId}/funnels', tags=["funnels"]) +def add_funnel(projectId: int, data: schemas.FunnelSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return funnels.create(project_id=projectId, + user_id=context.user_id, + name=data.name, + filter=data.filter.dict(), + is_public=data.is_public) + + +@app.get('/{projectId}/funnels', tags=["funnels"]) +def get_funnels(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": funnels.get_by_user(project_id=projectId, + user_id=context.user_id, + range_value=None, + start_date=None, + end_date=None, + details=False)} + + +@app.get('/{projectId}/funnels/details', tags=["funnels"]) +def get_funnels_with_details(projectId: int, rangeValue: str = None, startDate: int = None, endDate: int = None, + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": funnels.get_by_user(project_id=projectId, + user_id=context.user_id, + range_value=rangeValue, + start_date=startDate, + end_date=endDate, + details=True)} + + +@app.get('/{projectId}/funnels/issue_types', tags=["funnels"]) +def get_possible_issue_types(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": funnels.get_possible_issue_types(project_id=projectId)} + + +@app.get('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"]) +def get_funnel_insights(projectId: int, funnelId: 
int, rangeValue: str = None, startDate: int = None, + endDate: int = None, context: schemas.CurrentContext = Depends(OR_context)): + return funnels.get_top_insights(funnel_id=funnelId, project_id=projectId, + range_value=rangeValue, + start_date=startDate, + end_date=endDate) + + +@app.post('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"]) +@app.put('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"]) +def get_funnel_insights_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelInsightsPayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return funnels.get_top_insights_on_the_fly(funnel_id=funnelId, project_id=projectId, data=data.dict()) + + +@app.get('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"]) +def get_funnel_issues(projectId: int, funnelId, rangeValue: str = None, startDate: int = None, endDate: int = None, + context: schemas.CurrentContext = Depends(OR_context)): + return funnels.get_issues(funnel_id=funnelId, project_id=projectId, + range_value=rangeValue, + start_date=startDate, end_date=endDate) + + +@app.post('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"]) +@app.put('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"]) +def get_funnel_issues_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelSearchPayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": funnels.get_issues_on_the_fly(funnel_id=funnelId, project_id=projectId, data=data.dict())} + + +@app.get('/{projectId}/funnels/{funnelId}/sessions', tags=["funnels"]) +def get_funnel_sessions(projectId: int, funnelId: int, rangeValue: str = None, startDate: int = None, + endDate: int = None, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": funnels.get_sessions(funnel_id=funnelId, user_id=context.user_id, project_id=projectId, + range_value=rangeValue, + start_date=startDate, + end_date=endDate)} + + +@app.post('/{projectId}/funnels/{funnelId}/sessions', tags=["funnels"]) +@app.put('/{projectId}/funnels/{funnelId}/sessions', tags=["funnels"]) +def get_funnel_sessions_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelSearchPayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": funnels.get_sessions_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId, + data=data.dict())} + + +@app.get('/{projectId}/funnels/issues/{issueId}/sessions', tags=["funnels"]) +def get_issue_sessions(projectId: int, issueId: str, startDate: int = None, endDate: int = None, + context: schemas.CurrentContext = Depends(OR_context)): + issue = issues.get(project_id=projectId, issue_id=issueId) + return { + "data": {"sessions": sessions.search_by_issue(user_id=context.user_id, project_id=projectId, issue=issue, + start_date=startDate, + end_date=endDate), + "issue": issue}} + + +@app.post('/{projectId}/funnels/{funnelId}/issues/{issueId}/sessions', tags=["funnels"]) +@app.put('/{projectId}/funnels/{funnelId}/issues/{issueId}/sessions', tags=["funnels"]) +def get_funnel_issue_sessions(projectId: int, funnelId: int, issueId: str, + data: schemas.FunnelSearchPayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + data = funnels.search_by_issue(project_id=projectId, user_id=context.user_id, issue_id=issueId, + funnel_id=funnelId, data=data.dict()) + if "errors" in data: + return data + if data.get("issue") is None: + data["issue"] = issues.get(project_id=projectId, 
+    return {
+        "data": data
+    }
+
+
+@app.get('/{projectId}/funnels/{funnelId}', tags=["funnels"])
+def get_funnel(projectId: int, funnelId: int, context: schemas.CurrentContext = Depends(OR_context)):
+    data = funnels.get(funnel_id=funnelId, project_id=projectId, user_id=context.user_id)
+    if data is None:
+        return {"errors": ["funnel not found"]}
+    return {"data": data}
+
+
+@app.post('/{projectId}/funnels/{funnelId}', tags=["funnels"])
+@app.put('/{projectId}/funnels/{funnelId}', tags=["funnels"])
+def edit_funnel(projectId: int, funnelId: int, data: schemas.UpdateFunnelSchema = Body(...),
+                context: schemas.CurrentContext = Depends(OR_context)):
+    return funnels.update(funnel_id=funnelId,
+                          user_id=context.user_id,
+                          name=data.name,
+                          filter=data.filter.dict(),
+                          is_public=data.is_public,
+                          project_id=projectId)
+
+
+@app.delete('/{projectId}/funnels/{funnelId}', tags=["funnels"])
+def delete_funnel(projectId: int, funnelId: int, context: schemas.CurrentContext = Depends(OR_context)):
+    return funnels.delete(user_id=context.user_id, funnel_id=funnelId, project_id=projectId)
+
+
+@app_apikey.put('/{projectKey}/sourcemaps/', tags=["sourcemaps"])
+@app_apikey.put('/{projectKey}/sourcemaps', tags=["sourcemaps"])
+def sign_sourcemap_for_upload(projectKey: str, data: schemas.SourcemapUploadPayloadSchema = Body(...),
+                              context: schemas.CurrentContext = Depends(OR_context)):
+    project_id = projects.get_internal_project_id(projectKey)
+    if project_id is None:
+        return {"errors": ["Project not found."]}
+
+    return {"data": sourcemaps.presign_upload_urls(project_id=project_id, urls=data.urls)}
+
+
+@app.get('/config/weekly_report', tags=["weekly report config"])
+def get_weekly_report_config(context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": weekly_report.get_config(user_id=context.user_id)}
+
+
+@app.post('/config/weekly_report', tags=["weekly report config"])
+@app.put('/config/weekly_report', tags=["weekly report config"])
+def edit_weekly_report_config(data: schemas.WeeklyReportConfigSchema = Body(...),
+                              context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": weekly_report.edit_config(user_id=context.user_id, weekly_report=data.weekly_report)}
+
+
+@app.get('/{projectId}/issue_types', tags=["issues"])
+def issue_types(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": issues.get_all_types()}
+
+
+@app.get('/issue_types', tags=["issues"])
+def all_issue_types(context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": issues.get_all_types()}
+
+
+@app.get('/{projectId}/assist/sessions', tags=["assist"])
+def sessions_live(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
+    data = assist.get_live_sessions(projectId)
+    return {'data': data}
+
+
+@app.post('/{projectId}/assist/sessions', tags=["assist"])
+def sessions_live_search(projectId: int, data: schemas.AssistSearchPayloadSchema = Body(...),
+                         context: schemas.CurrentContext = Depends(OR_context)):
+    data = assist.get_live_sessions(projectId, filters=data.filters)
+    return {'data': data}
+
+
+@app.post('/{projectId}/heatmaps/url', tags=["heatmaps"])
+def get_heatmaps_by_url(projectId: int, data: schemas.GetHeatmapPayloadSchema = Body(...),
+                        context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": heatmaps.get_by_url(project_id=projectId, data=data.dict())}
+
+
+@app.post('/{projectId}/mobile/{sessionId}/urls', tags=['mobile'])
+def mobile_sign(projectId: int, sessionId: int, data: schemas.MobileSignPayloadSchema = Body(...),
+                context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": mobile.sign_keys(project_id=projectId, session_id=sessionId, keys=data.keys)}
+
+
+@public_app.put('/signup', tags=['signup'])
+@public_app.post('/signup', tags=['signup'])
+def signup_handler(data: schemas.UserSignupSchema = Body(...)):
+    return signup.create_step1(data)
+
+
+@app.post('/projects', tags=['projects'])
+@app.put('/projects', tags=['projects'])
+def create_project(data: schemas.CreateProjectSchema = Body(...),
+                   context: schemas.CurrentContext = Depends(OR_context)):
+    return projects.create(tenant_id=context.tenant_id, user_id=context.user_id, data=data)
+
+
+@app.post('/projects/{projectId}', tags=['projects'])
+@app.put('/projects/{projectId}', tags=['projects'])
+def edit_project(projectId: int, data: schemas.CreateProjectSchema = Body(...),
+                 context: schemas.CurrentContext = Depends(OR_context)):
+    return projects.edit(tenant_id=context.tenant_id, user_id=context.user_id, data=data, project_id=projectId)
+
+
+@app.delete('/projects/{projectId}', tags=['projects'])
+def delete_project(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
+    return projects.delete(tenant_id=context.tenant_id, user_id=context.user_id, project_id=projectId)
+
+
+@app.get('/client/new_api_key', tags=['client'])
+def generate_new_tenant_token(context: schemas.CurrentContext = Depends(OR_context)):
+    return {
+        'data': tenants.generate_new_api_key(context.tenant_id)
+    }
+
+
+@app.put('/client', tags=['client'])
+@app.post('/client', tags=['client'])
+def edit_client(data: schemas.UpdateTenantSchema = Body(...),
+                context: schemas.CurrentContext = Depends(OR_context)):
+    return tenants.update(tenant_id=context.tenant_id, user_id=context.user_id, data=data)
+
+
+@app.post('/{projectId}/errors/search', tags=['errors'])
+def errors_search(projectId: int, status: str = "ALL", favorite: Union[str, bool] = False,
+                  data: schemas.SearchErrorsSchema = Body(...),
+                  context: schemas.CurrentContext = Depends(OR_context)):
+    if isinstance(favorite, str):
+        favorite = len(favorite) == 0
+    return errors.search(data.dict(), projectId, user_id=context.user_id, status=status,
+                         favorite_only=favorite)
+
+
+@app.get('/{projectId}/errors/stats', tags=['errors'])
+def errors_stats(projectId: int, startTimestamp: int, endTimestamp: int,
+                 context: schemas.CurrentContext = Depends(OR_context)):
+    return errors.stats(projectId, user_id=context.user_id, startTimestamp=startTimestamp, endTimestamp=endTimestamp)
+
+
+@app.get('/{projectId}/errors/{errorId}', tags=['errors'])
+def errors_get_details(projectId: int, errorId: str, density24: int = 24, density30: int = 30,
+                       context: schemas.CurrentContext = Depends(OR_context)):
+    data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
+                              **{"density24": density24, "density30": density30})
+    if data.get("data") is not None:
+        errors_favorite_viewed.viewed_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
+    return data
+
+
+@app.get('/{projectId}/errors/{errorId}/stats', tags=['errors'])
+def errors_get_details_right_column(projectId: int, errorId: str, startDate: int = TimeUTC.now(-7),
+                                    endDate: int = TimeUTC.now(), density: int = 7,
+                                    context: schemas.CurrentContext = Depends(OR_context)):
+    data = errors.get_details_chart(project_id=projectId, user_id=context.user_id, error_id=errorId,
+                                    **{"startDate": startDate, "endDate": endDate, "density": density})
+    return data
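
One caveat worth flagging in the endpoint above (and in the favorite/sessions route that follows): defaults such as `startDate: int = TimeUTC.now(-7)` are evaluated once, when the module is imported, so a long-running worker keeps serving the time window computed at startup. A minimal sketch of the usual workaround (an editorial illustration, not part of this diff) is to default to None and resolve per request:

from typing import Optional

from chalicelib.utils.TimeUTC import TimeUTC


def resolve_window(startDate: Optional[int] = None, endDate: Optional[int] = None):
    # TimeUTC.now(...) runs on every call here, not once at definition time.
    start = startDate if startDate is not None else TimeUTC.now(-7)
    end = endDate if endDate is not None else TimeUTC.now()
    return start, end
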
+
+
+@app.get('/{projectId}/errors/{errorId}/sourcemaps', tags=['errors'])
+def errors_get_details_sourcemaps(projectId: int, errorId: str,
+                                  context: schemas.CurrentContext = Depends(OR_context)):
+    data = errors.get_trace(project_id=projectId, error_id=errorId)
+    if "errors" in data:
+        return data
+    return {
+        'data': data
+    }
+
+
+@app.get('/{projectId}/errors/{errorId}/{action}', tags=["errors"])
+def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
+                              endDate: int = TimeUTC.now(), context: schemas.CurrentContext = Depends(OR_context)):
+    if action == "favorite":
+        return errors_favorite_viewed.favorite_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
+    elif action == "sessions":
+        start_date = startDate
+        end_date = endDate
+        return {
+            "data": errors.get_sessions(project_id=projectId, user_id=context.user_id, error_id=errorId,
+                                        start_date=start_date, end_date=end_date)}
+    elif action in list(errors.ACTION_STATE.keys()):
+        return errors.change_state(project_id=projectId, user_id=context.user_id, error_id=errorId, action=action)
+    else:
+        return {"errors": ["undefined action"]}
+
+
+@app.get('/notifications', tags=['notifications'])
+def get_notifications(context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": notifications.get_all(tenant_id=context.tenant_id, user_id=context.user_id)}
+
+
+@app.get('/notifications/{notificationId}/view', tags=['notifications'])
+def view_notifications(notificationId: int, context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": notifications.view_notification(notification_ids=[notificationId], user_id=context.user_id)}
+
+
+@app.post('/notifications/view', tags=['notifications'])
+@app.put('/notifications/view', tags=['notifications'])
+def batch_view_notifications(data: schemas.NotificationsViewSchema,
+                             context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": notifications.view_notification(notification_ids=data.ids,
+                                                    startTimestamp=data.startTimestamp,
+                                                    endTimestamp=data.endTimestamp,
+                                                    user_id=context.user_id,
+                                                    tenant_id=context.tenant_id)}
+
+
+@public_app.post('/notifications', tags=['notifications'])
+@public_app.put('/notifications', tags=['notifications'])
+def create_notifications(data: schemas.CreateNotificationSchema):
+    if data.token != config("async_Token"):
+        return {"errors": ["missing token"]}
+    return notifications.create(data.notifications)
+
+
+@app.get('/boarding', tags=['boarding'])
+def get_boarding_state(context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": boarding.get_state(tenant_id=context.tenant_id)}
+
+
+@app.get('/boarding/installing', tags=['boarding'])
+def get_boarding_state_installing(context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": boarding.get_state_installing(tenant_id=context.tenant_id)}
+
+
+@app.get('/boarding/identify-users', tags=["boarding"])
+def get_boarding_state_identify_users(context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": boarding.get_state_identify_users(tenant_id=context.tenant_id)}
+
+
+@app.get('/boarding/manage-users', tags=["boarding"])
+def get_boarding_state_manage_users(context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": boarding.get_state_manage_users(tenant_id=context.tenant_id)}
+
+
+@app.get('/boarding/integrations', tags=["boarding"])
+def get_boarding_state_integrations(context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data":
boarding.get_state_integrations(tenant_id=context.tenant_id)} + + +@app.get('/integrations/slack/channels', tags=["integrations"]) +def get_slack_channels(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": webhook.get_by_type(tenant_id=context.tenant_id, webhook_type='slack')} + + +@app.get('/integrations/slack/{integrationId}', tags=["integrations"]) +def get_slack_webhook(integrationId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": webhook.get(tenant_id=context.tenant_id, webhook_id=integrationId)} + + +@app.delete('/integrations/slack/{integrationId}', tags=["integrations"]) +def delete_slack_integration(integrationId: int, context: schemas.CurrentContext = Depends(OR_context)): + return webhook.delete(context.tenant_id, integrationId) + + +@app.post('/webhooks', tags=["webhooks"]) +@app.put('/webhooks', tags=["webhooks"]) +def add_edit_webhook(data: schemas.CreateEditWebhookSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": webhook.add_edit(tenant_id=context.tenant_id, data=data.dict(), replace_none=True)} + + +@app.get('/webhooks', tags=["webhooks"]) +def get_webhooks(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": webhook.get_by_tenant(tenant_id=context.tenant_id, replace_none=True)} + + +@app.delete('/webhooks/{webhookId}', tags=["webhooks"]) +def delete_webhook(webhookId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": webhook.delete(tenant_id=context.tenant_id, webhook_id=webhookId)} + + +@app.get('/client/members', tags=["client"]) +def get_members(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": users.get_members(tenant_id=context.tenant_id)} + + +@app.get('/client/members/{memberId}/reset', tags=["client"]) +def reset_reinvite_member(memberId: int, context: schemas.CurrentContext = Depends(OR_context)): + return users.reset_member(tenant_id=context.tenant_id, editor_id=context.user_id, user_id_to_update=memberId) + + +@app.delete('/client/members/{memberId}', tags=["client"]) +def delete_member(memberId: int, context: schemas.CurrentContext = Depends(OR_context)): + return users.delete_member(tenant_id=context.tenant_id, user_id=context.user_id, id_to_delete=memberId) + + +@app.get('/account/new_api_key', tags=["account"]) +def generate_new_user_token(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": users.generate_new_api_key(user_id=context.user_id)} + + +@app.post('/account', tags=["account"]) +@app.put('/account', tags=["account"]) +def edit_account(data: schemas.EditUserSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return users.edit(tenant_id=context.tenant_id, user_id_to_update=context.user_id, changes=data.dict(), + editor_id=context.user_id) + + +@app.post('/account/password', tags=["account"]) +@app.put('/account/password', tags=["account"]) +def change_client_password(data: schemas.EditUserPasswordSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return users.change_password(email=context.email, old_password=data.old_password, + new_password=data.new_password, tenant_id=context.tenant_id, + user_id=context.user_id) + + +@app.post('/{projectId}/custom_metrics/try', tags=["customMetrics"]) +@app.put('/{projectId}/custom_metrics/try', tags=["customMetrics"]) +def try_custom_metric(projectId: int, data: schemas.TryCustomMetricsSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + 
return {"data": custom_metrics.try_live(project_id=projectId, data=data)} + + +@app.post('/{projectId}/custom_metrics/chart', tags=["customMetrics"]) +@app.put('/{projectId}/custom_metrics/chart', tags=["customMetrics"]) +def get_custom_metric_chart(projectId: int, data: schemas.CustomMetricChartPayloadSchema2 = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.make_chart(project_id=projectId, user_id=context.user_id, metric_id=data.metric_id, + data=data)} + + +@app.get('/{projectId}/custom_metrics/series', tags=["customMetrics"]) +def get_series_for_alert(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.get_series_for_alert(project_id=projectId, user_id=context.user_id)} + + +@app.post('/{projectId}/custom_metrics', tags=["customMetrics"]) +@app.put('/{projectId}/custom_metrics', tags=["customMetrics"]) +def add_custom_metric(projectId: int, data: schemas.CreateCustomMetricsSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return custom_metrics.create(project_id=projectId, user_id=context.user_id, data=data) + + +@app.get('/{projectId}/custom_metrics', tags=["customMetrics"]) +def get_custom_metrics(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)} + + +@app.get('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"]) +def get_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.get(project_id=projectId, user_id=context.user_id, metric_id=metric_id)} + + +@app.post('/{projectId}/custom_metrics/{metric_id}/chart', tags=["customMetrics"]) +def get_custom_metric_chart(projectId: int, metric_id: int, data: schemas.CustomMetricChartPayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.make_chart(project_id=projectId, user_id=context.user_id, metric_id=metric_id, + data=data)} + + +@app.post('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"]) +@app.put('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"]) +def update_custom_metric(projectId: int, metric_id: int, data: schemas.UpdateCustomMetricsSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return { + "data": custom_metrics.update(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)} + + +@app.delete('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"]) +def delete_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": custom_metrics.delete(project_id=projectId, user_id=context.user_id, metric_id=metric_id)} + + +@app.post('/{projectId}/saved_search', tags=["savedSearch"]) +@app.put('/{projectId}/saved_search', tags=["savedSearch"]) +def add_saved_search(projectId: int, data: schemas.SavedSearchSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return saved_search.create(project_id=projectId, user_id=context.user_id, data=data) + + +@app.get('/{projectId}/saved_search', tags=["savedSearch"]) +def get_saved_searches(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": saved_search.get_all(project_id=projectId, user_id=context.user_id)} + + +@app.get('/{projectId}/saved_search/{search_id}', tags=["savedSearch"]) +def 
diff --git a/api/routers/core_dynamic.py b/api/routers/core_dynamic.py
new file mode 100644
index 000000000..c149266b5
--- /dev/null
+++ b/api/routers/core_dynamic.py
@@ -0,0 +1,233 @@
+from typing import Optional
+
+from decouple import config
+from fastapi import Body, Depends, HTTPException, status, BackgroundTasks
+from starlette.responses import RedirectResponse
+
+import schemas
+from chalicelib.core import assist
+from chalicelib.core import integrations_manager
+from chalicelib.core import sessions
+from chalicelib.core import tenants, users, metadata, projects, license
+from chalicelib.core import webhook
+from chalicelib.core.collaboration_slack import Slack
+from chalicelib.utils import captcha
+from chalicelib.utils import helper
+from or_dependencies import OR_context
+from routers.base import get_routers
+
+public_app, app, app_apikey = get_routers()
+
+
+@public_app.get('/signup', tags=['signup'])
+def get_all_signup():
+    return {"data": {"tenants": tenants.tenants_exists(),
+                     "sso": None,
+                     "ssoProvider": None,
+                     "edition": helper.get_edition()}}
+
+
+@public_app.post('/login', tags=["authentication"])
+def login(data: schemas.UserLoginSchema = Body(...)):
+    if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="Invalid captcha."
+        )
+
+    r = users.authenticate(data.email, data.password, for_plugin=False)
+    if r is None:
+        raise HTTPException(
+            status_code=status.HTTP_401_UNAUTHORIZED,
+            detail="You’ve entered invalid Email or Password."
+ ) + + tenant_id = r.pop("tenantId") + + r["limits"] = { + "teamMember": -1, + "projects": -1, + "metadata": metadata.get_remaining_metadata_with_count(tenant_id)} + + c = tenants.get_by_tenant_id(tenant_id) + c.pop("createdAt") + c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, + stack_integrations=True, version=True) + c["smtp"] = helper.has_smtp() + c["iceServers"] = assist.get_ice_servers() + r["smtp"] = c["smtp"] + r["iceServers"] = c["iceServers"] + return { + 'jwt': r.pop('jwt'), + 'data': { + "user": r, + "client": c + } + } + + +@app.get('/account', tags=['accounts']) +def get_account(context: schemas.CurrentContext = Depends(OR_context)): + r = users.get(tenant_id=context.tenant_id, user_id=context.user_id) + return { + 'data': { + **r, + "limits": { + "teamMember": -1, + "projects": -1, + "metadata": metadata.get_remaining_metadata_with_count(context.tenant_id) + }, + **license.get_status(context.tenant_id), + "smtp": helper.has_smtp(), + "iceServers": assist.get_ice_servers() + } + } + + +@app.get('/projects/limit', tags=['projects']) +def get_projects_limit(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": { + "current": projects.count_by_tenant(tenant_id=context.tenant_id), + "remaining": -1 + }} + + +@app.get('/projects/{projectId}', tags=['projects']) +def get_project(projectId: int, last_tracker_version: Optional[str] = None, + context: schemas.CurrentContext = Depends(OR_context)): + data = projects.get_project(tenant_id=context.tenant_id, project_id=projectId, include_last_session=True, + include_gdpr=True, last_tracker_version=last_tracker_version) + if data is None: + return {"errors": ["project not found"]} + return {"data": data} + + +@app.put('/integrations/slack', tags=['integrations']) +@app.post('/integrations/slack', tags=['integrations']) +def add_slack_client(data: schemas.AddSlackSchema, context: schemas.CurrentContext = Depends(OR_context)): + n = Slack.add_channel(tenant_id=context.tenant_id, url=data.url, name=data.name) + if n is None: + return { + "errors": ["We couldn't send you a test message on your Slack channel. Please verify your webhook url."] + } + return {"data": n} + + +@app.put('/integrations/slack/{integrationId}', tags=['integrations']) +@app.post('/integrations/slack/{integrationId}', tags=['integrations']) +def edit_slack_integration(integrationId: int, data: schemas.EditSlackSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + if len(data.url) > 0: + old = webhook.get(tenant_id=context.tenant_id, webhook_id=integrationId) + if old["endpoint"] != data.url: + if not Slack.say_hello(data.url): + return { + "errors": [ + "We couldn't send you a test message on your Slack channel. 
Please verify your webhook url."] + } + return {"data": webhook.update(tenant_id=context.tenant_id, webhook_id=integrationId, + changes={"name": data.name, "endpoint": data.url})} + + +# this endpoint supports both jira & github based on `provider` attribute +@app.post('/integrations/issues', tags=["integrations"]) +def add_edit_jira_cloud_github(data: schemas.JiraGithubSchema, + context: schemas.CurrentContext = Depends(OR_context)): + provider = data.provider.upper() + error, integration = integrations_manager.get_integration(tool=provider, tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + return {"data": integration.add_edit(data=data.dict())} + + +@app.post('/client/members', tags=["client"]) +@app.put('/client/members', tags=["client"]) +def add_member(background_tasks: BackgroundTasks, data: schemas.CreateMemberSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return users.create_member(tenant_id=context.tenant_id, user_id=context.user_id, data=data.dict(), + background_tasks=background_tasks) + + +@public_app.get('/users/invitation', tags=['users']) +def process_invitation_link(token: str): + if token is None or len(token) < 64: + return {"errors": ["please provide a valid invitation"]} + user = users.get_by_invitation_token(token) + if user is None: + return {"errors": ["invitation not found"]} + if user["expiredInvitation"]: + return {"errors": ["expired invitation, please ask your admin to send a new one"]} + if user["expiredChange"] is not None and not user["expiredChange"] \ + and user["changePwdToken"] is not None and user["changePwdAge"] < -5 * 60: + pass_token = user["changePwdToken"] + else: + pass_token = users.allow_password_change(user_id=user["userId"]) + return RedirectResponse(url=config("SITE_URL") + config("change_password_link") % (token, pass_token)) + + +@public_app.post('/password/reset', tags=["users"]) +@public_app.put('/password/reset', tags=["users"]) +def change_password_by_invitation(data: schemas.EditPasswordByInvitationSchema = Body(...)): + if data is None or len(data.invitation) < 64 or len(data.passphrase) < 8: + return {"errors": ["please provide a valid invitation & pass"]} + user = users.get_by_invitation_token(token=data.invitation, pass_token=data.passphrase) + if user is None: + return {"errors": ["invitation not found"]} + if user["expiredChange"]: + return {"errors": ["expired change, please re-use the invitation link"]} + + return users.set_password_invitation(new_password=data.password, user_id=user["userId"]) + + +@app.put('/client/members/{memberId}', tags=["client"]) +@app.post('/client/members/{memberId}', tags=["client"]) +def edit_member(memberId: int, data: schemas.EditMemberSchema, + context: schemas.CurrentContext = Depends(OR_context)): + return users.edit(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data.dict(), + user_id_to_update=memberId) + + +@app.get('/metadata/session_search', tags=["metadata"]) +def search_sessions_by_metadata(key: str, value: str, projectId: Optional[int] = None, + context: schemas.CurrentContext = Depends(OR_context)): + if key is None or value is None or len(value) == 0 and len(key) == 0: + return {"errors": ["please provide a key&value for search"]} + if len(value) == 0: + return {"errors": ["please provide a value for search"]} + if len(key) == 0: + return {"errors": ["please provide a key for search"]} + return { + "data": sessions.search_by_metadata(tenant_id=context.tenant_id, user_id=context.user_id, 
m_value=value,
+                                            m_key=key, project_id=projectId)}
+
+
+@app.get('/plans', tags=["plan"])
+def get_current_plan(context: schemas.CurrentContext = Depends(OR_context)):
+    return {
+        "data": license.get_status(context.tenant_id)
+    }
+
+
+@public_app.get('/general_stats', tags=["private"], include_in_schema=False)
+def get_general_stats():
+    return {"data": {"sessions": sessions.count_all()}}
+
+
+@app.get('/client', tags=['projects'])
+def get_client(context: schemas.CurrentContext = Depends(OR_context)):
+    r = tenants.get_by_tenant_id(context.tenant_id)
+    if r is not None:
+        r.pop("createdAt")
+        r["projects"] = projects.get_projects(tenant_id=context.tenant_id, recording_state=True, recorded=True,
+                                              stack_integrations=True, version=True)
+    return {
+        'data': r
+    }
+
+
+@app.get('/projects', tags=['projects'])
+def get_projects(last_tracker_version: Optional[str] = None, context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True,
+                                          stack_integrations=True, version=True,
+                                          last_tracker_version=last_tracker_version)}
diff --git a/ee/api/chalicelib/blueprints/__init__.py b/api/routers/crons/__init__.py
similarity index 100%
rename from ee/api/chalicelib/blueprints/__init__.py
rename to api/routers/crons/__init__.py
diff --git a/api/routers/crons/core_crons.py b/api/routers/crons/core_crons.py
new file mode 100644
index 000000000..5643ce1a6
--- /dev/null
+++ b/api/routers/crons/core_crons.py
@@ -0,0 +1,15 @@
+from chalicelib.core import weekly_report, jobs
+
+
+async def run_scheduled_jobs() -> None:
+    jobs.execute_jobs()
+
+
+async def weekly_report2() -> None:
+    weekly_report.cron()
+
+
+cron_jobs = [
+    {"func": run_scheduled_jobs, "trigger": "interval", "seconds": 60, "misfire_grace_time": 20},
+    {"func": weekly_report2, "trigger": "cron", "day_of_week": "mon", "hour": 5, "misfire_grace_time": 60 * 60}
+]
diff --git a/api/routers/crons/core_dynamic_crons.py b/api/routers/crons/core_dynamic_crons.py
new file mode 100644
index 000000000..78d91856d
--- /dev/null
+++ b/api/routers/crons/core_dynamic_crons.py
@@ -0,0 +1,10 @@
+from chalicelib.core import telemetry
+
+
+def telemetry_cron() -> None:
+    telemetry.compute()
+
+
+cron_jobs = [
+    {"func": telemetry_cron, "trigger": "cron", "day_of_week": "*"}
+]
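
The `cron_jobs` entries above match APScheduler's `add_job` keyword arguments (`trigger`, `seconds`, `day_of_week`, `misfire_grace_time`); the scheduler wiring is not part of this hunk, but assuming APScheduler is what consumes these lists, registration might look like this sketch:

from apscheduler.schedulers.asyncio import AsyncIOScheduler

from routers.crons import core_crons, core_dynamic_crons

# Assumed consumer of the cron_jobs lists; not shown in this diff.
scheduler = AsyncIOScheduler()
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
    job = dict(job)  # copy so pop() does not mutate the module-level definition
    scheduler.add_job(job.pop("func"), **job)  # trigger kwargs pass straight through
scheduler.start()
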
diff --git a/ee/api/chalicelib/blueprints/subs/__init__.py b/api/routers/subs/__init__.py
similarity index 100%
rename from ee/api/chalicelib/blueprints/subs/__init__.py
rename to api/routers/subs/__init__.py
diff --git a/api/routers/subs/dashboard.py b/api/routers/subs/dashboard.py
new file mode 100644
index 000000000..169893693
--- /dev/null
+++ b/api/routers/subs/dashboard.py
@@ -0,0 +1,346 @@
+from fastapi import Body
+
+import schemas
+from chalicelib.core import dashboard
+from chalicelib.core import metadata
+from chalicelib.utils import helper
+from routers.base import get_routers
+
+public_app, app, app_apikey = get_routers()
+
+
+@app.get('/{projectId}/dashboard/metadata', tags=["dashboard", "metrics"])
+def get_metadata_map(projectId: int):
+    metamap = []
+    for m in metadata.get(project_id=projectId):
+        metamap.append({"name": m["key"], "key": f"metadata{m['index']}"})
+    return {"data": metamap}
+
+
+@app.post('/{projectId}/dashboard/sessions', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/sessions', tags=["dashboard", "metrics"])
+def get_dashboard_processed_sessions(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_processed_sessions(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/errors', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/errors', tags=["dashboard", "metrics"])
+def get_dashboard_errors(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_errors(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/errors_trend', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/errors_trend', tags=["dashboard", "metrics"])
+def get_dashboard_errors_trend(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_errors_trend(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/application_activity', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/application_activity', tags=["dashboard", "metrics"])
+def get_dashboard_application_activity(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_application_activity(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/page_metrics', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/page_metrics', tags=["dashboard", "metrics"])
+def get_dashboard_page_metrics(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_page_metrics(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/user_activity', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/user_activity', tags=["dashboard", "metrics"])
+def get_dashboard_user_activity(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_user_activity(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/performance', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/performance', tags=["dashboard", "metrics"])
+def get_dashboard_performance(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_performance(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/slowest_images', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/slowest_images', tags=["dashboard", "metrics"])
+def get_dashboard_slowest_images(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_slowest_images(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/missing_resources', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/missing_resources', tags=["dashboard", "metrics"])
+def get_dashboard_missing_resources(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_missing_resources_trend(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/dashboard/network', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/network', tags=["dashboard", "metrics"])
+def get_network_widget(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_network(project_id=projectId, **data.dict())}
+
+
+@app.get('/{projectId}/dashboard/{widget}/search', tags=["dashboard", "metrics"])
+def get_dashboard_autocomplete(projectId: int, widget: str, q: str, type: str = "", platform: str = None,
+                               key: str = ""):
+    if q is None or len(q) == 0:
+        return {"data": []}
+    q = '^' + q
+
+    if widget in ['performance']:
+        data = dashboard.search(q, type, project_id=projectId, +
platform=platform, performance=True) + elif widget in ['pages', 'pages_dom_buildtime', 'top_metrics', 'time_to_render', + 'impacted_sessions_by_slow_pages', 'pages_response_time']: + data = dashboard.search(q, type, project_id=projectId, + platform=platform, pages_only=True) + elif widget in ['resources_loading_time']: + data = dashboard.search(q, type, project_id=projectId, + platform=platform, performance=False) + elif widget in ['time_between_events', 'events']: + data = dashboard.search(q, type, project_id=projectId, + platform=platform, performance=False, events_only=True) + elif widget in ['metadata']: + data = dashboard.search(q, None, project_id=projectId, + platform=platform, metadata=True, key=key) + else: + return {"errors": [f"unsupported widget: {widget}"]} + return {'data': data} + + +# 1 +@app.post('/{projectId}/dashboard/slowest_resources', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/slowest_resources', tags=["dashboard", "metrics"]) +def get_dashboard_slowest_resources(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_slowest_resources(project_id=projectId, **data.dict())} + + +# 2 +@app.post('/{projectId}/dashboard/resources_loading_time', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/resources_loading_time', tags=["dashboard", "metrics"]) +def get_dashboard_resources(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_resources_loading_time(project_id=projectId, **data.dict())} + + +# 3 +@app.post('/{projectId}/dashboard/pages_dom_buildtime', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/pages_dom_buildtime', tags=["dashboard", "metrics"]) +def get_dashboard_pages_dom(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_pages_dom_build_time(project_id=projectId, **data.dict())} + + +# 4 +@app.post('/{projectId}/dashboard/busiest_time_of_day', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/busiest_time_of_day', tags=["dashboard", "metrics"]) +def get_dashboard_busiest_time_of_day(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_busiest_time_of_day(project_id=projectId, **data.dict())} + + +# 5 +@app.post('/{projectId}/dashboard/sessions_location', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/sessions_location', tags=["dashboard", "metrics"]) +def get_dashboard_sessions_location(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_sessions_location(project_id=projectId, **data.dict())} + + +# 6 +@app.post('/{projectId}/dashboard/speed_location', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/speed_location', tags=["dashboard", "metrics"]) +def get_dashboard_speed_location(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_speed_index_location(project_id=projectId, **data.dict())} + + +# 7 +@app.post('/{projectId}/dashboard/pages_response_time', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/pages_response_time', tags=["dashboard", "metrics"]) +def get_dashboard_pages_response_time(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_pages_response_time(project_id=projectId, **data.dict())} + + +# 8 +@app.post('/{projectId}/dashboard/pages_response_time_distribution', tags=["dashboard", "metrics"]) 
+@app.get('/{projectId}/dashboard/pages_response_time_distribution', tags=["dashboard", "metrics"])
+def get_dashboard_pages_response_time_distribution(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_pages_response_time_distribution(project_id=projectId, **data.dict())}
+
+
+# 9
+@app.post('/{projectId}/dashboard/top_metrics', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/top_metrics', tags=["dashboard", "metrics"])
+def get_dashboard_top_metrics(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_top_metrics(project_id=projectId, **data.dict())}
+
+
+# 10
+@app.post('/{projectId}/dashboard/time_to_render', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/time_to_render', tags=["dashboard", "metrics"])
+def get_dashboard_time_to_render(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_time_to_render(project_id=projectId, **data.dict())}
+
+
+# 11
+@app.post('/{projectId}/dashboard/impacted_sessions_by_slow_pages', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/impacted_sessions_by_slow_pages', tags=["dashboard", "metrics"])
+def get_dashboard_impacted_sessions_by_slow_pages(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_impacted_sessions_by_slow_pages(project_id=projectId, **data.dict())}
+
+
+# 12
+@app.post('/{projectId}/dashboard/memory_consumption', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/memory_consumption', tags=["dashboard", "metrics"])
+def get_dashboard_memory_consumption(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_memory_consumption(project_id=projectId, **data.dict())}
+
+
+# 12.1
+@app.post('/{projectId}/dashboard/fps', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/fps', tags=["dashboard", "metrics"])
+def get_dashboard_avg_fps(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_avg_fps(project_id=projectId, **data.dict())}
+
+
+# 12.2
+@app.post('/{projectId}/dashboard/cpu', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/cpu', tags=["dashboard", "metrics"])
+def get_dashboard_avg_cpu(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_avg_cpu(project_id=projectId, **data.dict())}
+
+
+# 13
+@app.post('/{projectId}/dashboard/crashes', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/crashes', tags=["dashboard", "metrics"])
+def get_dashboard_crashes(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_crashes(project_id=projectId, **data.dict())}
+
+
+# 14
+@app.post('/{projectId}/dashboard/domains_errors', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/domains_errors', tags=["dashboard", "metrics"])
+def get_dashboard_domains_errors(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_domains_errors(project_id=projectId, **data.dict())}
+
+
+# 14.1
+@app.post('/{projectId}/dashboard/domains_errors_4xx', tags=["dashboard", "metrics"])
+@app.get('/{projectId}/dashboard/domains_errors_4xx', tags=["dashboard", "metrics"])
+def get_dashboard_domains_errors_4xx(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": dashboard.get_domains_errors_4xx(project_id=projectId,
**data.dict())} + + +# 14.2 +@app.post('/{projectId}/dashboard/domains_errors_5xx', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/domains_errors_5xx', tags=["dashboard", "metrics"]) +def get_dashboard_domains_errors_5xx(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_domains_errors_5xx(project_id=projectId, **data.dict())} + + +# 15 +@app.post('/{projectId}/dashboard/slowest_domains', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/slowest_domains', tags=["dashboard", "metrics"]) +def get_dashboard_slowest_domains(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_slowest_domains(project_id=projectId, **data.dict())} + + +# 16 +@app.post('/{projectId}/dashboard/errors_per_domains', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/errors_per_domains', tags=["dashboard", "metrics"]) +def get_dashboard_errors_per_domains(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_errors_per_domains(project_id=projectId, **data.dict())} + + +# 17 +@app.post('/{projectId}/dashboard/sessions_per_browser', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/sessions_per_browser', tags=["dashboard", "metrics"]) +def get_dashboard_sessions_per_browser(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_sessions_per_browser(project_id=projectId, **data.dict())} + + +# 18 +@app.post('/{projectId}/dashboard/calls_errors', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/calls_errors', tags=["dashboard", "metrics"]) +def get_dashboard_calls_errors(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_calls_errors(project_id=projectId, **data.dict())} + + +# 18.1 +@app.post('/{projectId}/dashboard/calls_errors_4xx', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/calls_errors_4xx', tags=["dashboard", "metrics"]) +def get_dashboard_calls_errors_4xx(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_calls_errors_4xx(project_id=projectId, **data.dict())} + + +# 18.2 +@app.post('/{projectId}/dashboard/calls_errors_5xx', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/calls_errors_5xx', tags=["dashboard", "metrics"]) +def get_dashboard_calls_errors_5xx(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_calls_errors_5xx(project_id=projectId, **data.dict())} + + +# 19 +@app.post('/{projectId}/dashboard/errors_per_type', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/errors_per_type', tags=["dashboard", "metrics"]) +def get_dashboard_errors_per_type(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_errors_per_type(project_id=projectId, **data.dict())} + + +# 20 +@app.post('/{projectId}/dashboard/resources_by_party', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/resources_by_party', tags=["dashboard", "metrics"]) +def get_dashboard_resources_by_party(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_resources_by_party(project_id=projectId, **data.dict())} + + +# 21 +@app.post('/{projectId}/dashboard/resource_type_vs_response_end', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/resource_type_vs_response_end', tags=["dashboard", "metrics"]) +def 
get_dashboard_errors_per_resource_type(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.resource_type_vs_response_end(project_id=projectId, **data.dict())} + + +# 22 +@app.post('/{projectId}/dashboard/resources_vs_visually_complete', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/resources_vs_visually_complete', tags=["dashboard", "metrics"]) +def get_dashboard_resources_vs_visually_complete(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_resources_vs_visually_complete(project_id=projectId, **data.dict())} + + +# 23 +@app.post('/{projectId}/dashboard/impacted_sessions_by_js_errors', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/impacted_sessions_by_js_errors', tags=["dashboard", "metrics"]) +def get_dashboard_impacted_sessions_by_js_errors(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_impacted_sessions_by_js_errors(project_id=projectId, **data.dict())} + + +# 24 +@app.post('/{projectId}/dashboard/resources_count_by_type', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/resources_count_by_type', tags=["dashboard", "metrics"]) +def get_dashboard_resources_count_by_type(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": dashboard.get_resources_count_by_type(project_id=projectId, **data.dict())} + + +# # 25 +# @app.post('/{projectId}/dashboard/time_between_events', tags=["dashboard", "metrics"]) +# @app.get('/{projectId}/dashboard/time_between_events', tags=["dashboard", "metrics"]) +# def get_dashboard_resources_count_by_type(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): +# return {"errors": ["please choose 2 events"]} + + +@app.post('/{projectId}/dashboard/overview', tags=["dashboard", "metrics"]) +@app.get('/{projectId}/dashboard/overview', tags=["dashboard", "metrics"]) +def get_dashboard_group(projectId: int, data: schemas.MetricPayloadSchema = Body(...)): + return {"data": [ + *helper.explode_widget(key="count_sessions", + data=dashboard.get_processed_sessions(project_id=projectId, **data.dict())), + *helper.explode_widget(data={**dashboard.get_application_activity(project_id=projectId, **data.dict()), + "chart": dashboard.get_performance(project_id=projectId, **data.dict()) + .get("chart", [])}), + *helper.explode_widget(data=dashboard.get_page_metrics(project_id=projectId, **data.dict())), + *helper.explode_widget(data=dashboard.get_user_activity(project_id=projectId, **data.dict())), + *helper.explode_widget(data=dashboard.get_pages_dom_build_time(project_id=projectId, **data.dict()), + key="avg_pages_dom_buildtime"), + *helper.explode_widget(data=dashboard.get_pages_response_time(project_id=projectId, **data.dict()), + key="avg_pages_response_time"), + *helper.explode_widget(dashboard.get_top_metrics(project_id=projectId, **data.dict())), + *helper.explode_widget(data=dashboard.get_time_to_render(project_id=projectId, **data.dict()), + key="avg_time_to_render"), + *helper.explode_widget(dashboard.get_memory_consumption(project_id=projectId, **data.dict())), + *helper.explode_widget(dashboard.get_avg_cpu(project_id=projectId, **data.dict())), + *helper.explode_widget(dashboard.get_avg_fps(project_id=projectId, **data.dict())), + ]} diff --git a/api/routers/subs/insights.py b/api/routers/subs/insights.py new file mode 100644 index 000000000..cce4917d4 --- /dev/null +++ b/api/routers/subs/insights.py @@ -0,0 +1,108 @@ +from fastapi import Body + 
+import schemas
+from chalicelib.core import insights
+from routers.base import get_routers
+
+public_app, app, app_apikey = get_routers()
+
+
+@app.post('/{projectId}/insights/journey', tags=["insights"])
+@app.get('/{projectId}/insights/journey', tags=["insights"])
+def get_insights_journey(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.journey(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/users_acquisition', tags=["insights"])
+@app.get('/{projectId}/insights/users_acquisition', tags=["insights"])
+def get_users_acquisition(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.users_acquisition(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/users_retention', tags=["insights"])
+@app.get('/{projectId}/insights/users_retention', tags=["insights"])
+def get_users_retention(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.users_retention(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/feature_retention', tags=["insights"])
+@app.get('/{projectId}/insights/feature_retention', tags=["insights"])
+def get_feature_retention(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.feature_retention(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/feature_acquisition', tags=["insights"])
+@app.get('/{projectId}/insights/feature_acquisition', tags=["insights"])
+def get_feature_acquisition(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.feature_acquisition(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/feature_popularity_frequency', tags=["insights"])
+@app.get('/{projectId}/insights/feature_popularity_frequency', tags=["insights"])
+def get_feature_popularity_frequency(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.feature_popularity_frequency(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/feature_intensity', tags=["insights"])
+@app.get('/{projectId}/insights/feature_intensity', tags=["insights"])
+def get_feature_intensity(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.feature_intensity(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/feature_adoption', tags=["insights"])
+@app.get('/{projectId}/insights/feature_adoption', tags=["insights"])
+def get_feature_adoption(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.feature_adoption(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/feature_adoption_top_users', tags=["insights"])
+@app.get('/{projectId}/insights/feature_adoption_top_users', tags=["insights"])
+def get_feature_adoption_top_users(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.feature_adoption_top_users(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/users_active', tags=["insights"])
+@app.get('/{projectId}/insights/users_active', tags=["insights"])
+def get_users_active(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.users_active(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/users_power', tags=["insights"])
+@app.get('/{projectId}/insights/users_power', tags=["insights"])
+def get_users_power(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.users_power(project_id=projectId, **data.dict())}
+
+
+@app.post('/{projectId}/insights/users_slipping', tags=["insights"])
+@app.get('/{projectId}/insights/users_slipping', tags=["insights"])
+def get_users_slipping(projectId: int, data: schemas.MetricPayloadSchema = Body(...)):
+    return {"data": insights.users_slipping(project_id=projectId, **data.dict())}
+
+#
+#
+# @app.route('/{projectId}/dashboard/{widget}/search', methods=['GET'])
+# def get_dashboard_autocomplete(projectId:int, widget):
+#     params = app.current_request.query_params
+#     if params is None or params.get('q') is None or len(params.get('q')) == 0:
+#         return {"data": []}
+#     params['q'] = '^' + params['q']
+#
+#     if widget in ['performance']:
+#         data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId,
+#                                 platform=params.get('platform', None), performance=True)
+#     elif widget in ['pages', 'pages_dom_buildtime', 'top_metrics', 'time_to_render',
+#                     'impacted_sessions_by_slow_pages', 'pages_response_time']:
+#         data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId,
+#                                 platform=params.get('platform', None), pages_only=True)
+#     elif widget in ['resources_loading_time']:
+#         data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId,
+#                                 platform=params.get('platform', None), performance=False)
+#     elif widget in ['time_between_events', 'events']:
+#         data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId,
+#                                 platform=params.get('platform', None), performance=False, events_only=True)
+#     elif widget in ['metadata']:
+#         data = dashboard.search(params.get('q', ''), None, project_id=projectId,
+#                                 platform=params.get('platform', None), metadata=True, key=params.get("key"))
+#     else:
+#         return {"errors": [f"unsupported widget: {widget}"]}
+#     return {'data': data}
diff --git a/api/run-dev.sh b/api/run-dev.sh
new file mode 100755
index 000000000..76682286d
--- /dev/null
+++ b/api/run-dev.sh
@@ -0,0 +1,3 @@
+#!/bin/zsh
+
+uvicorn app:app --reload
\ No newline at end of file
diff --git a/api/schemas.py b/api/schemas.py
new file mode 100644
index 000000000..0dfd949ac
--- /dev/null
+++ b/api/schemas.py
@@ -0,0 +1,655 @@
+from enum import Enum
+from typing import Optional, List, Union, Literal
+
+from pydantic import BaseModel, Field, EmailStr, HttpUrl, root_validator
+
+from chalicelib.utils.TimeUTC import TimeUTC
+
+
+def attribute_to_camel_case(snake_str):
+    components = snake_str.split("_")
+    return components[0] + ''.join(x.title() for x in components[1:])
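
For reference, `attribute_to_camel_case` is the alias generator used throughout the schemas below; it maps snake_case field names to the camelCase keys the frontend sends:

# Examples derived directly from the function above:
assert attribute_to_camel_case("old_password") == "oldPassword"
assert attribute_to_camel_case("weekly_report") == "weeklyReport"
assert attribute_to_camel_case("name") == "name"  # single components pass through unchanged
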
+
+
+class _Grecaptcha(BaseModel):
+    g_recaptcha_response: Optional[str] = Field(None, alias='g-recaptcha-response')
+
+
+class UserLoginSchema(_Grecaptcha):
+    email: EmailStr = Field(...)
+    password: str = Field(...)
+
+
+class UserSignupSchema(UserLoginSchema):
+    fullname: str = Field(...)
+    organizationName: str = Field(...)
+    projectName: str = Field(default="my first project")
+
+    class Config:
+        alias_generator = attribute_to_camel_case
+
+
+class EditUserSchema(BaseModel):
+    name: Optional[str] = Field(None)
+    email: Optional[str] = Field(None)
+    admin: Optional[bool] = Field(False)
+    appearance: Optional[dict] = Field({})
+
+
+class ForgetPasswordPayloadSchema(_Grecaptcha):
+    email: str = Field(...)
+
+
+class EditUserPasswordSchema(BaseModel):
+    old_password: str = Field(...)
+    new_password: str = Field(...)
+ + class Config: + alias_generator = attribute_to_camel_case + + +class UpdateTenantSchema(BaseModel): + name: Optional[str] = Field(None) + opt_out: Optional[bool] = Field(None) + + class Config: + alias_generator = attribute_to_camel_case + + +class CreateProjectSchema(BaseModel): + name: str = Field("my first project") + + +class CurrentAPIContext(BaseModel): + tenant_id: int = Field(...) + + +class CurrentContext(CurrentAPIContext): + user_id: int = Field(...) + email: str = Field(...) + + +class AddSlackSchema(BaseModel): + name: str = Field(...) + url: HttpUrl = Field(...) + + +class EditSlackSchema(BaseModel): + name: Optional[str] = Field(None) + url: HttpUrl = Field(...) + + +class SearchErrorsSchema(BaseModel): + platform: Optional[str] = Field(None) + startDate: Optional[int] = Field(TimeUTC.now(-7)) + endDate: Optional[int] = Field(TimeUTC.now()) + density: Optional[int] = Field(7) + sort: Optional[str] = Field(None) + order: Optional[str] = Field(None) + + +class CreateNotificationSchema(BaseModel): + token: str = Field(...) + notifications: List = Field(...) + + +class NotificationsViewSchema(BaseModel): + ids: Optional[List] = Field(default=[]) + startTimestamp: Optional[int] = Field(default=None) + endTimestamp: Optional[int] = Field(default=None) + + +class JiraGithubSchema(BaseModel): + provider: str = Field(...) + username: str = Field(...) + token: str = Field(...) + url: str = Field(...) + + +class CreateEditWebhookSchema(BaseModel): + webhookId: Optional[int] = Field(None) + endpoint: str = Field(...) + authHeader: Optional[str] = Field(None) + name: Optional[str] = Field(...) + + +class CreateMemberSchema(BaseModel): + userId: Optional[int] = Field(None) + name: str = Field(...) + email: str = Field(...) + admin: bool = Field(False) + + +class EditMemberSchema(BaseModel): + name: str = Field(...) + email: str = Field(...) + admin: bool = Field(False) + + +class EditPasswordByInvitationSchema(BaseModel): + invitation: str = Field(...) + passphrase: str = Field(..., alias="pass") + password: str = Field(...) + + +class AssignmentSchema(BaseModel): + assignee: str = Field(...) + description: str = Field(...) + title: str = Field(...) + issue_type: str = Field(...) + + class Config: + alias_generator = attribute_to_camel_case + + +class CommentAssignmentSchema(BaseModel): + message: str = Field(...) + + +class IntegrationNotificationSchema(BaseModel): + comment: Optional[str] = Field(None) + + +class GdprSchema(BaseModel): + maskEmails: bool = Field(...) + sampleRate: int = Field(...) + maskNumbers: bool = Field(...) + defaultInputMode: str = Field(...) + + +class SampleRateSchema(BaseModel): + rate: int = Field(...) + captureAll: bool = Field(False) + + +class WeeklyReportConfigSchema(BaseModel): + weekly_report: bool = Field(True) + + class Config: + alias_generator = attribute_to_camel_case + + +class GetHeatmapPayloadSchema(BaseModel): + startDate: int = Field(TimeUTC.now(delta_days=-30)) + endDate: int = Field(TimeUTC.now()) + url: str = Field(...) + + +class DatadogSchema(BaseModel): + apiKey: str = Field(...) + applicationKey: str = Field(...) + + +class StackdriverSchema(BaseModel): + serviceAccountCredentials: str = Field(...) + logName: str = Field(...) + + +class NewrelicSchema(BaseModel): + applicationId: str = Field(...) + xQueryKey: str = Field(...) + region: str = Field(...) + + +class RollbarSchema(BaseModel): + accessToken: str = Field(...) + + +class BugsnagBasicSchema(BaseModel): + authorizationToken: str = Field(...) 
+ + +class BugsnagSchema(BugsnagBasicSchema): + bugsnagProjectId: str = Field(...) + + +class CloudwatchBasicSchema(BaseModel): + awsAccessKeyId: str = Field(...) + awsSecretAccessKey: str = Field(...) + region: str = Field(...) + + +class CloudwatchSchema(CloudwatchBasicSchema): + logGroupName: str = Field(...) + + +class ElasticsearchBasicSchema(BaseModel): + host: str = Field(...) + port: int = Field(...) + apiKeyId: str = Field(...) + apiKey: str = Field(...) + + +class ElasticsearchSchema(ElasticsearchBasicSchema): + indexes: str = Field(...) + + +class SumologicSchema(BaseModel): + accessId: str = Field(...) + accessKey: str = Field(...) + region: str = Field(...) + + +class MetadataBasicSchema(BaseModel): + index: Optional[int] = Field(None) + key: str = Field(...) + + +class MetadataListSchema(BaseModel): + list: List[MetadataBasicSchema] = Field(...) + + +class EmailPayloadSchema(BaseModel): + auth: str = Field(...) + email: EmailStr = Field(...) + link: str = Field(...) + message: str = Field(...) + + +class MemberInvitationPayloadSchema(BaseModel): + auth: str = Field(...) + email: EmailStr = Field(...) + invitation_link: str = Field(...) + client_id: str = Field(...) + sender_name: str = Field(...) + + class Config: + alias_generator = attribute_to_camel_case + + +class ErrorIdsPayloadSchema(BaseModel): + errors: List[str] = Field([]) + + +class _AlertMessageSchema(BaseModel): + type: str = Field(...) + value: str = Field(...) + + +class AlertDetectionChangeType(str, Enum): + percent = "percent" + change = "change" + + +class _AlertOptionSchema(BaseModel): + message: List[_AlertMessageSchema] = Field([]) + currentPeriod: Literal[15, 30, 60, 120, 240, 1440] = Field(...) + previousPeriod: Literal[15, 30, 60, 120, 240, 1440] = Field(15) + lastNotification: Optional[int] = Field(None) + renotifyInterval: Optional[int] = Field(720) + change: Optional[AlertDetectionChangeType] = Field(None) + + +class AlertColumn(str, Enum): + performance__dom_content_loaded__average = "performance.dom_content_loaded.average" + performance__first_meaningful_paint__average = "performance.first_meaningful_paint.average" + performance__page_load_time__average = "performance.page_load_time.average" + performance__dom_build_time__average = "performance.dom_build_time.average" + performance__speed_index__average = "performance.speed_index.average" + performance__page_response_time__average = "performance.page_response_time.average" + performance__ttfb__average = "performance.ttfb.average" + performance__time_to_render__average = "performance.time_to_render.average" + performance__image_load_time__average = "performance.image_load_time.average" + performance__request_load_time__average = "performance.request_load_time.average" + resources__load_time__average = "resources.load_time.average" + resources__missing__count = "resources.missing.count" + errors__4xx_5xx__count = "errors.4xx_5xx.count" + errors__4xx__count = "errors.4xx.count" + errors__5xx__count = "errors.5xx.count" + errors__javascript__impacted_sessions__count = "errors.javascript.impacted_sessions.count" + performance__crashes__count = "performance.crashes.count" + errors__javascript__count = "errors.javascript.count" + errors__backend__count = "errors.backend.count" + custom = "CUSTOM" + + +class MathOperator(str, Enum): + _equal = "=" + _less = "<" + _greater = ">" + _less_eq = "<=" + _greater_eq = ">=" + + +class _AlertQuerySchema(BaseModel): + left: AlertColumn = Field(...) + right: float = Field(...) 
+ # operator: Literal["<", ">", "<=", ">="] = Field(...) + operator: MathOperator = Field(...) + + +class AlertDetectionMethod(str, Enum): + threshold = "threshold" + change = "change" + + +class AlertSchema(BaseModel): + name: str = Field(...) + detection_method: AlertDetectionMethod = Field(...) + description: Optional[str] = Field(None) + options: _AlertOptionSchema = Field(...) + query: _AlertQuerySchema = Field(...) + series_id: Optional[int] = Field(None) + + @root_validator + def alert_validator(cls, values): + if values.get("query") is not None and values["query"].left == AlertColumn.custom: + assert values.get("series_id") is not None, "series_id should not be null for CUSTOM alert" + if values.get("detection_method") is not None \ + and values["detection_method"] == AlertDetectionMethod.change \ + and values.get("options") is not None: + assert values["options"].change is not None, \ + "options.change should not be null for detection method 'change'" + return values + + class Config: + alias_generator = attribute_to_camel_case + + +class SourcemapUploadPayloadSchema(BaseModel): + urls: List[str] = Field(..., alias="URL") + + +class ErrorSource(str, Enum): + js_exception = "js_exception" + bugsnag = "bugsnag" + cloudwatch = "cloudwatch" + datadog = "datadog" + newrelic = "newrelic" + rollbar = "rollbar" + sentry = "sentry" + stackdriver = "stackdriver" + sumologic = "sumologic" + + +class EventType(str, Enum): + click = "CLICK" + input = "INPUT" + location = "LOCATION" + custom = "CUSTOM" + request = "REQUEST" + graphql = "GRAPHQL" + state_action = "STATEACTION" + error = "ERROR" + metadata = "METADATA" + click_ios = "CLICK_IOS" + input_ios = "INPUT_IOS" + view_ios = "VIEW_IOS" + custom_ios = "CUSTOM_IOS" + request_ios = "REQUEST_IOS" + error_ios = "ERROR_IOS" + + +class PerformanceEventType(str, Enum): + location_dom_complete = "DOM_COMPLETE" + location_largest_contentful_paint_time = "LARGEST_CONTENTFUL_PAINT_TIME" + time_between_events = "TIME_BETWEEN_EVENTS" + location_ttfb = "TTFB" + location_avg_cpu_load = "AVG_CPU_LOAD" + location_avg_memory_usage = "AVG_MEMORY_USAGE" + fetch_failed = "FETCH_FAILED" + # fetch_duration = "FETCH_DURATION" + + +class FilterType(str, Enum): + user_os = "USEROS" + user_browser = "USERBROWSER" + user_device = "USERDEVICE" + user_country = "USERCOUNTRY" + user_id = "USERID" + user_anonymous_id = "USERANONYMOUSID" + referrer = "REFERRER" + rev_id = "REVID" + # IOS + user_os_ios = "USEROS_IOS" + user_device_ios = "USERDEVICE_IOS" + user_country_ios = "USERCOUNTRY_IOS" + user_id_ios = "USERID_IOS" + user_anonymous_id_ios = "USERANONYMOUSID_IOS" + rev_id_ios = "REVID_IOS" + # + duration = "DURATION" + platform = "PLATFORM" + metadata = "METADATA" + issue = "ISSUE" + events_count = "EVENTS_COUNT" + utm_source = "UTM_SOURCE" + utm_medium = "UTM_MEDIUM" + utm_campaign = "UTM_CAMPAIGN" + + +class SearchEventOperator(str, Enum): + _is = "is" + _is_any = "isAny" + _on = "on" + _on_any = "onAny" + _is_not = "isNot" + _not_on = "notOn" + _contains = "contains" + _not_contains = "notContains" + _starts_with = "startsWith" + _ends_with = "endsWith" + + +class PlatformType(str, Enum): + mobile = "mobile" + desktop = "desktop" + tablet = "tablet" + + +class SearchEventOrder(str, Enum): + _then = "then" + _or = "or" + _and = "and" + + +class IssueType(str, Enum): + click_rage = 'click_rage' + dead_click = 'dead_click' + excessive_scrolling = 'excessive_scrolling' + bad_request = 'bad_request' + missing_resource = 'missing_resource' + memory = 'memory' + cpu = 'cpu'
+ slow_resource = 'slow_resource' + slow_page_load = 'slow_page_load' + crash = 'crash' + custom = 'custom' + js_exception = 'js_exception' + + +class _SessionSearchEventRaw(BaseModel): + custom: Optional[List[Union[int, str]]] = Field(None, min_items=1) + customOperator: Optional[MathOperator] = Field(None) + key: Optional[str] = Field(None) + value: Union[str, List[str]] = Field(...) + type: Union[EventType, PerformanceEventType] = Field(...) + operator: SearchEventOperator = Field(...) + source: Optional[ErrorSource] = Field(default=ErrorSource.js_exception) + + @root_validator + def event_validator(cls, values): + if isinstance(values.get("type"), PerformanceEventType): + if values.get("type") == PerformanceEventType.fetch_failed: + return values + assert values.get("custom") is not None, "custom should not be null for PerformanceEventType" + assert values.get("customOperator") is not None \ + , "customOperator should not be null for PerformanceEventType" + if values["type"] == PerformanceEventType.time_between_events: + assert len(values.get("value", [])) == 2, \ + f"must provide 2 Events as value for {PerformanceEventType.time_between_events}" + assert isinstance(values["value"][0], _SessionSearchEventRaw) \ + and isinstance(values["value"][1], _SessionSearchEventRaw) \ + , f"event should be of type _SessionSearchEventRaw for {PerformanceEventType.time_between_events}" + else: + for c in values["custom"]: + assert isinstance(c, int), f"custom value should be of type int for {values.get('type')}" + return values + + +class _SessionSearchEventSchema(_SessionSearchEventRaw): + value: Union[List[_SessionSearchEventRaw], str, List[str]] = Field(...) + + +class _SessionSearchFilterSchema(BaseModel): + custom: Optional[List[str]] = Field(None) + key: Optional[str] = Field(None) + value: Union[Optional[Union[IssueType, PlatformType, int, str]], + Optional[List[Union[IssueType, PlatformType, int, str]]]] = Field(...) + type: FilterType = Field(...) + operator: Union[SearchEventOperator, MathOperator] = Field(...) + source: Optional[ErrorSource] = Field(default=ErrorSource.js_exception) + + @root_validator + def filter_validator(cls, values): + if values.get("type") == FilterType.issue: + for v in values.get("value"): + assert isinstance(v, IssueType), f"value should be of type IssueType for {values.get('type')} filter" + elif values.get("type") == FilterType.platform: + for v in values.get("value"): + assert isinstance(v, PlatformType), \ + f"value should be of type PlatformType for {values.get('type')} filter" + elif values.get("type") == FilterType.events_count: + assert isinstance(values.get("operator"), MathOperator), \ + f"operator should be of type MathOperator for {values.get('type')} filter" + for v in values.get("value"): + assert isinstance(v, int), f"value should be of type int for {values.get('type')} filter" + else: + assert isinstance(values.get("operator"), SearchEventOperator), \ + f"operator should be of type SearchEventOperator for {values.get('type')} filter" + return values + + +class SessionsSearchPayloadSchema(BaseModel): + events: List[_SessionSearchEventSchema] = Field([]) + filters: List[_SessionSearchFilterSchema] = Field([]) + # custom:dict=Field(...) + # rangeValue:str=Field(...) + startDate: int = Field(None) + endDate: int = Field(None) + sort: str = Field(...) 
+ order: str = Field(default="DESC") + # platform: Optional[PlatformType] = Field(None) + events_order: Optional[SearchEventOrder] = Field(default=SearchEventOrder._then) + + class Config: + alias_generator = attribute_to_camel_case + + +class SessionsSearchCountSchema(SessionsSearchPayloadSchema): + sort: Optional[str] = Field(default=None) + order: Optional[str] = Field(default=None) + + +class FunnelSearchPayloadSchema(SessionsSearchPayloadSchema): + range_value: Optional[str] = Field(None) + sort: Optional[str] = Field(None) + order: Optional[str] = Field(None) + + class Config: + alias_generator = attribute_to_camel_case + + +class FunnelSchema(BaseModel): + name: str = Field(...) + filter: FunnelSearchPayloadSchema = Field([]) + is_public: bool = Field(False) + + class Config: + alias_generator = attribute_to_camel_case + + +class UpdateFunnelSchema(FunnelSchema): + name: Optional[str] = Field(None) + filter: Optional[FunnelSearchPayloadSchema] = Field(None) + is_public: Optional[bool] = Field(None) + + +class FunnelInsightsPayloadSchema(SessionsSearchPayloadSchema): + sort: Optional[str] = Field(None) + order: Optional[str] = Field(None) + + +class MetricPayloadSchema(BaseModel): + startTimestamp: int = Field(TimeUTC.now(delta_days=-1)) + endTimestamp: int = Field(TimeUTC.now()) + density: int = Field(7) + filters: List[dict] = Field([]) + type: Optional[str] = Field(None) + + class Config: + alias_generator = attribute_to_camel_case + + +class AssistSearchPayloadSchema(BaseModel): + filters: List[dict] = Field([]) + + +class SentrySchema(BaseModel): + projectSlug: str = Field(...) + organizationSlug: str = Field(...) + token: str = Field(...) + + +class MobileSignPayloadSchema(BaseModel): + keys: List[str] = Field(...) + + +class CustomMetricSeriesFilterSchema(SessionsSearchPayloadSchema): + startDate: Optional[int] = Field(None) + endDate: Optional[int] = Field(None) + sort: Optional[str] = Field(None) + order: Optional[str] = Field(None) + + +class CustomMetricCreateSeriesSchema(BaseModel): + name: Optional[str] = Field(None) + index: Optional[int] = Field(None) + filter: Optional[CustomMetricSeriesFilterSchema] = Field([]) + + +class CreateCustomMetricsSchema(BaseModel): + name: str = Field(...) + series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1) + is_public: Optional[bool] = Field(False) + + class Config: + alias_generator = attribute_to_camel_case + + +class MetricViewType(str, Enum): + line_chart = "lineChart" + progress = "progress" + + +class CustomMetricChartPayloadSchema(BaseModel): + startDate: int = Field(TimeUTC.now(-7)) + endDate: int = Field(TimeUTC.now()) + density: int = Field(7) + viewType: MetricViewType = Field(MetricViewType.line_chart) + + class Config: + alias_generator = attribute_to_camel_case + + +class CustomMetricChartPayloadSchema2(CustomMetricChartPayloadSchema): + metric_id: int = Field(...) 
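# --- Illustrative sketch (reviewer annotation, not part of this patch) ---
# The cross-field rules enforced by AlertSchema.alert_validator above can be
# exercised directly: a CUSTOM left-hand column without a seriesId must be
# rejected (pydantic v1 wraps the failed assert in a ValidationError). The
# payload values below are hypothetical:
if __name__ == "__main__":
    import pydantic
    try:
        AlertSchema.parse_obj({
            "name": "Custom metric alert",
            "detectionMethod": "threshold",
            "options": {"message": [], "currentPeriod": 15},
            "query": {"left": "CUSTOM", "right": 10.0, "operator": ">"},
        })
    except pydantic.ValidationError as err:
        print(err)  # series_id should not be null for CUSTOM alert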
+ + +class TryCustomMetricsSchema(CreateCustomMetricsSchema, CustomMetricChartPayloadSchema): + name: Optional[str] = Field(None) + + +class CustomMetricUpdateSeriesSchema(CustomMetricCreateSeriesSchema): + series_id: Optional[int] = Field(None) + + class Config: + alias_generator = attribute_to_camel_case + + +class UpdateCustomMetricsSchema(CreateCustomMetricsSchema): + series: List[CustomMetricUpdateSeriesSchema] = Field(..., min_items=1) + + +class SavedSearchSchema(FunnelSchema): + pass diff --git a/backend/pkg/db/types/project.go b/backend/pkg/db/types/project.go index 9a267d0c0..74ec90c47 100644 --- a/backend/pkg/db/types/project.go +++ b/backend/pkg/db/types/project.go @@ -1,5 +1,7 @@ package types +import "log" + type Project struct { ProjectID uint32 ProjectKey string @@ -19,6 +21,10 @@ type Project struct { func (p *Project) GetMetadataNo(key string) uint { + if p == nil { + log.Printf("GetMetadataNo: Project is nil") + return 0 + } if p.Metadata1 != nil && *(p.Metadata1) == key { return 1 } diff --git a/backend/pkg/url/assets/url.go b/backend/pkg/url/assets/url.go index b304b35ab..b087878b9 100644 --- a/backend/pkg/url/assets/url.go +++ b/backend/pkg/url/assets/url.go @@ -43,7 +43,8 @@ func isCachable(rawurl string) bool { ext == ".woff" || ext == ".woff2" || ext == ".ttf" || - ext == ".otf" + ext == ".otf" || + ext == ".eot" } func GetFullCachableURL(baseURL string, relativeURL string) (string, bool) { diff --git a/backend/services/alerts/main.go b/backend/services/alerts/main.go deleted file mode 100644 index b11d3ae04..000000000 --- a/backend/services/alerts/main.go +++ /dev/null @@ -1,86 +0,0 @@ -package main - -import ( - "database/sql" - "log" - "os" - "os/signal" - "syscall" - "time" - - "openreplay/backend/pkg/db/postgres" - "openreplay/backend/pkg/env" - _ "github.com/lib/pq" -) - -func main() { - log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) - POSTGRES_STRING := env.String("POSTGRES_STRING") - NOTIFICATIONS_STRING := env.String("ALERT_NOTIFICATION_STRING") - log.Printf("Notifications: %s \nPG: %s\n", NOTIFICATIONS_STRING, POSTGRES_STRING) - pg := postgres.NewConn(POSTGRES_STRING) - defer pg.Close() - - pgs, err := sql.Open("postgres", POSTGRES_STRING+ "?sslmode=disable") - if err != nil { - log.Fatal(err) - } - defer pgs.Close() - - manager := NewManager(NOTIFICATIONS_STRING, POSTGRES_STRING, pgs, pg) - if err := pg.IterateAlerts(func(a *postgres.Alert, err error) { - if err != nil { - log.Printf("Postgres error: %v\n", err) - return - } - log.Printf("Alert initialization: %+v\n", *a) - //log.Printf("CreatedAt: %s\n", *a.CreatedAt) - err = manager.Update(a) - if err != nil { - log.Printf("Alert parse error: %v | Alert: %+v\n", err, *a) - return - } - }); err != nil { - log.Fatalf("Postgres error: %v\n", err) - } - - listener, err := postgres.NewAlertsListener(POSTGRES_STRING) - if err != nil { - log.Fatalf("Postgres listener error: %v\n", err) - } - defer listener.Close() - - sigchan := make(chan os.Signal, 1) - signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) - - tickAlert := time.Tick(1 * time.Minute) - - log.Printf("Alert service started\n") - manager.RequestAll() - //return - for { - select { - case sig := <-sigchan: - log.Printf("Caught signal %v: terminating\n", sig) - listener.Close() - pg.Close() - os.Exit(0) - case <-tickAlert: - log.Printf("Requesting all...%d alerts\n", manager.Length()) - manager.RequestAll() - case iPointer := <-listener.Alerts: - log.Printf("Alert update: %+v\n", *iPointer) - //log.Printf("CreatedAt: %s\n", 
*iPointer.CreatedAt) - //log.Printf("Notification received for AlertId: %d\n", iPointer.AlertID) - err := manager.Update(iPointer) - if err != nil { - log.Printf("Alert parse error: %+v | Alert: %v\n", err, *iPointer) - } - case err := <-listener.Errors: - log.Printf("listener error: %v\n", err) - if err.Error() == "conn closed" { - panic("Listener conn lost") - } - } - } -} diff --git a/backend/services/alerts/manager.go b/backend/services/alerts/manager.go deleted file mode 100644 index 11ddb9363..000000000 --- a/backend/services/alerts/manager.go +++ /dev/null @@ -1,171 +0,0 @@ -package main - -import ( - "database/sql" - "fmt" - "log" - "sync" - "sync/atomic" - "time" - - "openreplay/backend/pkg/db/postgres" -) - -const PGParallelLimit = 2 - -var pgCount int64 - -type manager struct { - postgresString string - notificationsUrl string - alertsCache map[uint32]*postgres.Alert - cacheMutex sync.Mutex - pgParallel chan bool - pgs *sql.DB - pg *postgres.Conn - pgMutex sync.Mutex - notifications map[uint32]*postgres.TenantNotification - notificationsGo *sync.WaitGroup - notificationsMutex sync.Mutex -} - -func NewManager(notificationsUrl string, postgresString string, pgs *sql.DB, pg *postgres.Conn) *manager { - return &manager{ - postgresString: postgresString, - notificationsUrl: notificationsUrl, - alertsCache: make(map[uint32]*postgres.Alert), - cacheMutex: sync.Mutex{}, - pgParallel: make(chan bool, PGParallelLimit), - pgs: pgs, - pg: pg, - pgMutex: sync.Mutex{}, - notifications: make(map[uint32]*postgres.TenantNotification), - notificationsGo: &sync.WaitGroup{}, - notificationsMutex: sync.Mutex{}, - } - -} - -func (m *manager) Length() int { - return len(m.alertsCache) -} - -func (m *manager) Update(a *postgres.Alert) error { - m.cacheMutex.Lock() - defer m.cacheMutex.Unlock() - _, exists := m.alertsCache[a.AlertID] - if exists && a.DeletedAt != nil { - log.Println("deleting alert from memory") - delete(m.alertsCache, a.AlertID) - return nil - } else { - m.alertsCache[a.AlertID] = a - } - return nil -} -func (m *manager) processAlert(a *postgres.Alert) { - defer func() { - defer m.notificationsGo.Done() - <-m.pgParallel - }() - if !a.CanCheck() { - log.Printf("cannot check %s", a.Name) - return - } - //log.Printf("checking %+v", a) - log.Printf("quering %s", a.Name) - //--- For stats: - atomic.AddInt64(&pgCount, 1) - q, err := a.Build() - if err != nil { - log.Println(err) - return - } - - rows, err := q.RunWith(m.pgs).Query() - - if err != nil { - log.Println(err) - return - } - defer rows.Close() - - for rows.Next() { - var ( - value sql.NullFloat64 - valid bool - ) - if err := rows.Scan(&value, &valid); err != nil { - log.Println(err) - continue - } - - if valid && value.Valid { - log.Printf("%s: valid", a.Name) - m.notificationsMutex.Lock() - m.notifications[a.AlertID] = &postgres.TenantNotification{ - TenantId: a.TenantId, - Title: a.Name, - Description: fmt.Sprintf("has been triggered, %s = %.0f (%s %.0f).", a.Query.Left, value.Float64, a.Query.Operator, a.Query.Right), - ButtonText: "Check metrics for more details", - ButtonUrl: fmt.Sprintf("/%d/metrics", a.ProjectID), - ImageUrl: nil, - Options: map[string]interface{}{"source": "ALERT", "sourceId": a.AlertID, "sourceMeta": a.DetectionMethod, "message": a.Options.Message, "projectId": a.ProjectID, "data": map[string]interface{}{"title": a.Name, "limitValue": a.Query.Right, "actualValue": value.Float64, "operator": a.Query.Operator, "trigger": a.Query.Left, "alertId": a.AlertID, "detectionMethod": a.DetectionMethod, 
"currentPeriod": a.Options.CurrentPeriod, "previousPeriod": a.Options.PreviousPeriod, "createdAt": time.Now().Unix() * 1000}}, - } - m.notificationsMutex.Unlock() - } - } - -} -func (m *manager) RequestAll() { - now := time.Now().Unix() - m.cacheMutex.Lock() - for _, a := range m.alertsCache { - m.pgParallel <- true - m.notificationsGo.Add(1) - go m.processAlert(a) - //m.processAlert(a) - } - //log.Println("releasing cache") - m.cacheMutex.Unlock() - //log.Println("waiting for all alerts to finish") - m.notificationsGo.Wait() - log.Printf("done %d PG queries in: %ds", pgCount, time.Now().Unix()-now) - pgCount = 0 - //log.Printf("Processing %d Notifications", len(m.notifications)) - m.notificationsMutex.Lock() - go m.ProcessNotifications(m.notifications) - m.notificationsMutex.Unlock() - m.notifications = make(map[uint32]*postgres.TenantNotification) - //log.Printf("Notifications purged: %d", len(m.notifications)) -} - -func (m *manager) ProcessNotifications(allNotifications map[uint32]*postgres.TenantNotification) { - if len(allNotifications) == 0 { - log.Println("No notifications to process") - return - } - log.Printf("sending %d notifications", len(allNotifications)) - allIds := make([]uint32, 0, len(allNotifications)) - toSend := postgres.Notifications{ - Notifications: []*postgres.TenantNotification{}, - } - for k, n := range allNotifications { - //log.Printf("notification for %d", k) - allIds = append(allIds, k) - toSend.Notifications = append(toSend.Notifications, n) - } - toSend.Send(m.notificationsUrl) - if err := m.pg.SaveLastNotification(allIds); err != nil { - log.Printf("Error saving LastNotification time: %v", err) - if err.Error() == "conn closed" { - m.pg = postgres.NewConn(m.postgresString) - //if err != nil { - // panic(fmt.Sprintf("Postgres renew notifications connection error: %v\n", err)) - //} - if err := m.pg.SaveLastNotification(allIds); err != nil { - panic(fmt.Sprintf("Error saving LastNotification time, suicide: %v", err)) - } - } - } -} diff --git a/backend/services/ender/builder/builder.go b/backend/services/ender/builder/builder.go index f17116501..c0690a3ad 100644 --- a/backend/services/ender/builder/builder.go +++ b/backend/services/ender/builder/builder.go @@ -3,6 +3,7 @@ package builder import ( "net/url" "strings" + "time" "openreplay/backend/pkg/intervals" . "openreplay/backend/pkg/messages" @@ -42,6 +43,7 @@ func getResourceType(initiator string, URL string) string { type builder struct { readyMsgs []Message timestamp uint64 + lastProcessedTimestamp int64 peBuilder *pageEventBuilder ptaBuilder *performanceTrackAggrBuilder ieBuilder *inputEventBuilder @@ -112,6 +114,10 @@ func (b *builder) handleMessage(message Message, messageID uint64) { if b.timestamp <= timestamp { // unnecessary? TODO: test and remove b.timestamp = timestamp } + + b.lastProcessedTimestamp = time.Now().UnixNano()/1e6 + + // Might happen before the first timestamp. 
switch msg := message.(type) { case *SessionStart, @@ -294,6 +300,7 @@ func (b *builder) checkTimeouts(ts int64) bool { } lastTsGap := ts - int64(b.timestamp) + //b.lastProcessedTimestamp //log.Printf("checking timeouts for sess %v: %v now, %v sesstime; gap %v",b.sid, ts, b.timestamp, lastTsGap) if lastTsGap > intervals.EVENTS_SESSION_END_TIMEOUT { if rm := b.ddDetector.Build(); rm != nil { diff --git a/backend/services/sink/main.go b/backend/services/sink/main.go index b1bdf8d25..5893e93e6 100644 --- a/backend/services/sink/main.go +++ b/backend/services/sink/main.go @@ -20,8 +20,13 @@ import ( func main() { log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile) - writer := NewWriter(env.Uint16("FS_ULIMIT"), env.String("FS_DIR")) - + FS_DIR := env.String("FS_DIR"); + if _, err := os.Stat(FS_DIR); os.IsNotExist(err) { + log.Fatalf("%v doesn't exist. %v", FS_DIR, err) + } + + writer := NewWriter(env.Uint16("FS_ULIMIT"), FS_DIR) + count := 0 consumer := queue.NewMessageConsumer( diff --git a/ee/api/.chalice/config.json b/ee/api/.chalice/config.json deleted file mode 100644 index db58c76ba..000000000 --- a/ee/api/.chalice/config.json +++ /dev/null @@ -1,81 +0,0 @@ -{ - "version": "2.0", - "app_name": "parrot", - "environment_variables": { - }, - "stages": { - "default-ee": { - "api_gateway_stage": "default-ee", - "manage_iam_role": false, - "iam_role_arn": "", - "autogen_policy": true, - "environment_variables": { - "isFOS": "false", - "isEE": "true", - "stage": "default-ee", - "jwt_issuer": "openreplay-default-ee", - "sentryURL": "", - "pg_host": "127.0.0.1", - "pg_port": "9202", - "pg_dbname": "app", - "pg_user": "", - "pg_password": "", - "ch_host": "", - "ch_port": "", - "alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s", - "email_signup": "http://127.0.0.1:8000/async/email_signup/%s", - "email_funnel": "http://127.0.0.1:8000/async/funnel/%s", - "email_plans": "http://127.0.0.1:8000/async/plans/%s", - "email_basic": "http://127.0.0.1:8000/async/basic/%s", - "assign_link": "http://127.0.0.1:8000/async/email_assignment", - "captcha_server": "", - "captcha_key": "", - "sessions_bucket": "mobs", - "sessions_region": "us-east-1", - "put_S3_TTL": "20", - "sourcemaps_reader": "http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps", - "sourcemaps_bucket": "sourcemaps", - "peers": "http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers", - "js_cache_bucket": "sessions-assets", - "async_Token": "", - "EMAIL_HOST": "", - "EMAIL_PORT": "587", - "EMAIL_USER": "", - "EMAIL_PASSWORD": "", - "EMAIL_USE_TLS": "true", - "EMAIL_USE_SSL": "false", - "EMAIL_SSL_KEY": "", - "EMAIL_SSL_CERT": "", - "EMAIL_FROM": "OpenReplay", - "SITE_URL": "", - "announcement_url": "", - "jwt_secret": "SET A RANDOM STRING HERE", - "jwt_algorithm": "HS512", - "jwt_exp_delta_seconds": "2592000", - "S3_HOST": "", - "S3_KEY": "", - "S3_SECRET": "", - "LICENSE_KEY": "", - "SAML2_MD_URL": "", - "idp_entityId": "", - "idp_sso_url": "", - "idp_x509cert": "", - "idp_sls_url": "", - "idp_name": "", - "sso_exp_delta_seconds": "172800", - "sso_landing": "/login?jwt=%s", - "invitation_link": "/api/users/invitation?token=%s", - "change_password_link": "/reset-password?invitation=%s&&pass=%s", - "iosBucket": "openreplay-ios-images", - "version_number": "1.3.6", - "assist_secret": "" - }, - "lambda_timeout": 150, - "lambda_memory_size": 400, - "subnet_ids": [ - ], - "security_group_ids": [ - ] - } - } -} diff --git a/ee/api/.env.default b/ee/api/.env.default new file mode 100644 index 
000000000..6fff1793c --- /dev/null +++ b/ee/api/.env.default @@ -0,0 +1,53 @@ +EMAIL_FROM=OpenReplay +EMAIL_HOST= +EMAIL_PASSWORD= +EMAIL_PORT=587 +EMAIL_SSL_CERT= +EMAIL_SSL_KEY= +EMAIL_USER= +EMAIL_USE_SSL=false +EMAIL_USE_TLS=true +LICENSE_KEY= +S3_HOST= +S3_KEY= +S3_SECRET= +SAML2_MD_URL= +SITE_URL= +alert_ntf=http://127.0.0.1:8000/async/alerts/notifications/%s +announcement_url= +assign_link=http://127.0.0.1:8000/async/email_assignment +async_Token= +captcha_key= +captcha_server= +ch_host= +ch_port= +change_password_link=/reset-password?invitation=%s&&pass=%s +email_basic=http://127.0.0.1:8000/async/basic/%s +email_plans=http://127.0.0.1:8000/async/plans/%s +email_signup=http://127.0.0.1:8000/async/email_signup/%s +idp_entityId= +idp_sls_url= +idp_sso_url= +idp_x509cert= +invitation_link=/api/users/invitation?token=%s +isEE=true +isFOS=false +js_cache_bucket=sessions-assets +jwt_algorithm=HS512 +jwt_exp_delta_seconds=2592000 +jwt_issuer=openreplay-default-ee +jwt_secret="SET A RANDOM STRING HERE" +peers=http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers +pg_dbname=app +pg_host=127.0.0.1 +pg_password= +pg_port=9202 +pg_user= +put_S3_TTL=20 +sentryURL= +sessions_bucket=mobs +sessions_region=us-east-1 +sourcemaps_bucket=sourcemaps +sourcemaps_reader=http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps +stage=default-ee +version_number=1.0.0 diff --git a/ee/api/.gitignore b/ee/api/.gitignore index 41d3d640f..8afea0ab6 100644 --- a/ee/api/.gitignore +++ b/ee/api/.gitignore @@ -178,6 +178,7 @@ README/* Pipfile /chalicelib/core/alerts.py +/chalicelib/core/alerts_processor.py /chalicelib/core/announcements.py /chalicelib/blueprints/bp_app_api.py /chalicelib/blueprints/bp_core.py @@ -186,6 +187,7 @@ Pipfile /chalicelib/core/errors_favorite_viewed.py /chalicelib/core/events.py /chalicelib/core/events_ios.py +/chalicelib/core/funnels.py /chalicelib/core/integration_base.py /chalicelib/core/integration_base_issue.py /chalicelib/core/integration_github.py @@ -204,13 +206,13 @@ Pipfile /chalicelib/core/log_tool_sentry.py /chalicelib/core/log_tool_stackdriver.py /chalicelib/core/log_tool_sumologic.py +/chalicelib/core/metadata.py /chalicelib/core/mobile.py /chalicelib/core/sessions.py /chalicelib/core/sessions_assignments.py /chalicelib/core/sessions_favorite_viewed.py /chalicelib/core/sessions_metas.py /chalicelib/core/sessions_mobs.py -/chalicelib/core/sessions.py /chalicelib/core/significance.py /chalicelib/core/slack.py /chalicelib/core/socket_ios.py @@ -235,12 +237,30 @@ Pipfile /chalicelib/utils/smtp.py /chalicelib/utils/strings.py /chalicelib/utils/TimeUTC.py -/chalicelib/core/heatmaps.py +/chalicelib/blueprints/app/__init__.py +/routers/app/__init__.py +/routers/crons/__init__.py +/routers/subs/__init__.py +/routers/__init__.py +/chalicelib/core/assist.py +/auth/auth_apikey.py +/auth/auth_jwt.py +/chalicelib/blueprints/subs/bp_insights.py +/build.sh +/routers/core.py +/routers/crons/core_crons.py +/routers/subs/dashboard.py +/db_changes.sql +/Dockerfile.bundle /entrypoint.bundle.sh /entrypoint.sh -/env_handler.py +/chalicelib/core/heatmaps.py +/routers/subs/insights.py +/schemas.py /chalicelib/blueprints/app/v1_api.py -/build.sh -/chalicelib/core/assist.py -/chalicelib/blueprints/app/__init__.py -/Dockerfile.bundle +/routers/app/v1_api.py +/chalicelib/core/custom_metrics.py +/chalicelib/core/performance_event.py +/chalicelib/core/saved_search.py +/app_alerts.py +/build_alerts.sh diff --git a/ee/api/Dockerfile b/ee/api/Dockerfile index 649e1f686..284d752ff 
100644 --- a/ee/api/Dockerfile +++ b/ee/api/Dockerfile @@ -1,10 +1,11 @@ -FROM python:3.6-slim +FROM python:3.9.7-slim LABEL Maintainer="Rajesh Rajendran" +LABEL Maintainer="KRAIEM Taha Yassine" RUN apt-get update && apt-get install -y pkg-config libxmlsec1-dev gcc && rm -rf /var/lib/apt/lists/* WORKDIR /work COPY . . -RUN pip install -r requirements.txt -t ./vendor --upgrade -RUN pip install chalice==1.22.2 +RUN pip install -r requirements.txt +RUN mv .env.default .env # Add Tini # Startup daemon diff --git a/ee/api/Dockerfile.alerts b/ee/api/Dockerfile.alerts new file mode 100644 index 000000000..5809de5e6 --- /dev/null +++ b/ee/api/Dockerfile.alerts @@ -0,0 +1,19 @@ +FROM python:3.9.7-slim +LABEL Maintainer="Rajesh Rajendran" +LABEL Maintainer="KRAIEM Taha Yassine" +RUN apt-get update && apt-get install -y pkg-config libxmlsec1-dev gcc && rm -rf /var/lib/apt/lists/* +WORKDIR /work +COPY . . +RUN pip install -r requirements.txt +RUN mv .env.default .env && mv app_alerts.py app.py +ENV pg_minconn 2 + +# Add Tini +# Startup daemon +ENV TINI_VERSION v0.19.0 +ARG envarg +ENV ENTERPRISE_BUILD ${envarg} +ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini +RUN chmod +x /tini +ENTRYPOINT ["/tini", "--"] +CMD ./entrypoint.sh \ No newline at end of file diff --git a/ee/api/app.py b/ee/api/app.py index e12b64e0b..fdf7f60b8 100644 --- a/ee/api/app.py +++ b/ee/api/app.py @@ -1,129 +1,86 @@ -import sentry_sdk -from chalice import Chalice, Response -from sentry_sdk import configure_scope +import logging +import queue + +from apscheduler.schedulers.asyncio import AsyncIOScheduler +from decouple import config +from fastapi import FastAPI, Request +from fastapi.middleware.cors import CORSMiddleware +from starlette import status +from starlette.responses import StreamingResponse, JSONResponse -from chalicelib import _overrides -from chalicelib.blueprints import bp_authorizers -from chalicelib.blueprints import bp_core, bp_core_crons -from chalicelib.blueprints import bp_core_dynamic, bp_core_dynamic_crons -from chalicelib.blueprints import bp_ee, bp_ee_crons, bp_saml -from chalicelib.blueprints.app import v1_api, v1_api_ee -from chalicelib.blueprints.subs import bp_dashboard from chalicelib.utils import helper from chalicelib.utils import pg_client -from chalicelib.utils.helper import environ +from routers import core, core_dynamic, ee, saml +from routers.app import v1_api, v1_api_ee +from routers.crons import core_crons +from routers.crons import core_dynamic_crons +from routers.subs import dashboard -app = Chalice(app_name='parrot') -app.debug = not helper.is_production() or helper.is_local() - -sentry_sdk.init(environ["sentryURL"]) - -# Monkey-patch print for DataDog hack -import sys -import traceback - -old_tb = traceback.print_exception -old_f = sys.stdout -old_e = sys.stderr -OR_SESSION_TOKEN = None - - -class F: - def write(self, x): - if OR_SESSION_TOKEN is not None and x != '\n' and not helper.is_local(): - old_f.write(f"[or_session_token={OR_SESSION_TOKEN}] {x}") - else: - old_f.write(x) - - def flush(self): - pass - - -def tb_print_exception(etype, value, tb, limit=None, file=None, chain=True): - if OR_SESSION_TOKEN is not None and not helper.is_local(): - value = type(value)(f"[or_session_token={OR_SESSION_TOKEN}] " + str(value)) - - old_tb(etype, value, tb, limit, file, chain) - - -if helper.is_production(): - traceback.print_exception = tb_print_exception - -sys.stdout = F() -sys.stderr = F() -# ---End Monkey-patch - - -_overrides.chalice_app(app) +app = 
FastAPI() @app.middleware('http') -def or_middleware(event, get_response): +async def or_middleware(request: Request, call_next): from chalicelib.core import unlock if not unlock.is_valid(): - return Response(body={"errors": ["expired license"]}, status_code=403) - if "{projectid}" in event.path.lower(): - from chalicelib.core import projects - if event.context["authorizer"].get("authorizer_identity") == "api_key" \ - and not projects.is_authorized( - project_id=projects.get_internal_project_id(event.uri_params["projectId"]), - tenant_id=event.context["authorizer"]["tenantId"]) \ - or event.context["authorizer"].get("authorizer_identity", "jwt") == "jwt" \ - and not projects.is_authorized(project_id=event.uri_params["projectId"], - tenant_id=event.context["authorizer"]["tenantId"]): - print("unauthorized project") - pg_client.close() - return Response(body={"errors": ["unauthorized project"]}, status_code=401) - global OR_SESSION_TOKEN - OR_SESSION_TOKEN = app.current_request.headers.get('vnd.openreplay.com.sid', - app.current_request.headers.get('vnd.asayer.io.sid')) - if "authorizer" in event.context and event.context["authorizer"] is None: - print("Deleted user!!") - pg_client.close() - return Response(body={"errors": ["Deleted user"]}, status_code=403) + return JSONResponse(content={"errors": ["expired license"]}, status_code=status.HTTP_403_FORBIDDEN) + global OR_SESSION_TOKEN + OR_SESSION_TOKEN = request.headers.get('vnd.openreplay.com.sid', request.headers.get('vnd.asayer.io.sid')) try: if helper.TRACK_TIME: import time now = int(time.time() * 1000) - response = get_response(event) - if response.status_code == 200 and response.body is not None and response.body.get("errors") is not None: - if "not found" in response.body["errors"][0]: - response = Response(status_code=404, body=response.body) - else: - response = Response(status_code=400, body=response.body) - if response.status_code // 100 == 5 and helper.allow_sentry() and OR_SESSION_TOKEN is not None and not helper.is_local(): - with configure_scope() as scope: - scope.set_tag('stage', environ["stage"]) - scope.set_tag('openReplaySessionToken', OR_SESSION_TOKEN) - scope.set_extra("context", event.context) - sentry_sdk.capture_exception(Exception(response.body)) + response: StreamingResponse = await call_next(request) if helper.TRACK_TIME: print(f"Execution time: {int(time.time() * 1000) - now} ms") except Exception as e: - if helper.allow_sentry() and OR_SESSION_TOKEN is not None and not helper.is_local(): - with configure_scope() as scope: - scope.set_tag('stage', environ["stage"]) - scope.set_tag('openReplaySessionToken', OR_SESSION_TOKEN) - scope.set_extra("context", event.context) - sentry_sdk.capture_exception(e) - response = Response(body={"Code": "InternalServerError", - "Message": "An internal server error occurred [level=Fatal]."}, - status_code=500) + pg_client.close() + raise e pg_client.close() return response -# Open source -app.register_blueprint(bp_authorizers.app) -app.register_blueprint(bp_core.app) -app.register_blueprint(bp_core_crons.app) -app.register_blueprint(bp_core_dynamic.app) -app.register_blueprint(bp_core_dynamic_crons.app) -app.register_blueprint(bp_dashboard.app) -app.register_blueprint(v1_api.app) -app.register_blueprint(v1_api_ee.app) -# Enterprise -app.register_blueprint(bp_ee.app) -app.register_blueprint(bp_ee_crons.app) -app.register_blueprint(bp_saml.app) +origins = [ + "*", +] + +app.add_middleware( + CORSMiddleware, + allow_origins=origins, + allow_credentials=True, + allow_methods=["*"], + 
allow_headers=["*"], +) +app.include_router(core.public_app) +app.include_router(core.app) +app.include_router(core.app_apikey) +app.include_router(core_dynamic.public_app) +app.include_router(core_dynamic.app) +app.include_router(core_dynamic.app_apikey) +app.include_router(ee.public_app) +app.include_router(ee.app) +app.include_router(ee.app_apikey) +app.include_router(saml.public_app) +app.include_router(saml.app) +app.include_router(saml.app_apikey) +app.include_router(dashboard.app) +# app.include_router(insights.app) +app.include_router(v1_api.app_apikey) +app.include_router(v1_api_ee.app_apikey) + +app.queue_system = queue.Queue() +app.schedule = AsyncIOScheduler() +app.schedule.start() + +for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs: + app.schedule.add_job(id=job["func"].__name__, **job) +from chalicelib.core import traces + +app.schedule.add_job(id="trace_worker", **traces.cron_jobs[0]) + +for job in app.schedule.get_jobs(): + print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)}) + +logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO)) +logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO)) diff --git a/ee/api/auth/__init__.py b/ee/api/auth/__init__.py new file mode 100644 index 000000000..e69de29bb diff --git a/ee/api/auth/auth_project.py b/ee/api/auth/auth_project.py new file mode 100644 index 000000000..c1e1d38cd --- /dev/null +++ b/ee/api/auth/auth_project.py @@ -0,0 +1,27 @@ +from fastapi import Request +from starlette import status +from starlette.exceptions import HTTPException + +import schemas +from chalicelib.core import projects +from or_dependencies import OR_context + + +class ProjectAuthorizer: + def __init__(self, project_identifier): + self.project_identifier: str = project_identifier + + async def __call__(self, request: Request) -> None: + if len(request.path_params.keys()) == 0 or request.path_params.get(self.project_identifier) is None: + return + current_user: schemas.CurrentContext = await OR_context(request) + project_identifier = request.path_params[self.project_identifier] + user_id = current_user.user_id if request.state.authorizer_identity == "jwt" else None + if (self.project_identifier == "projectId" \ + and not projects.is_authorized(project_id=project_identifier, tenant_id=current_user.tenant_id, + user_id=user_id)) \ + or (self.project_identifier.lower() == "projectKey" \ + and not projects.is_authorized(project_id=projects.get_internal_project_id(project_identifier), + tenant_id=current_user.tenant_id, user_id=user_id)): + print("unauthorized project") + raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="unauthorized project.") diff --git a/ee/api/chalicelib/_overrides.py b/ee/api/chalicelib/_overrides.py deleted file mode 100644 index 2bf0b6d2a..000000000 --- a/ee/api/chalicelib/_overrides.py +++ /dev/null @@ -1,104 +0,0 @@ -from chalice import Chalice, CORSConfig -from chalicelib.blueprints import bp_authorizers -from chalicelib.core import authorizers - -import sched -import threading -import time -from datetime import datetime -import pytz -from croniter import croniter - -base_time = datetime.now(pytz.utc) - -cors_config = CORSConfig( - allow_origin='*', - allow_headers=['vnd.openreplay.com.sid', 'vnd.asayer.io.sid'], - # max_age=600, - # expose_headers=['X-Special-Header'], - allow_credentials=True -) - - -def chalice_app(app): - def app_route(self, path, **kwargs): - kwargs.setdefault('cors', cors_config) - 
kwargs.setdefault('authorizer', bp_authorizers.jwt_authorizer) - handler_type = 'route' - name = kwargs.pop('name', None) - registration_kwargs = {'path': path, 'kwargs': kwargs, 'authorizer': kwargs.get("authorizer")} - - def _register_handler(user_handler): - handler_name = name - if handler_name is None: - handler_name = user_handler.__name__ - if registration_kwargs is not None: - kwargs = registration_kwargs - else: - kwargs = {} - - if kwargs['authorizer'] == bp_authorizers.jwt_authorizer \ - or kwargs['authorizer'] == bp_authorizers.api_key_authorizer: - def _user_handler(context=None, **args): - if context is not None: - args['context'] = context - else: - authorizer_context = app.current_request.context['authorizer'] - if kwargs['authorizer'] == bp_authorizers.jwt_authorizer: - args['context'] = authorizers.jwt_context(authorizer_context) - else: - args['context'] = authorizer_context - return user_handler(**args) - - wrapped = self._wrap_handler(handler_type, handler_name, _user_handler) - self._register_handler(handler_type, handler_name, _user_handler, wrapped, kwargs) - else: - wrapped = self._wrap_handler(handler_type, handler_name, user_handler) - self._register_handler(handler_type, handler_name, user_handler, wrapped, kwargs) - return wrapped - - return _register_handler - - app.route = app_route.__get__(app, Chalice) - - def app_schedule(self, expression, name=None, description=''): - handler_type = 'schedule' - registration_kwargs = {'expression': expression, - 'description': description} - - def _register_handler(user_handler): - handler_name = name - if handler_name is None: - handler_name = user_handler.__name__ - kwargs = registration_kwargs - cron_expression = kwargs["expression"].to_string()[len("cron("):-1] - if len(cron_expression.split(" ")) > 5: - cron_expression = " ".join(cron_expression.split(" ")[:-1]) - cron_expression = cron_expression.replace("?", "*") - cron_shell(user_handler, cron_expression) - - wrapped = self._wrap_handler(handler_type, handler_name, user_handler) - self._register_handler(handler_type, handler_name, user_handler, wrapped, kwargs) - return wrapped - - return _register_handler - - app.schedule = app_schedule.__get__(app, Chalice) - - def spawn(function, args): - th = threading.Thread(target=function, kwargs=args) - th.setDaemon(True) - th.start() - - def cron_shell(function, cron_expression): - def to_start(): - scheduler = sched.scheduler(time.time, time.sleep) - citer = croniter(cron_expression, base_time) - while True: - next_execution = citer.get_next(datetime) - print(f"{function.__name__} next execution: {next_execution}") - scheduler.enterabs(next_execution.timestamp(), 1, function, argument=(None,)) - scheduler.run() - print(f"{function.__name__} executed: {next_execution}") - - spawn(to_start, None) diff --git a/ee/api/chalicelib/blueprints/app/v1_api_ee.py b/ee/api/chalicelib/blueprints/app/v1_api_ee.py deleted file mode 100644 index 5682bf5b2..000000000 --- a/ee/api/chalicelib/blueprints/app/v1_api_ee.py +++ /dev/null @@ -1,16 +0,0 @@ -from chalice import Blueprint - -from chalicelib import _overrides -from chalicelib.blueprints import bp_authorizers -from chalicelib.utils import assist_helper - -app = Blueprint(__name__) -_overrides.chalice_app(app) - - -@app.route('/v1/assist/credentials', methods=['GET'], authorizer=bp_authorizers.api_key_authorizer) -def get_assist_credentials(context): - credentials = assist_helper.get_temporary_credentials() - if "errors" in credentials: - return credentials - return {"data": 
credentials} diff --git a/ee/api/chalicelib/blueprints/bp_authorizers.py b/ee/api/chalicelib/blueprints/bp_authorizers.py deleted file mode 100644 index 14abd3988..000000000 --- a/ee/api/chalicelib/blueprints/bp_authorizers.py +++ /dev/null @@ -1,38 +0,0 @@ -from chalice import Blueprint, AuthResponse -from chalicelib.utils import helper -from chalicelib.core import authorizers - -from chalicelib.core import users - -app = Blueprint(__name__) - - -@app.authorizer() -def api_key_authorizer(auth_request): - r = authorizers.api_key_authorizer(auth_request.token) - if r is None: - return AuthResponse(routes=[], principal_id=None) - r["authorizer_identity"] = "api_key" - print(r) - return AuthResponse( - routes=['*'], - principal_id=r['tenantId'], - context=r - ) - - -@app.authorizer(ttl_seconds=60) -def jwt_authorizer(auth_request): - jwt_payload = authorizers.jwt_authorizer(auth_request.token) - if jwt_payload is None \ - or jwt_payload.get("iat") is None or jwt_payload.get("aud") is None \ - or not users.auth_exists(user_id=jwt_payload["userId"], tenant_id=jwt_payload["tenantId"], - jwt_iat=jwt_payload["iat"], jwt_aud=jwt_payload["aud"]): - return AuthResponse(routes=[], principal_id=None) - jwt_payload["authorizer_identity"] = "jwt" - print(jwt_payload) - return AuthResponse( - routes=['*'], - principal_id=jwt_payload['userId'], - context=jwt_payload - ) diff --git a/ee/api/chalicelib/blueprints/bp_core_dynamic.py b/ee/api/chalicelib/blueprints/bp_core_dynamic.py deleted file mode 100644 index 6be1380f5..000000000 --- a/ee/api/chalicelib/blueprints/bp_core_dynamic.py +++ /dev/null @@ -1,470 +0,0 @@ -from chalice import Blueprint, Response - -from chalicelib import _overrides -from chalicelib.core import assist -from chalicelib.core import boarding -from chalicelib.core import errors -from chalicelib.core import license -from chalicelib.core import metadata, errors_favorite_viewed, slack, alerts, sessions, integrations_manager -from chalicelib.core import notifications -from chalicelib.core import projects -from chalicelib.core import signup -from chalicelib.core import tenants -from chalicelib.core import users -from chalicelib.core import webhook -from chalicelib.core.collaboration_slack import Slack -from chalicelib.utils import captcha, SAML2_helper -from chalicelib.utils import helper -from chalicelib.utils.helper import environ - -app = Blueprint(__name__) -_overrides.chalice_app(app) - - -@app.route('/login', methods=['POST'], authorizer=None) -def login(): - data = app.current_request.json_body - if helper.allow_captcha() and not captcha.is_valid(data["g-recaptcha-response"]): - return {"errors": ["Invalid captcha."]} - r = users.authenticate(data['email'], data['password'], for_plugin=False) - if r is None: - return Response(status_code=401, body={ - 'errors': ['You’ve entered invalid Email or Password.'] - }) - elif "errors" in r: - return r - - tenant_id = r.pop("tenantId") - # change this in open-source - r = {**r, - "limits": { - "teamMember": int(environ.get("numberOfSeats", 0)), - "projects": -1, - "metadata": metadata.get_remaining_metadata_with_count(tenant_id)}, - **license.get_status(tenant_id), - "smtp": environ["EMAIL_HOST"] is not None and len(environ["EMAIL_HOST"]) > 0, - "saml2": SAML2_helper.is_saml2_available(), - "iceServers": assist.get_ice_servers() - } - c = tenants.get_by_tenant_id(tenant_id) - c.pop("createdAt") - c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, - stack_integrations=True, version=True) - return { 
- 'jwt': r.pop('jwt'), - 'data': { - "user": r, - "client": c - } - } - - -@app.route('/account', methods=['GET']) -def get_account(context): - r = users.get(tenant_id=context['tenantId'], user_id=context['userId']) - return { - 'data': { - **r, - "limits": { - "teamMember": int(environ.get("numberOfSeats", 0)), - "projects": -1, - "metadata": metadata.get_remaining_metadata_with_count(context['tenantId']) - }, - **license.get_status(context["tenantId"]), - "smtp": environ["EMAIL_HOST"] is not None and len(environ["EMAIL_HOST"]) > 0, - "saml2": SAML2_helper.is_saml2_available(), - "iceServers": assist.get_ice_servers() - } - } - - -@app.route('/projects', methods=['GET']) -def get_projects(context): - return {"data": projects.get_projects(tenant_id=context["tenantId"], recording_state=True, gdpr=True, recorded=True, - stack_integrations=True, version=True)} - - -@app.route('/projects', methods=['POST', 'PUT']) -def create_project(context): - data = app.current_request.json_body - return projects.create(tenant_id=context["tenantId"], user_id=context["userId"], data=data) - - -@app.route('/projects/{projectId}', methods=['POST', 'PUT']) -def create_edit_project(projectId, context): - data = app.current_request.json_body - - return projects.edit(tenant_id=context["tenantId"], user_id=context["userId"], data=data, project_id=projectId) - - -@app.route('/projects/{projectId}', methods=['GET']) -def get_project(projectId, context): - data = projects.get_project(tenant_id=context["tenantId"], project_id=projectId, include_last_session=True, - include_gdpr=True) - if data is None: - return {"errors": ["project not found"]} - return {"data": data} - - -@app.route('/projects/{projectId}', methods=['DELETE']) -def delete_project(projectId, context): - return projects.delete(tenant_id=context["tenantId"], user_id=context["userId"], project_id=projectId) - - -@app.route('/projects/limit', methods=['GET']) -def get_projects_limit(context): - return {"data": { - "current": projects.count_by_tenant(tenant_id=context["tenantId"]), - "remaining": -1 # change this in open-source - }} - - -@app.route('/client', methods=['GET']) -def get_client(context): - r = tenants.get_by_tenant_id(context['tenantId']) - if r is not None: - r.pop("createdAt") - r["projects"] = projects.get_projects(tenant_id=context['tenantId'], recording_state=True, recorded=True, - stack_integrations=True, version=True) - return { - 'data': r - } - - -@app.route('/client/new_api_key', methods=['GET']) -def generate_new_tenant_token(context): - return { - 'data': tenants.generate_new_api_key(context['tenantId']) - } - - -@app.route('/client', methods=['PUT', 'POST']) -def put_client(context): - data = app.current_request.json_body - return tenants.update(tenant_id=context["tenantId"], user_id=context["userId"], data=data) - - -@app.route('/signup', methods=['GET'], authorizer=None) -def get_all_signup(): - return {"data": {"tenants": tenants.tenants_exists(), - "sso": SAML2_helper.is_saml2_available(), - "ssoProvider": SAML2_helper.get_saml2_provider(), - "edition": helper.get_edition()}} - - -@app.route('/signup', methods=['POST', 'PUT'], authorizer=None) -def signup_handler(): - data = app.current_request.json_body - return signup.create_step1(data) - - -@app.route('/integrations/slack', methods=['POST', 'PUT']) -def add_slack_client(context): - data = app.current_request.json_body - if "url" not in data or "name" not in data: - return {"errors": ["please provide a url and a name"]} - n = Slack.add_channel(tenant_id=context["tenantId"], 
url=data["url"], name=data["name"]) - if n is None: - return { - "errors": ["We couldn't send you a test message on your Slack channel. Please verify your webhook url."] - } - return {"data": n} - - -@app.route('/integrations/slack/{integrationId}', methods=['POST', 'PUT']) -def edit_slack_integration(integrationId, context): - data = app.current_request.json_body - if data.get("url") and len(data["url"]) > 0: - old = webhook.get(tenant_id=context["tenantId"], webhook_id=integrationId) - if old["endpoint"] != data["url"]: - if not Slack.say_hello(data["url"]): - return { - "errors": [ - "We couldn't send you a test message on your Slack channel. Please verify your webhook url."] - } - return {"data": webhook.update(tenant_id=context["tenantId"], webhook_id=integrationId, - changes={"name": data.get("name", ""), "endpoint": data["url"]})} - - -@app.route('/{projectId}/errors/search', methods=['POST']) -def errors_search(projectId, context): - data = app.current_request.json_body - params = app.current_request.query_params - if params is None: - params = {} - - return errors.search(data, projectId, user_id=context["userId"], status=params.get("status", "ALL"), - favorite_only="favorite" in params) - - -@app.route('/{projectId}/errors/stats', methods=['GET']) -def errors_stats(projectId, context): - params = app.current_request.query_params - if params is None: - params = {} - - return errors.stats(projectId, user_id=context["userId"], **params) - - -@app.route('/{projectId}/errors/{errorId}', methods=['GET']) -def errors_get_details(projectId, errorId, context): - params = app.current_request.query_params - if params is None: - params = {} - - data = errors.get_details(project_id=projectId, user_id=context["userId"], error_id=errorId, **params) - if data.get("data") is not None: - errors_favorite_viewed.viewed_error(project_id=projectId, user_id=context['userId'], error_id=errorId) - return data - - -@app.route('/{projectId}/errors/{errorId}/stats', methods=['GET']) -def errors_get_details_right_column(projectId, errorId, context): - params = app.current_request.query_params - if params is None: - params = {} - - data = errors.get_details_chart(project_id=projectId, user_id=context["userId"], error_id=errorId, **params) - return data - - -@app.route('/{projectId}/errors/{errorId}/sourcemaps', methods=['GET']) -def errors_get_details_sourcemaps(projectId, errorId, context): - data = errors.get_trace(project_id=projectId, error_id=errorId) - if "errors" in data: - return data - return { - 'data': data - } - - -@app.route('/async/alerts/notifications/{step}', methods=['POST', 'PUT'], authorizer=None) -def send_alerts_notification_async(step): - data = app.current_request.json_body - if data.pop("auth") != environ["async_Token"]: - return {"errors": ["missing auth"]} - if step == "slack": - slack.send_batch(notifications_list=data.get("notifications")) - elif step == "email": - alerts.send_by_email_batch(notifications_list=data.get("notifications")) - elif step == "webhook": - webhook.trigger_batch(data_list=data.get("notifications")) - - -@app.route('/notifications', methods=['GET']) -def get_notifications(context): - return {"data": notifications.get_all(tenant_id=context['tenantId'], user_id=context['userId'])} - - -@app.route('/notifications/{notificationId}/view', methods=['GET']) -def view_notifications(notificationId, context): - return {"data": notifications.view_notification(notification_ids=[notificationId], user_id=context['userId'])} - - -@app.route('/notifications/view', 
methods=['POST', 'PUT']) -def batch_view_notifications(context): - data = app.current_request.json_body - return {"data": notifications.view_notification(notification_ids=data.get("ids", []), - startTimestamp=data.get("startTimestamp"), - endTimestamp=data.get("endTimestamp"), - user_id=context['userId'], - tenant_id=context["tenantId"])} - - -@app.route('/notifications', methods=['POST', 'PUT'], authorizer=None) -def create_notifications(): - data = app.current_request.json_body - if data.get("token", "") != "nF46JdQqAM5v9KI9lPMpcu8o9xiJGvNNWOGL7TJP": - return {"errors": ["missing token"]} - return notifications.create(data.get("notifications", [])) - - -@app.route('/boarding', methods=['GET']) -def get_boarding_state(context): - return {"data": boarding.get_state(tenant_id=context["tenantId"])} - - -@app.route('/boarding/installing', methods=['GET']) -def get_boarding_state_installing(context): - return {"data": boarding.get_state_installing(tenant_id=context["tenantId"])} - - -@app.route('/boarding/identify-users', methods=['GET']) -def get_boarding_state_identify_users(context): - return {"data": boarding.get_state_identify_users(tenant_id=context["tenantId"])} - - -@app.route('/boarding/manage-users', methods=['GET']) -def get_boarding_state_manage_users(context): - return {"data": boarding.get_state_manage_users(tenant_id=context["tenantId"])} - - -@app.route('/boarding/integrations', methods=['GET']) -def get_boarding_state_integrations(context): - return {"data": boarding.get_state_integrations(tenant_id=context["tenantId"])} - - -# this endpoint supports both jira & github based on `provider` attribute -@app.route('/integrations/issues', methods=['POST', 'PUT']) -def add_edit_jira_cloud_github(context): - data = app.current_request.json_body - provider = data.get("provider", "").upper() - error, integration = integrations_manager.get_integration(tool=provider, tenant_id=context["tenantId"], - user_id=context["userId"]) - if error is not None: - return error - return {"data": integration.add_edit(data=data)} - - -@app.route('/integrations/slack/{integrationId}', methods=['GET']) -def get_slack_webhook(integrationId, context): - return {"data": webhook.get(tenant_id=context["tenantId"], webhook_id=integrationId)} - - -@app.route('/integrations/slack/channels', methods=['GET']) -def get_slack_integration(context): - return {"data": webhook.get_by_type(tenant_id=context["tenantId"], webhook_type='slack')} - - -@app.route('/integrations/slack/{integrationId}', methods=['DELETE']) -def delete_slack_integration(integrationId, context): - return webhook.delete(context["tenantId"], integrationId) - - -@app.route('/webhooks', methods=['POST', 'PUT']) -def add_edit_webhook(context): - data = app.current_request.json_body - return {"data": webhook.add_edit(tenant_id=context["tenantId"], data=data, replace_none=True)} - - -@app.route('/webhooks', methods=['GET']) -def get_webhooks(context): - return {"data": webhook.get_by_tenant(tenant_id=context["tenantId"], replace_none=True)} - - -@app.route('/webhooks/{webhookId}', methods=['DELETE']) -def delete_webhook(webhookId, context): - return {"data": webhook.delete(tenant_id=context["tenantId"], webhook_id=webhookId)} - - -@app.route('/client/members', methods=['GET']) -def get_members(context): - return {"data": users.get_members(tenant_id=context['tenantId'])} - - -@app.route('/client/members', methods=['PUT', 'POST']) -def add_member(context): - # if SAML2_helper.is_saml2_available(): - # return {"errors": ["please use your SSO server to add 
teammates"]} - data = app.current_request.json_body - return users.create_member(tenant_id=context['tenantId'], user_id=context['userId'], data=data) - - -@app.route('/users/invitation', methods=['GET'], authorizer=None) -def process_invitation_link(): - params = app.current_request.query_params - if params is None or len(params.get("token", "")) < 64: - return {"errors": ["please provide a valid invitation"]} - user = users.get_by_invitation_token(params["token"]) - if user is None: - return {"errors": ["invitation not found"]} - if user["expiredInvitation"]: - return {"errors": ["expired invitation, please ask your admin to send a new one"]} - if user["expiredChange"] is not None and not user["expiredChange"] \ - and user["changePwdToken"] is not None and user["changePwdAge"] < -5 * 60: - pass_token = user["changePwdToken"] - else: - pass_token = users.allow_password_change(user_id=user["userId"]) - return Response( - status_code=307, - body='', - headers={'Location': environ["SITE_URL"] + environ["change_password_link"] % (params["token"], pass_token), - 'Content-Type': 'text/plain'}) - - -@app.route('/password/reset', methods=['POST', 'PUT'], authorizer=None) -def change_password_by_invitation(): - data = app.current_request.json_body - if data is None or len(data.get("invitation", "")) < 64 or len(data.get("pass", "")) < 8: - return {"errors": ["please provide a valid invitation & pass"]} - user = users.get_by_invitation_token(token=data["invitation"], pass_token=data["pass"]) - if user is None: - return {"errors": ["invitation not found"]} - if user["expiredChange"]: - return {"errors": ["expired change, please re-use the invitation link"]} - - return users.set_password_invitation(new_password=data["password"], user_id=user["userId"], - tenant_id=user["tenantId"]) - - -@app.route('/client/members/{memberId}', methods=['PUT', 'POST']) -def edit_member(memberId, context): - data = app.current_request.json_body - return users.edit(tenant_id=context['tenantId'], editor_id=context['userId'], changes=data, - user_id_to_update=memberId) - - -@app.route('/client/members/{memberId}/reset', methods=['GET']) -def reset_reinvite_member(memberId, context): - return users.reset_member(tenant_id=context['tenantId'], editor_id=context['userId'], user_id_to_update=memberId) - - -@app.route('/client/members/{memberId}', methods=['DELETE']) -def delete_member(memberId, context): - return users.delete_member(tenant_id=context["tenantId"], user_id=context['userId'], id_to_delete=memberId) - - -@app.route('/account/new_api_key', methods=['GET']) -def generate_new_user_token(context): - return {"data": users.generate_new_api_key(user_id=context['userId'])} - - -@app.route('/account', methods=['POST', 'PUT']) -def edit_account(context): - data = app.current_request.json_body - return users.edit(tenant_id=context['tenantId'], user_id_to_update=context['userId'], changes=data, - editor_id=context['userId']) - - -@app.route('/account/password', methods=['PUT', 'POST']) -def change_client_password(context): - data = app.current_request.json_body - return users.change_password(email=context['email'], old_password=data["oldPassword"], - new_password=data["newPassword"], tenant_id=context["tenantId"], - user_id=context["userId"]) - - -@app.route('/metadata/session_search', methods=['GET']) -def search_sessions_by_metadata(context): - params = app.current_request.query_params - if params is None: - return {"errors": ["please provide a key&value for search"]} - value = params.get('value', '') - key = 
params.get('key', '') - project_id = params.get('projectId') - if project_id is not None \ - and not projects.is_authorized(project_id=project_id, tenant_id=context["tenantId"]): - return {"errors": ["unauthorized project"]} - if len(value) == 0 and len(key) == 0: - return {"errors": ["please provide a key&value for search"]} - if len(value) == 0: - return {"errors": ["please provide a value for search"]} - if len(key) == 0: - return {"errors": ["please provide a key for search"]} - return { - "data": sessions.search_by_metadata(tenant_id=context["tenantId"], user_id=context["userId"], m_value=value, - m_key=key, - project_id=project_id)} - - -@app.route('/plans', methods=['GET']) -def get_current_plan(context): - return { - "data": license.get_status(context["tenantId"]) - } - - -@app.route('/alerts/notifications', methods=['POST', 'PUT'], authorizer=None) -def send_alerts_notifications(): - data = app.current_request.json_body - return {"data": alerts.process_notifications(data.get("notifications", []))} diff --git a/ee/api/chalicelib/blueprints/bp_core_dynamic_crons.py b/ee/api/chalicelib/blueprints/bp_core_dynamic_crons.py deleted file mode 100644 index b149c8807..000000000 --- a/ee/api/chalicelib/blueprints/bp_core_dynamic_crons.py +++ /dev/null @@ -1,21 +0,0 @@ -from chalice import Blueprint, Cron -from chalicelib import _overrides -from chalicelib.utils import helper - -app = Blueprint(__name__) -_overrides.chalice_app(app) -from chalicelib.core import telemetry -from chalicelib.core import unlock - - -# Run every day. -@app.schedule(Cron('0', '0', '?', '*', '*', '*')) -def telemetry_cron(event): - telemetry.compute() - - -@app.schedule(Cron('0/60', '*', '*', '*', '?', '*')) -def unlock_cron(event): - print("validating license") - unlock.check() - print(f"valid: {unlock.is_valid()}") diff --git a/ee/api/chalicelib/blueprints/bp_ee.py b/ee/api/chalicelib/blueprints/bp_ee.py deleted file mode 100644 index c71668e36..000000000 --- a/ee/api/chalicelib/blueprints/bp_ee.py +++ /dev/null @@ -1,58 +0,0 @@ -from chalice import Blueprint - -from chalicelib import _overrides -from chalicelib.core import roles -from chalicelib.core import unlock -from chalicelib.utils import assist_helper - -app = Blueprint(__name__) -_overrides.chalice_app(app) - -unlock.check() - - -@app.route('/client/roles', methods=['GET']) -def get_roles(context): - return { - 'data': roles.get_roles(tenant_id=context["tenantId"]) - } - - -@app.route('/client/roles', methods=['POST', 'PUT']) -def add_role(context): - data = app.current_request.json_body - data = roles.create(tenant_id=context['tenantId'], user_id=context['userId'], name=data["name"], - description=data.get("description"), permissions=data["permissions"]) - if "errors" in data: - return data - - return { - 'data': data - } - - -@app.route('/client/roles/{roleId}', methods=['POST', 'PUT']) -def edit_role(roleId, context): - data = app.current_request.json_body - data = roles.update(tenant_id=context['tenantId'], user_id=context['userId'], role_id=roleId, changes=data) - if "errors" in data: - return data - - return { - 'data': data - } - - -@app.route('/client/roles/{roleId}', methods=['DELETE']) -def delete_role(roleId, context): - data = roles.delete(tenant_id=context['tenantId'], user_id=context["userId"], role_id=roleId) - if "errors" in data: - return data - return { - 'data': data - } - - -@app.route('/assist/credentials', methods=['GET']) -def get_assist_credentials(context): - return {"data": assist_helper.get_full_config()} diff --git 
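A note on the two cron blueprints removed above: Chalice's Cron takes six CloudWatch-style fields (minutes, hours, day-of-month, month, day-of-week, year), so the telemetry job fires daily at 00:00 UTC and the unlock job every 60 minutes. A minimal sketch of the same schedules, assuming only that chalice is installed (app name and function bodies are illustrative):

    from chalice import Chalice, Cron

    app = Chalice(app_name="crons-sketch")

    # Cron(minutes, hours, day_of_month, month, day_of_week, year)
    @app.schedule(Cron('0', '0', '?', '*', '*', '*'))  # daily at 00:00 UTC
    def telemetry_cron(event):
        pass  # telemetry.compute() in the removed blueprint

    @app.schedule(Cron('0/60', '*', '*', '*', '?', '*'))  # every 60 minutes
    def unlock_cron(event):
        pass  # unlock.check() in the removed blueprint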
a/ee/api/chalicelib/blueprints/bp_ee_crons.py b/ee/api/chalicelib/blueprints/bp_ee_crons.py deleted file mode 100644 index 3333fbb20..000000000 --- a/ee/api/chalicelib/blueprints/bp_ee_crons.py +++ /dev/null @@ -1,6 +0,0 @@ -from chalice import Blueprint -from chalice import Cron -from chalicelib import _overrides - -app = Blueprint(__name__) -_overrides.chalice_app(app) \ No newline at end of file diff --git a/ee/api/chalicelib/blueprints/subs/bp_dashboard.py b/ee/api/chalicelib/blueprints/subs/bp_dashboard.py deleted file mode 100644 index b868f7c64..000000000 --- a/ee/api/chalicelib/blueprints/subs/bp_dashboard.py +++ /dev/null @@ -1,606 +0,0 @@ -from chalice import Blueprint -from chalicelib.utils import helper -from chalicelib import _overrides - -from chalicelib.core import dashboard - -from chalicelib.core import metadata - -app = Blueprint(__name__) -_overrides.chalice_app(app) - - -@app.route('/{projectId}/dashboard/metadata', methods=['GET']) -def get_metadata_map(projectId, context): - metamap = [] - for m in metadata.get(project_id=projectId): - metamap.append({"name": m["key"], "key": f"metadata{m['index']}"}) - return {"data": metamap} - - -@app.route('/{projectId}/dashboard/sessions', methods=['GET', 'POST']) -def get_dashboard_processed_sessions(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_processed_sessions(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/errors', methods=['GET', 'POST']) -def get_dashboard_errors(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_errors(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/errors_trend', methods=['GET', 'POST']) -def get_dashboard_errors_trend(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_errors_trend(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/application_activity', methods=['GET', 'POST']) -def get_dashboard_application_activity(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_application_activity(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/page_metrics', methods=['GET', 'POST']) -def get_dashboard_page_metrics(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_page_metrics(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/user_activity', methods=['GET', 'POST']) -def get_dashboard_user_activity(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_user_activity(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/performance', methods=['GET', 'POST']) -def 
get_dashboard_performance(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_performance(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/slowest_images', methods=['GET', 'POST']) -def get_dashboard_slowest_images(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_slowest_images(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/missing_resources', methods=['GET', 'POST']) -def get_performance_sessions(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_missing_resources_trend(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/network', methods=['GET', 'POST']) -def get_network_widget(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_network(project_id=projectId, **{**data, **args})} - - -@app.route('/{projectId}/dashboard/{widget}/search', methods=['GET']) -def get_dashboard_autocomplete(projectId, widget, context): - params = app.current_request.query_params - if params is None: - return {"data": []} - - if widget in ['performance']: - data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId, - platform=params.get('platform', None), performance=True) - elif widget in ['pages', 'pages_dom_buildtime', 'top_metrics', 'time_to_render', - 'impacted_sessions_by_slow_pages', 'pages_response_time']: - data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId, - platform=params.get('platform', None), pages_only=True) - elif widget in ['resources_loading_time']: - data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId, - platform=params.get('platform', None), performance=False) - elif widget in ['time_between_events', 'events']: - data = dashboard.search(params.get('q', ''), params.get('type', ''), project_id=projectId, - platform=params.get('platform', None), performance=False, events_only=True) - elif widget in ['metadata']: - data = dashboard.search(params.get('q', ''), None, project_id=projectId, - platform=params.get('platform', None), metadata=True, key=params.get("key")) - else: - return {"errors": [f"unsupported widget: {widget}"]} - return {'data': data} - - -# 1 -@app.route('/{projectId}/dashboard/slowest_resources', methods=['GET', 'POST']) -def get_dashboard_slowest_resources(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_slowest_resources(project_id=projectId, **{**data, **args})} - - -# 2 -@app.route('/{projectId}/dashboard/resources_loading_time', methods=['GET', 'POST']) -def get_dashboard_resources(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": 
dashboard.get_resources_loading_time(project_id=projectId, **{**data, **args})} - - -# 3 -@app.route('/{projectId}/dashboard/pages_dom_buildtime', methods=['GET', 'POST']) -def get_dashboard_pages_dom(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_pages_dom_build_time(project_id=projectId, **{**data, **args})} - - -# 4 -@app.route('/{projectId}/dashboard/busiest_time_of_day', methods=['GET', 'POST']) -def get_dashboard_busiest_time_of_day(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_busiest_time_of_day(project_id=projectId, **{**data, **args})} - - -# 5 -@app.route('/{projectId}/dashboard/sessions_location', methods=['GET', 'POST']) -def get_dashboard_sessions_location(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_sessions_location(project_id=projectId, **{**data, **args})} - - -# 6 -@app.route('/{projectId}/dashboard/speed_location', methods=['GET', 'POST']) -def get_dashboard_speed_location(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_speed_index_location(project_id=projectId, **{**data, **args})} - - -# 7 -@app.route('/{projectId}/dashboard/pages_response_time', methods=['GET', 'POST']) -def get_dashboard_pages_response_time(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_pages_response_time(project_id=projectId, **{**data, **args})} - - -# 8 -@app.route('/{projectId}/dashboard/pages_response_time_distribution', methods=['GET', 'POST']) -def get_dashboard_pages_response_time_distribution(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_pages_response_time_distribution(project_id=projectId, **{**data, **args})} - - -# 9 -@app.route('/{projectId}/dashboard/top_metrics', methods=['GET', 'POST']) -def get_dashboard_top_metrics(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_top_metrics(project_id=projectId, **{**data, **args})} - - -# 10 -@app.route('/{projectId}/dashboard/time_to_render', methods=['GET', 'POST']) -def get_dashboard_time_to_render(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_time_to_render(project_id=projectId, **{**data, **args})} - - -# 11 -@app.route('/{projectId}/dashboard/impacted_sessions_by_slow_pages', methods=['GET', 'POST']) -def get_dashboard_impacted_sessions_by_slow_pages(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = 
app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_impacted_sessions_by_slow_pages(project_id=projectId, **{**data, **args})} - - -# 12 -@app.route('/{projectId}/dashboard/memory_consumption', methods=['GET', 'POST']) -def get_dashboard_memory_consumption(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_memory_consumption(project_id=projectId, **{**data, **args})} - - -# 12.1 -@app.route('/{projectId}/dashboard/fps', methods=['GET', 'POST']) -def get_dashboard_avg_fps(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_avg_fps(project_id=projectId, **{**data, **args})} - - -# 12.2 -@app.route('/{projectId}/dashboard/cpu', methods=['GET', 'POST']) -def get_dashboard_avg_cpu(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_avg_cpu(project_id=projectId, **{**data, **args})} - - -# 13 -@app.route('/{projectId}/dashboard/crashes', methods=['GET', 'POST']) -def get_dashboard_impacted_sessions_by_slow_pages(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_crashes(project_id=projectId, **{**data, **args})} - - -# 14 -@app.route('/{projectId}/dashboard/domains_errors', methods=['GET', 'POST']) -def get_dashboard_domains_errors(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_domains_errors(project_id=projectId, **{**data, **args})} - - -# 14.1 -@app.route('/{projectId}/dashboard/domains_errors_4xx', methods=['GET', 'POST']) -def get_dashboard_domains_errors_4xx(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_domains_errors_4xx(project_id=projectId, **{**data, **args})} - - -# 14.2 -@app.route('/{projectId}/dashboard/domains_errors_5xx', methods=['GET', 'POST']) -def get_dashboard_domains_errors_5xx(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_domains_errors_5xx(project_id=projectId, **{**data, **args})} - - -# 15 -@app.route('/{projectId}/dashboard/slowest_domains', methods=['GET', 'POST']) -def get_dashboard_slowest_domains(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_slowest_domains(project_id=projectId, **{**data, **args})} - - -# 16 -@app.route('/{projectId}/dashboard/errors_per_domains', methods=['GET', 'POST']) -def get_dashboard_errors_per_domains(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = 
app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_errors_per_domains(project_id=projectId, **{**data, **args})} - - -# 17 -@app.route('/{projectId}/dashboard/sessions_per_browser', methods=['GET', 'POST']) -def get_dashboard_sessions_per_browser(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_sessions_per_browser(project_id=projectId, **{**data, **args})} - - -# 18 -@app.route('/{projectId}/dashboard/calls_errors', methods=['GET', 'POST']) -def get_dashboard_calls_errors(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_calls_errors(project_id=projectId, **{**data, **args})} - - -# 18.1 -@app.route('/{projectId}/dashboard/calls_errors_4xx', methods=['GET', 'POST']) -def get_dashboard_calls_errors_4xx(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_calls_errors_4xx(project_id=projectId, **{**data, **args})} - - -# 18.2 -@app.route('/{projectId}/dashboard/calls_errors_5xx', methods=['GET', 'POST']) -def get_dashboard_calls_errors_5xx(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_calls_errors_5xx(project_id=projectId, **{**data, **args})} - - -# 19 -@app.route('/{projectId}/dashboard/errors_per_type', methods=['GET', 'POST']) -def get_dashboard_errors_per_type(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_errors_per_type(project_id=projectId, **{**data, **args})} - - -# 20 -@app.route('/{projectId}/dashboard/resources_by_party', methods=['GET', 'POST']) -def get_dashboard_resources_by_party(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_resources_by_party(project_id=projectId, **{**data, **args})} - - -# 21 -@app.route('/{projectId}/dashboard/resource_type_vs_response_end', methods=['GET', 'POST']) -def get_dashboard_errors_per_resource_type(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.resource_type_vs_response_end(project_id=projectId, **{**data, **args})} - - -# 22 -@app.route('/{projectId}/dashboard/resources_vs_visually_complete', methods=['GET', 'POST']) -def get_dashboard_resources_vs_visually_complete(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_resources_vs_visually_complete(project_id=projectId, **{**data, **args})} - - -# 23 -@app.route('/{projectId}/dashboard/impacted_sessions_by_js_errors', methods=['GET', 'POST']) -def 
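All of these widget handlers share one idiom: the JSON body (defaulting to {}) is merged with dashboard.dashboard_args(params) via **{**data, **args}. Because later entries in a dict literal win, query-string arguments override same-named keys from the JSON body. A short illustration (the keys are illustrative, not taken from dashboard_args):

    data = {"density": 7, "platform": "web"}  # JSON body
    args = {"density": 24}                    # parsed query params
    merged = {**data, **args}
    assert merged == {"density": 24, "platform": "web"}  # query param wins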
get_dashboard_impacted_sessions_by_js_errors(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_impacted_sessions_by_js_errors(project_id=projectId, **{**data, **args})} - - -# 24 -@app.route('/{projectId}/dashboard/resources_count_by_type', methods=['GET', 'POST']) -def get_dashboard_resources_count_by_type(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": dashboard.get_resources_count_by_type(project_id=projectId, **{**data, **args})} - - -# 25 -@app.route('/{projectId}/dashboard/time_between_events', methods=['GET']) -def get_dashboard_resources_count_by_type(projectId, context): - return {"errors": ["please choose 2 events"]} - - -@app.route('/{projectId}/dashboard/overview', methods=['GET', 'POST']) -def get_dashboard_group(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": [ - *helper.explode_widget(key="count_sessions", - data=dashboard.get_processed_sessions(project_id=projectId, **{**data, **args})), - *helper.explode_widget(data={**dashboard.get_application_activity(project_id=projectId, **{**data, **args}), - "chart": dashboard.get_performance(project_id=projectId, **{**data, **args}) - .get("chart", [])}), - *helper.explode_widget(data=dashboard.get_page_metrics(project_id=projectId, **{**data, **args})), - *helper.explode_widget(data=dashboard.get_user_activity(project_id=projectId, **{**data, **args})), - *helper.explode_widget(data=dashboard.get_pages_dom_build_time(project_id=projectId, **{**data, **args}), - key="avg_pages_dom_buildtime"), - *helper.explode_widget(data=dashboard.get_pages_response_time(project_id=projectId, **{**data, **args}), - key="avg_pages_response_time"), - *helper.explode_widget(dashboard.get_top_metrics(project_id=projectId, **{**data, **args})), - *helper.explode_widget(data=dashboard.get_time_to_render(project_id=projectId, **{**data, **args}), - key="avg_time_to_render"), - *helper.explode_widget(dashboard.get_memory_consumption(project_id=projectId, **{**data, **args})), - *helper.explode_widget(dashboard.get_avg_cpu(project_id=projectId, **{**data, **args})), - *helper.explode_widget(dashboard.get_avg_fps(project_id=projectId, **{**data, **args})), - ]} - - -@app.route('/{projectId}/dashboard/errors_crashes', methods=['GET', 'POST']) -def get_dashboard_group(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": [ - {"key": "errors", - "data": dashboard.get_errors(project_id=projectId, **{**data, **args})}, - {"key": "errors_trend", - "data": dashboard.get_errors_trend(project_id=projectId, **{**data, **args})}, - {"key": "crashes", - "data": dashboard.get_crashes(project_id=projectId, **{**data, **args})}, - {"key": "domains_errors", - "data": dashboard.get_domains_errors(project_id=projectId, **{**data, **args})}, - {"key": "errors_per_domains", - "data": dashboard.get_errors_per_domains(project_id=projectId, **{**data, **args})}, - {"key": "calls_errors", - "data": dashboard.get_calls_errors(project_id=projectId, **{**data, **args})}, - {"key": 
"errors_per_type", - "data": dashboard.get_errors_per_type(project_id=projectId, **{**data, **args})}, - {"key": "impacted_sessions_by_js_errors", - "data": dashboard.get_impacted_sessions_by_js_errors(project_id=projectId, **{**data, **args})} - ]} - - -@app.route('/{projectId}/dashboard/resources', methods=['GET', 'POST']) -def get_dashboard_group(projectId, context): - data = app.current_request.json_body - if data is None: - data = {} - params = app.current_request.query_params - args = dashboard.dashboard_args(params) - - return {"data": [ - {"key": "slowest_images", - "data": dashboard.get_slowest_images(project_id=projectId, **{**data, **args})}, - {"key": "missing_resources", - "data": dashboard.get_missing_resources_trend(project_id=projectId, **{**data, **args})}, - {"key": "slowest_resources", - "data": dashboard.get_slowest_resources(project_id=projectId, type='all', **{**data, **args})}, - {"key": "resources_loading_time", - "data": dashboard.get_resources_loading_time(project_id=projectId, **{**data, **args})}, - {"key": "resources_by_party", - "data": dashboard.get_resources_by_party(project_id=projectId, **{**data, **args})}, - {"key": "resource_type_vs_response_end", - "data": dashboard.resource_type_vs_response_end(project_id=projectId, **{**data, **args})}, - {"key": "resources_vs_visually_complete", - "data": dashboard.get_resources_vs_visually_complete(project_id=projectId, **{**data, **args})}, - {"key": "resources_count_by_type", - "data": dashboard.get_resources_count_by_type(project_id=projectId, **{**data, **args})} - ]} diff --git a/ee/api/chalicelib/core/alerts_listener.py b/ee/api/chalicelib/core/alerts_listener.py new file mode 100644 index 000000000..40241f51e --- /dev/null +++ b/ee/api/chalicelib/core/alerts_listener.py @@ -0,0 +1,27 @@ +from chalicelib.utils import pg_client, helper + + +def get_all_alerts(): + with pg_client.PostgresClient(long_query=True) as cur: + query = """SELECT tenant_id, + alert_id, + project_id, + detection_method, + query, + options, + (EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at, + alerts.name, + alerts.series_id, + filter + FROM public.alerts + LEFT JOIN metric_series USING (series_id) + INNER JOIN projects USING (project_id) + WHERE alerts.deleted_at ISNULL + AND alerts.active + AND projects.active + AND projects.deleted_at ISNULL + AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL) + ORDER BY alerts.created_at;""" + cur.execute(query=query) + all_alerts = helper.list_to_camel_case(cur.fetchall()) + return all_alerts diff --git a/ee/api/chalicelib/core/authorizers.py b/ee/api/chalicelib/core/authorizers.py index ea326c2a1..149d570ab 100644 --- a/ee/api/chalicelib/core/authorizers.py +++ b/ee/api/chalicelib/core/authorizers.py @@ -1,10 +1,10 @@ import jwt +from decouple import config from chalicelib.core import tenants from chalicelib.core import users from chalicelib.utils import helper from chalicelib.utils.TimeUTC import TimeUTC -from chalicelib.utils.helper import environ def jwt_authorizer(token): @@ -14,8 +14,8 @@ def jwt_authorizer(token): try: payload = jwt.decode( token[1], - environ["jwt_secret"], - algorithms=environ["jwt_algorithm"], + config("jwt_secret"), + algorithms=config("jwt_algorithm"), audience=[f"plugin:{helper.get_stage_name()}", f"front:{helper.get_stage_name()}"] ) except jwt.ExpiredSignatureError: @@ -43,14 +43,14 @@ def generate_jwt(id, tenant_id, iat, aud, exp=None): payload={ "userId": id, "tenantId": tenant_id, - "exp": iat // 1000 + 
int(environ["jwt_exp_delta_seconds"]) + TimeUTC.get_utc_offset() // 1000 \ - if exp is None else exp + TimeUTC.get_utc_offset() // 1000, - "iss": environ["jwt_issuer"], + "exp": iat // 1000 + int(config("jwt_exp_delta_seconds")) + TimeUTC.get_utc_offset() // 1000 \ + if exp is None else exp+ TimeUTC.get_utc_offset() // 1000, + "iss": config("jwt_issuer"), "iat": iat // 1000, "aud": aud }, - key=environ["jwt_secret"], - algorithm=environ["jwt_algorithm"] + key=config("jwt_secret"), + algorithm=config("jwt_algorithm") ) return token.decode("utf-8") diff --git a/ee/api/chalicelib/core/funnels.py b/ee/api/chalicelib/core/funnels.py deleted file mode 100644 index 9c550244d..000000000 --- a/ee/api/chalicelib/core/funnels.py +++ /dev/null @@ -1,275 +0,0 @@ -import chalicelib.utils.helper -from chalicelib.core import events, significance, sessions -from chalicelib.utils.TimeUTC import TimeUTC - -from chalicelib.utils import helper, pg_client -from chalicelib.utils import dev -import json - -REMOVE_KEYS = ["key", "_key", "startDate", "endDate"] - -ALLOW_UPDATE_FOR = ["name", "filter"] - - -def filter_stages(stages): - ALLOW_TYPES = [events.event_type.CLICK.ui_type, events.event_type.INPUT.ui_type, - events.event_type.LOCATION.ui_type, events.event_type.CUSTOM.ui_type, - events.event_type.CLICK_IOS.ui_type, events.event_type.INPUT_IOS.ui_type, - events.event_type.VIEW_IOS.ui_type, events.event_type.CUSTOM_IOS.ui_type, ] - return [s for s in stages if s["type"] in ALLOW_TYPES and s.get("value") is not None] - - -def create(project_id, user_id, name, filter, is_public): - helper.delete_keys_from_dict(filter, REMOVE_KEYS) - filter["events"] = filter_stages(stages=filter.get("events", [])) - with pg_client.PostgresClient() as cur: - query = cur.mogrify("""\ - INSERT INTO public.funnels (project_id, user_id, name, filter,is_public) - VALUES (%(project_id)s, %(user_id)s, %(name)s, %(filter)s::jsonb,%(is_public)s) - RETURNING *;""", - {"user_id": user_id, "project_id": project_id, "name": name, "filter": json.dumps(filter), - "is_public": is_public}) - - cur.execute( - query - ) - r = cur.fetchone() - r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) - r = helper.dict_to_camel_case(r) - r["filter"]["startDate"], r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"]) - return {"data": r} - - -def update(funnel_id, user_id, name=None, filter=None, is_public=None): - s_query = [] - if filter is not None: - helper.delete_keys_from_dict(filter, REMOVE_KEYS) - s_query.append("filter = %(filter)s::jsonb") - if name is not None and len(name) > 0: - s_query.append("name = %(name)s") - if is_public is not None: - s_query.append("is_public = %(is_public)s") - if len(s_query) == 0: - return {"errors": ["Nothing to update"]} - with pg_client.PostgresClient() as cur: - query = cur.mogrify(f"""\ - UPDATE public.funnels - SET {" , ".join(s_query)} - WHERE funnel_id=%(funnel_id)s - RETURNING *;""", - {"user_id": user_id, "funnel_id": funnel_id, "name": name, - "filter": json.dumps(filter) if filter is not None else None, "is_public": is_public}) - # print("--------------------") - # print(query) - # print("--------------------") - cur.execute( - query - ) - r = cur.fetchone() - r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"]) - r = helper.dict_to_camel_case(r) - r["filter"]["startDate"], r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"]) - return {"data": r} - - -def get_by_user(project_id, user_id, range_value=None, 
start_date=None, end_date=None, details=False): - with pg_client.PostgresClient() as cur: - team_query = """INNER JOIN - ( - SELECT collaborators.user_id - FROM public.users AS creator - INNER JOIN public.users AS collaborators USING (tenant_id) - WHERE creator.user_id=%(user_id)s - ) AS team USING (user_id)""" - cur.execute( - cur.mogrify( - f"""\ - SELECT DISTINCT ON (funnels.funnel_id) funnel_id,project_id, user_id, name, created_at, deleted_at, is_public - {",filter" if details else ""} - FROM public.funnels {team_query} - WHERE project_id = %(project_id)s - AND funnels.deleted_at IS NULL - AND (funnels.user_id = %(user_id)s OR funnels.is_public);""", - {"project_id": project_id, "user_id": user_id} - ) - ) - - rows = cur.fetchall() - rows = helper.list_to_camel_case(rows) - for row in rows: - row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"]) - if details: - row["filter"]["events"] = filter_stages(row["filter"]["events"]) - get_start_end_time(filter_d=row["filter"], range_value=range_value, start_date=start_date, - end_date=end_date) - counts = sessions.search2_pg(data=row["filter"], project_id=project_id, user_id=None, count_only=True) - row["sessionsCount"] = counts["countSessions"] - row["usersCount"] = counts["countUsers"] - overview = significance.get_overview(filter_d=row["filter"], project_id=project_id) - row["stages"] = overview["stages"] - row.pop("filter") - row["stagesCount"] = len(row["stages"]) - # TODO: ask david to count it alone - row["criticalIssuesCount"] = overview["criticalIssuesCount"] - row["missedConversions"] = 0 if len(row["stages"]) < 2 \ - else row["stages"][0]["sessionsCount"] - row["stages"][-1]["sessionsCount"] - return rows - - -def get_possible_issue_types(project_id): - return [{"type": t, "title": chalicelib.utils.helper.get_issue_title(t)} for t in - ['click_rage', 'dead_click', 'excessive_scrolling', - 'bad_request', 'missing_resource', 'memory', 'cpu', - 'slow_resource', 'slow_page_load', 'crash', 'custom_event_error', - 'js_error']] - - -def get_start_end_time(filter_d, range_value, start_date, end_date): - if start_date is not None and end_date is not None: - filter_d["startDate"], filter_d["endDate"] = start_date, end_date - elif range_value is not None and len(range_value) > 0: - filter_d["rangeValue"] = range_value - filter_d["startDate"], filter_d["endDate"] = TimeUTC.get_start_end_from_range(range_value) - else: - filter_d["startDate"], filter_d["endDate"] = TimeUTC.get_start_end_from_range(filter_d["rangeValue"]) - - -def delete(project_id, funnel_id, user_id): - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify("""\ - UPDATE public.funnels - SET deleted_at = timezone('utc'::text, now()) - WHERE project_id = %(project_id)s - AND funnel_id = %(funnel_id)s;""", - {"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id}) - ) - - return {"data": {"state": "success"}} - - -def get_sessions(project_id, funnel_id, user_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) - return sessions.search2_pg(data=f["filter"], project_id=project_id, user_id=user_id) - - -def get_sessions_on_the_fly(funnel_id, project_id, user_id, data): - data["events"] = filter_stages(data.get("events", [])) - if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) - if f is 
None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None), - start_date=data.get('startDate', None), - end_date=data.get('endDate', None)) - data = f["filter"] - return sessions.search2_pg(data=data, project_id=project_id, user_id=user_id) - - -def get_top_insights(project_id, funnel_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) - insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=f["filter"], project_id=project_id) - insights[-1]["dropDueToIssues"] = total_drop_due_to_issues - return {"data": {"stages": helper.list_to_camel_case(insights), - "totalDropDueToIssues": total_drop_due_to_issues}} - - -def get_top_insights_on_the_fly(funnel_id, project_id, data): - data["events"] = filter_stages(data.get("events", [])) - if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None), - start_date=data.get('startDate', None), - end_date=data.get('endDate', None)) - data = f["filter"] - insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id) - if len(insights) > 0: - insights[-1]["dropDueToIssues"] = total_drop_due_to_issues - return {"data": {"stages": helper.list_to_camel_case(insights), - "totalDropDueToIssues": total_drop_due_to_issues}} - - -def get_issues(project_id, funnel_id, range_value=None, start_date=None, end_date=None): - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date) - return {"data": { - "issues": helper.dict_to_camel_case(significance.get_issues_list(filter_d=f["filter"], project_id=project_id)) - }} - - -@dev.timed -def get_issues_on_the_fly(funnel_id, project_id, data): - first_stage = data.get("firstStage") - last_stage = data.get("lastStage") - data["events"] = filter_stages(data.get("events", [])) - if len(data["events"]) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None), - start_date=data.get('startDate', None), - end_date=data.get('endDate', None)) - data = f["filter"] - return { - "issues": helper.dict_to_camel_case( - significance.get_issues_list(filter_d=data, project_id=project_id, first_stage=first_stage, - last_stage=last_stage))} - - -def get(funnel_id, project_id): - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify( - """\ - SELECT - * - FROM public.funnels - WHERE project_id = %(project_id)s - AND deleted_at IS NULL - AND funnel_id = %(funnel_id)s;""", - {"funnel_id": funnel_id, "project_id": project_id} - ) - ) - - f = helper.dict_to_camel_case(cur.fetchone()) - if f is None: - return None - - f["createdAt"] = TimeUTC.datetime_to_timestamp(f["createdAt"]) - f["filter"]["events"] = filter_stages(stages=f["filter"]["events"]) - return f - - -@dev.timed -def search_by_issue(user_id, project_id, funnel_id, issue_id, data, range_value=None, start_date=None, end_date=None): - if 
len(data.get("events", [])) == 0: - f = get(funnel_id=funnel_id, project_id=project_id) - if f is None: - return {"errors": ["funnel not found"]} - get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=data.get('startDate', start_date), - end_date=data.get('endDate', end_date)) - data = f["filter"] - - # insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id) - issues = get_issues_on_the_fly(funnel_id=funnel_id, project_id=project_id, data=data).get("issues", {}) - issues = issues.get("significant", []) + issues.get("insignificant", []) - issue = None - for i in issues: - if i.get("issueId", "") == issue_id: - issue = i - break - return {"sessions": sessions.search2_pg(user_id=user_id, project_id=project_id, issue=issue, - data=data) if issue is not None else {"total": 0, "sessions": []}, - # "stages": helper.list_to_camel_case(insights), - # "totalDropDueToIssues": total_drop_due_to_issues, - "issue": issue} diff --git a/ee/api/chalicelib/core/insights.py b/ee/api/chalicelib/core/insights.py new file mode 100644 index 000000000..387029fd4 --- /dev/null +++ b/ee/api/chalicelib/core/insights.py @@ -0,0 +1,1047 @@ +from chalicelib.core import sessions_metas +from chalicelib.utils import helper, dev +from chalicelib.utils import ch_client +from chalicelib.utils.TimeUTC import TimeUTC +from chalicelib.core.dashboard import __get_constraint_values, __complete_missing_steps +from chalicelib.core.dashboard import __get_basic_constraints, __get_meta_constraint + + +def __transform_journey(rows): + nodes = [] + links = [] + for r in rows: + source = r["source_event"][r["source_event"].index("_") + 1:] + target = r["target_event"][r["target_event"].index("_") + 1:] + if source not in nodes: + nodes.append(source) + if target not in nodes: + nodes.append(target) + links.append({"source": nodes.index(source), "target": nodes.index(target), "value": r["value"]}) + return {"nodes": nodes, "links": sorted(links, key=lambda x: x["value"], reverse=True)} + + +JOURNEY_DEPTH = 5 +JOURNEY_TYPES = { + "PAGES": {"table": "pages", "column": "url_path"}, + "CLICK": {"table": "clicks", "column": "label"}, + # "VIEW": {"table": "events_ios.views", "column": "name"}, TODO: enable this for SAAS only + "EVENT": {"table": "customs", "column": "name"} +} + + +@dev.timed +def journey(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(), filters=[], **args): + event_start = None + event_table = JOURNEY_TYPES["CLICK"]["table"] + event_column = JOURNEY_TYPES["CLICK"]["column"] + extra_values = {} + meta_condition = [] + for f in filters: + if f["type"] == "START_POINT": + event_start = f["value"] + elif f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_table = JOURNEY_TYPES[f["value"]]["table"] + event_column = JOURNEY_TYPES[f["value"]]["column"] + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append(f"sessions_metadata.project_id = %(project_id)s") + meta_condition.append(f"sessions_metadata.datetime >= toDateTime(%(startTimestamp)s / 1000)") + meta_condition.append(f"sessions_metadata.datetime < toDateTime(%(endTimestamp)s / 1000)") + extra_values["user_id"] = f["value"] + ch_sub_query = __get_basic_constraints(table_name=event_table, data=args) + meta_condition += __get_meta_constraint(args) + ch_sub_query += meta_condition + with ch_client.ClickHouseClient() as ch: + ch_query 
= f"""SELECT source_event, + target_event, + count(*) AS value + FROM (SELECT toString(event_number) || '_' || value AS target_event, + lagInFrame(toString(event_number) || '_' || value) OVER (PARTITION BY session_rank ORDER BY datetime ASC ROWS + BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS source_event + FROM (SELECT session_rank, + datetime, + value, + row_number AS event_number + FROM (SELECT session_rank, + groupArray(datetime) AS arr_datetime, + groupArray(value) AS arr_value, + arrayEnumerate(arr_datetime) AS row_number + {f"FROM (SELECT * FROM (SELECT *, MIN(mark) OVER ( PARTITION BY session_id , session_rank ORDER BY datetime ) AS max FROM (SELECT *, CASE WHEN value = %(event_start)s THEN datetime ELSE NULL END as mark" if event_start else ""} + FROM (SELECT session_id, + datetime, + value, + SUM(new_session) OVER (ORDER BY session_id, datetime) AS session_rank + FROM (SELECT *, + if(equals(source_timestamp, '1970-01-01'), 1, 0) AS new_session + FROM (SELECT session_id, + datetime, + {event_column} AS value, + lagInFrame(datetime) OVER (PARTITION BY session_id ORDER BY datetime ASC ROWS + BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS source_timestamp + FROM {event_table} {"INNER JOIN sessions_metadata USING(session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + ORDER BY session_id, datetime) AS related_events) AS ranked_events + ORDER BY session_rank, datetime + ) AS processed + {") AS marked) AS maxed WHERE datetime >= max) AS filtered" if event_start else ""} + GROUP BY session_rank + ORDER BY session_rank) + ARRAY JOIN + arr_datetime AS datetime, + arr_value AS value, + row_number + ORDER BY session_rank ASC, + row_number ASC) AS sorted_events + WHERE event_number <= %(JOURNEY_DEPTH)s) AS final + WHERE not empty(source_event) + AND not empty(target_event) + GROUP BY source_event, target_event + ORDER BY value DESC + LIMIT 20;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, "event_start": event_start, "JOURNEY_DEPTH": JOURNEY_DEPTH, + **__get_constraint_values(args), **extra_values} + + rows = ch.execute(query=ch_query, params=params) + # print(ch_query % params) + return __transform_journey(rows) + + +def __compute_weekly_percentage(rows): + if rows is None or len(rows) == 0: + return rows + t = -1 + for r in rows: + if r["week"] == 0: + t = r["usersCount"] + r["percentage"] = r["usersCount"] / t + return rows + + +def __complete_retention(rows, start_date, end_date=None): + if rows is None: + return [] + max_week = 10 + for i in range(max_week): + if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date: + break + neutral = { + "firstConnexionWeek": start_date, + "week": i, + "usersCount": 0, + "connectedUsers": [], + "percentage": 0 + } + if i < len(rows) \ + and i != rows[i]["week"]: + rows.insert(i, neutral) + elif i >= len(rows): + rows.append(neutral) + return rows + + +def __complete_acquisition(rows, start_date, end_date=None): + if rows is None: + return [] + max_week = 10 + week = 0 + delta_date = 0 + while max_week > 0: + start_date += TimeUTC.MS_WEEK + if end_date is not None and start_date >= end_date: + break + delta = 0 + if delta_date + week >= len(rows) \ + or delta_date + week < len(rows) and rows[delta_date + week]["firstConnexionWeek"] > start_date: + for i in range(max_week): + if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date: + break + + neutral = { + "firstConnexionWeek": start_date, + "week": i, + 
"usersCount": 0, + "connectedUsers": [], + "percentage": 0 + } + rows.insert(delta_date + week + i, neutral) + delta = i + else: + for i in range(max_week): + if end_date is not None and start_date + i * TimeUTC.MS_WEEK >= end_date: + break + + neutral = { + "firstConnexionWeek": start_date, + "week": i, + "usersCount": 0, + "connectedUsers": [], + "percentage": 0 + } + if delta_date + week + i < len(rows) \ + and i != rows[delta_date + week + i]["week"]: + rows.insert(delta_date + week + i, neutral) + elif delta_date + week + i >= len(rows): + rows.append(neutral) + delta = i + week += delta + max_week -= 1 + delta_date += 1 + return rows + + +@dev.timed +def users_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), filters=[], + **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + ch_sub_query = __get_basic_constraints(table_name='sessions_metadata', data=args) + meta_condition = __get_meta_constraint(args) + ch_sub_query += meta_condition + ch_sub_query.append("sessions_metadata.user_id IS NOT NULL") + ch_sub_query.append("not empty(sessions_metadata.user_id)") + with ch_client.ClickHouseClient() as ch: + ch_query = f"""SELECT toInt8((connexion_week - toDate(%(startTimestamp)s / 1000)) / 7) AS week, + COUNT(all_connexions.user_id) AS users_count, + groupArray(100)(all_connexions.user_id) AS connected_users + FROM (SELECT DISTINCT user_id + FROM sessions_metadata + WHERE {" AND ".join(ch_sub_query)} + AND toStartOfWeek(sessions_metadata.datetime,1) = toDate(%(startTimestamp)s / 1000) + AND sessions_metadata.datetime < toDateTime(%(startTimestamp)s/1000 + 8 * 24 * 60 * 60 ) + AND isNull((SELECT 1 + FROM sessions_metadata AS bmsess + WHERE bmsess.datetime < toDateTime(%(startTimestamp)s / 1000) + AND bmsess.project_id = %(project_id)s + AND bmsess.user_id = sessions_metadata.user_id + LIMIT 1)) + ) AS users_list + INNER JOIN (SELECT DISTINCT user_id, toStartOfWeek(datetime,1) AS connexion_week + FROM sessions_metadata + WHERE {" AND ".join(ch_sub_query)} + ) AS all_connexions USING (user_id) + GROUP BY connexion_week + ORDER BY connexion_week;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args)} + # print(ch_query % params) + rows = ch.execute(ch_query, params) + rows = __compute_weekly_percentage(helper.list_to_camel_case(rows)) + return { + "startTimestamp": startTimestamp, + "chart": __complete_retention(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now()) + } + + +@dev.timed +def users_acquisition(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + ch_sub_query = __get_basic_constraints(table_name='sessions_metadata', data=args) + meta_condition = __get_meta_constraint(args) + ch_sub_query += meta_condition + ch_sub_query.append("sessions_metadata.user_id IS NOT NULL") + ch_sub_query.append("not empty(sessions_metadata.user_id)") + ch_sub_query.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s / 1000)") + with ch_client.ClickHouseClient() as ch: + ch_query = f"""SELECT toUnixTimestamp(toDateTime(first_connexion_week))*1000 AS first_connexion_week, + week, + users_count, + connected_users + FROM ( + SELECT first_connexion_week, + toInt8((connexion_week - first_connexion_week) / 7) AS week, + 
COUNT(DISTINCT all_connexions.user_id) AS users_count, + groupArray(20)(all_connexions.user_id) AS connected_users + FROM (SELECT user_id, MIN(toStartOfWeek(sessions_metadata.datetime, 1)) AS first_connexion_week + FROM sessions_metadata + WHERE {" AND ".join(ch_sub_query)} + AND sessions_metadata.datetime < toDateTime(%(startTimestamp)s/1000 + 8 * 24 * 60 * 60 ) + AND isNull((SELECT 1 + FROM sessions_metadata AS bmsess + WHERE bmsess.datetime < toDateTime(%(startTimestamp)s / 1000) + AND bmsess.project_id = %(project_id)s + AND bmsess.user_id = sessions_metadata.user_id + LIMIT 1)) + GROUP BY user_id) AS users_list + INNER JOIN (SELECT DISTINCT user_id, toStartOfWeek(datetime, 1) AS connexion_week + FROM sessions_metadata + WHERE {" AND ".join(ch_sub_query)} + ORDER BY connexion_week, user_id + ) AS all_connexions USING (user_id) + WHERE first_connexion_week <= connexion_week + GROUP BY first_connexion_week, week + ORDER BY first_connexion_week, week + ) AS full_data;""" + + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args)} + # print(ch_query % params) + rows = ch.execute(ch_query, params) + rows = __compute_weekly_percentage(helper.list_to_camel_case(rows)) + return { + "startTimestamp": startTimestamp, + "chart": __complete_acquisition(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now()) + } + + +@dev.timed +def feature_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + ch_sub_query = __get_basic_constraints(table_name='feature', data=args) + meta_condition = __get_meta_constraint(args) + event_type = "PAGES" + event_value = "/" + extra_values = {} + default = True + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append("sessions_metadata.user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + + with ch_client.ClickHouseClient() as ch: + if default: + # get most used value + ch_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature + {"INNER JOIN sessions_metadata USING (session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query% params) + row = ch.execute(ch_query, params) + if len(row) > 0: + event_value = row[0]["value"] + else: + print(f"no {event_table} most used value") + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, + {"type": 
"EVENT_VALUE", "value": ""}], + "chart": __complete_retention(rows=[], start_date=startTimestamp, end_date=TimeUTC.now()) + } + extra_values["value"] = event_value + if len(meta_condition) == 0: + meta_condition.append("sessions_metadata.user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + ch_sub_query += meta_condition + ch_sub_query.append(f"feature.{event_column} = %(value)s") + ch_query = f"""SELECT toInt8((connexion_week - toDate(%(startTimestamp)s / 1000)) / 7) AS week, + COUNT(DISTINCT all_connexions.user_id) AS users_count, + groupArray(100)(all_connexions.user_id) AS connected_users + FROM (SELECT DISTINCT user_id + FROM {event_table} AS feature INNER JOIN sessions_metadata USING (session_id) + WHERE {" AND ".join(ch_sub_query)} + AND toStartOfWeek(feature.datetime,1) = toDate(%(startTimestamp)s / 1000) + AND sessions_metadata.datetime < toDateTime(%(startTimestamp)s/1000 + 8 * 24 * 60 * 60 ) + AND feature.datetime < toDateTime(%(startTimestamp)s/1000 + 8 * 24 * 60 * 60 ) + AND isNull((SELECT 1 + FROM {event_table} AS bsess INNER JOIN sessions_metadata AS bmsess USING (session_id) + WHERE bsess.datetime < toDateTime(%(startTimestamp)s / 1000) + AND bmsess.datetime < toDateTime(%(startTimestamp)s / 1000) + AND bsess.project_id = %(project_id)s + AND bmsess.project_id = %(project_id)s + AND bmsess.user_id = sessions_metadata.user_id + AND bsess.{event_column}=%(value)s + LIMIT 1)) + ) AS users_list + INNER JOIN (SELECT DISTINCT user_id, toStartOfWeek(datetime,1) AS connexion_week + FROM {event_table} AS feature INNER JOIN sessions_metadata USING (session_id) + WHERE {" AND ".join(ch_sub_query)} + ORDER BY connexion_week, user_id + ) AS all_connexions USING (user_id) + GROUP BY connexion_week + ORDER BY connexion_week;""" + + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + print(ch_query % params) + rows = ch.execute(ch_query, params) + rows = __compute_weekly_percentage(helper.list_to_camel_case(rows)) + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}], + "chart": __complete_retention(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now()) + } + + +@dev.timed +def feature_acquisition(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + ch_sub_query = __get_basic_constraints(table_name='feature', data=args) + meta_condition = __get_meta_constraint(args) + + event_type = "PAGES" + event_value = "/" + extra_values = {} + default = True + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append("sessions_metadata.user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + 
meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + with ch_client.ClickHouseClient() as ch: + if default: + # get most used value + ch_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature + {"INNER JOIN sessions_metadata USING (session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query% params) + row = ch.execute(ch_query, params) + if len(row) > 0: + event_value = row[0]["value"] + else: + print(f"no {event_table} most used value") + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, + {"type": "EVENT_VALUE", "value": ""}], + "chart": __complete_acquisition(rows=[], start_date=startTimestamp, end_date=TimeUTC.now()) + } + extra_values["value"] = event_value + + if len(meta_condition) == 0: + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + + ch_sub_query += meta_condition + ch_sub_query.append(f"feature.{event_column} = %(value)s") + ch_query = f"""SELECT toUnixTimestamp(toDateTime(first_connexion_week))*1000 AS first_connexion_week, + week, + users_count, + connected_users + FROM ( + SELECT first_connexion_week, + toInt8((connexion_week - first_connexion_week) / 7) AS week, + COUNT(DISTINCT all_connexions.user_id) AS users_count, + groupArray(100)(all_connexions.user_id) AS connected_users + FROM (SELECT user_id, MIN(toStartOfWeek(feature.datetime, 1)) AS first_connexion_week + FROM sessions_metadata INNER JOIN {event_table} AS feature USING (session_id) + WHERE {" AND ".join(ch_sub_query)} + AND sessions_metadata.datetime < toDateTime(%(startTimestamp)s/1000 + 8 * 24 * 60 * 60 ) + AND feature.datetime < toDateTime(%(startTimestamp)s/1000 + 8 * 24 * 60 * 60 ) + AND isNull((SELECT 1 + FROM sessions_metadata AS bmsess + INNER JOIN {event_table} AS bsess USING (session_id) + WHERE bsess.datetime < toDateTime(%(startTimestamp)s / 1000) + AND bmsess.datetime < toDateTime(%(startTimestamp)s / 1000) + AND bsess.project_id = %(project_id)s + AND bmsess.project_id = %(project_id)s + AND bmsess.user_id = sessions_metadata.user_id + AND bsess.{event_column} = %(value)s + LIMIT 1)) + GROUP BY user_id) AS users_list + INNER JOIN (SELECT DISTINCT user_id, toStartOfWeek(datetime, 1) AS connexion_week + FROM sessions_metadata INNER JOIN {event_table} AS feature USING (session_id) + WHERE {" AND ".join(ch_sub_query)} + ORDER BY connexion_week, user_id + ) AS all_connexions USING (user_id) + WHERE first_connexion_week <= connexion_week + GROUP BY first_connexion_week, week + ORDER BY first_connexion_week, week + ) AS full_data;""" + + params = {"project_id": project_id, "startTimestamp": 
startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + print(ch_query % params) + rows = ch.execute(ch_query, params) + rows = __compute_weekly_percentage(helper.list_to_camel_case(rows)) + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}], + "chart": __complete_acquisition(rows=rows, start_date=startTimestamp, end_date=TimeUTC.now()) + } + + +@dev.timed +def feature_popularity_frequency(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + startTimestamp = TimeUTC.trunc_week(startTimestamp) + endTimestamp = startTimestamp + 10 * TimeUTC.MS_WEEK + ch_sub_query = __get_basic_constraints(table_name='feature', data=args) + meta_condition = __get_meta_constraint(args) + + event_table = JOURNEY_TYPES["CLICK"]["table"] + event_column = JOURNEY_TYPES["CLICK"]["column"] + extra_values = {} + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_table = JOURNEY_TYPES[f["value"]]["table"] + event_column = JOURNEY_TYPES[f["value"]]["column"] + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append("sessions_metadata.user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + extra_values["user_id"] = f["value"] + + with ch_client.ClickHouseClient() as ch: + if len(meta_condition) == 0: + meta_condition.append("sessions_metadata.user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + ch_sub_query += meta_condition + ch_query = f"""SELECT COUNT(DISTINCT user_id) AS count + FROM sessions_metadata + WHERE {" AND ".join(meta_condition)};""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query % params) + # print("---------------------") + all_user_count = ch.execute(ch_query, params) + if len(all_user_count) == 0 or all_user_count[0]["count"] == 0: + return [] + all_user_count = all_user_count[0]["count"] + ch_query = f"""SELECT {event_column} AS value, COUNT(DISTINCT user_id) AS count + FROM {event_table} AS feature INNER JOIN sessions_metadata USING (session_id) + WHERE {" AND ".join(ch_sub_query)} + AND length({event_column})>2 + GROUP BY value + ORDER BY count DESC + LIMIT 7;""" + + # print(ch_query % params) + # print("---------------------") + popularity = ch.execute(ch_query, params) + params["values"] = [p["value"] for p in popularity] + if len(params["values"]) == 0: + return [] + ch_query = f"""SELECT {event_column} AS value, COUNT(session_id) AS count + FROM {event_table} AS feature INNER JOIN sessions_metadata USING (session_id) + WHERE {" AND ".join(ch_sub_query)} + AND {event_column} IN %(values)s + GROUP BY value;""" + + # print(ch_query % params) + # 
print("---------------------") + frequencies = ch.execute(ch_query, params) + total_usage = sum([f["count"] for f in frequencies]) + frequencies = {f["value"]: f["count"] for f in frequencies} + for p in popularity: + p["popularity"] = p.pop("count") / all_user_count + p["frequency"] = frequencies[p["value"]] / total_usage + + return popularity + + +@dev.timed +def feature_adoption(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + event_type = "CLICK" + event_value = '/' + extra_values = {} + default = True + meta_condition = [] + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append("sessions_metadata.user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + + ch_sub_query = __get_basic_constraints(table_name='feature', data=args) + meta_condition += __get_meta_constraint(args) + ch_sub_query += meta_condition + with ch_client.ClickHouseClient() as ch: + if default: + # get most used value + ch_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature + {"INNER JOIN sessions_metadata USING (session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query % params) + # print("---------------------") + row = ch.execute(ch_query, params) + if len(row) > 0: + event_value = row[0]["value"] + # else: + # print(f"no {event_table} most used value") + # return {"target": 0, "adoption": 0, + # "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": ""}]} + + extra_values["value"] = event_value + + if len(meta_condition) == 0: + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + meta_condition.append("sessions_metadata.user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + ch_sub_query += meta_condition + ch_query = f"""SELECT COUNT(DISTINCT user_id) AS count + FROM sessions_metadata + WHERE {" AND ".join(meta_condition)};""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query % params) + # print("---------------------") + all_user_count = ch.execute(ch_query, params) + if len(all_user_count) == 0 or all_user_count[0]["count"] == 0: + return {"adoption": 0, "target": 0, "filters": [{"type": "EVENT_TYPE", "value": event_type}, + {"type": 
"EVENT_VALUE", "value": event_value}], } + all_user_count = all_user_count[0]["count"] + + ch_sub_query.append(f"feature.{event_column} = %(value)s") + ch_query = f"""SELECT COUNT(DISTINCT user_id) AS count + FROM {event_table} AS feature INNER JOIN sessions_metadata USING (session_id) + WHERE {" AND ".join(ch_sub_query)};""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query % params) + # print("---------------------") + adoption = ch.execute(ch_query, params) + adoption = adoption[0]["count"] / all_user_count + return {"target": all_user_count, "adoption": adoption, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]} + + +@dev.timed +def feature_adoption_top_users(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + event_type = "CLICK" + event_value = '/' + extra_values = {} + default = True + meta_condition = [] + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append("user_id IS NOT NULL") + meta_condition.append("not empty(sessions_metadata.user_id)") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + ch_sub_query = __get_basic_constraints(table_name='feature', data=args) + meta_condition += __get_meta_constraint(args) + ch_sub_query += meta_condition + + with ch_client.ClickHouseClient() as ch: + if default: + # get most used value + ch_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature + {"INNER JOIN sessions_metadata USING (session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + row = ch.execute(ch_query, params) + if len(row) > 0: + event_value = row[0]["value"] + else: + print(f"no {event_table} most used value") + return {"users": [], + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": ""}]} + + extra_values["value"] = event_value + if len(meta_condition) == 0: + ch_sub_query.append("user_id IS NOT NULL") + ch_sub_query.append("not empty(sessions_metadata.user_id)") + ch_sub_query.append("sessions_metadata.project_id = %(project_id)s") + ch_sub_query.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + ch_sub_query.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + ch_sub_query.append(f"feature.{event_column} = %(value)s") + ch_query = f"""SELECT user_id, COUNT(DISTINCT session_id) AS count + FROM {event_table} AS feature INNER JOIN sessions_metadata USING (session_id) + WHERE {" AND ".join(ch_sub_query)} + GROUP BY user_id + ORDER 
BY count DESC + LIMIT 10;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query % params) + rows = ch.execute(ch_query, params) + return {"users": helper.list_to_camel_case(rows), + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]} + + +@dev.timed +def feature_adoption_daily_usage(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), + filters=[], **args): + event_type = "CLICK" + event_value = '/' + extra_values = {} + default = True + meta_condition = [] + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + ch_sub_query = __get_basic_constraints(table_name="feature", data=args) + meta_condition += __get_meta_constraint(args) + ch_sub_query += meta_condition + with ch_client.ClickHouseClient() as ch: + if default: + # get most used value + ch_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature {"INNER JOIN sessions_metadata USING (session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + AND length({event_column}) > 2 + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query % params) + row = ch.execute(ch_query, params) + if len(row) > 0: + event_value = row[0]["value"] + else: + print(f"no {event_table} most used value") + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, + {"type": "EVENT_VALUE", "value": ""}], + "chart": __complete_acquisition(rows=[], start_date=startTimestamp, end_date=TimeUTC.now()) + } + extra_values["value"] = event_value + ch_sub_query.append(f"feature.{event_column} = %(value)s") + ch_query = f"""SELECT toUnixTimestamp(day)*1000 AS timestamp, count + FROM (SELECT toStartOfDay(feature.datetime) AS day, COUNT(DISTINCT session_id) AS count + FROM {event_table} AS feature {"INNER JOIN sessions_metadata USING (session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + GROUP BY day + ORDER BY day) AS raw_results;""" + params = {"step_size": TimeUTC.MS_DAY, "project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query % params) + rows = ch.execute(ch_query, params) + return {"chart": __complete_missing_steps(rows=rows, start_time=startTimestamp, end_time=endTimestamp, + density=(endTimestamp - startTimestamp) // TimeUTC.MS_DAY, + neutral={"count": 0}), + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}]} + + 
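A note on the chart-completion step used by feature_adoption_daily_usage above: ClickHouse only returns buckets for days that actually had matching sessions, so __complete_missing_steps has to densify the series before it is charted. That helper is defined elsewhere in this module; the following is only a minimal sketch of the densification idea, under the assumption that rows carry a millisecond "timestamp" key aligned to bucket boundaries and that neutral holds the default fields for empty buckets — the name complete_missing_steps_sketch and the exact behavior are illustrative, not taken from this diff:

    def complete_missing_steps_sketch(rows, start_time, end_time, density, neutral):
        # sketch only: the real __complete_missing_steps may differ
        if density <= 0:
            return rows
        step = (end_time - start_time) // density
        by_ts = {r["timestamp"]: r for r in rows}
        filled = []
        t = start_time
        while t < end_time:
            # keep the real bucket when ClickHouse returned one, otherwise emit a neutral bucket
            filled.append(by_ts.get(t, {"timestamp": t, **neutral}))
            t += step
        return filled

    # e.g. complete_missing_steps_sketch([], 0, 3 * 86400000, 3, {"count": 0})
    # -> three buckets, each {"timestamp": ..., "count": 0}
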
+@dev.timed +def feature_intensity(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), filters=[], + **args): + event_table = JOURNEY_TYPES["CLICK"]["table"] + event_column = JOURNEY_TYPES["CLICK"]["column"] + extra_values = {} + meta_condition = [] + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_table = JOURNEY_TYPES[f["value"]]["table"] + event_column = JOURNEY_TYPES[f["value"]]["column"] + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + extra_values["user_id"] = f["value"] + ch_sub_query = __get_basic_constraints(table_name="feature", data=args) + meta_condition += __get_meta_constraint(args) + ch_sub_query += meta_condition + with ch_client.ClickHouseClient() as ch: + ch_query = f"""SELECT {event_column} AS value, AVG(DISTINCT session_id) AS avg + FROM {event_table} AS feature + {"INNER JOIN sessions_metadata USING (session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + GROUP BY value + ORDER BY avg DESC + LIMIT 7;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + # print(ch_query % params) + rows = ch.execute(ch_query, params) + + return rows + + +PERIOD_TO_FUNCTION = { + "DAY": "toStartOfDay", + "WEEK": "toStartOfWeek" +} + + +@dev.timed +def users_active(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), filters=[], + **args): + meta_condition = __get_meta_constraint(args) + period = "DAY" + extra_values = {} + for f in filters: + if f["type"] == "PERIOD" and f["value"] in ["DAY", "WEEK"]: + period = f["value"] + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + extra_values["user_id"] = f["value"] + period_function = PERIOD_TO_FUNCTION[period] + ch_sub_query = __get_basic_constraints(table_name="sessions_metadata", data=args) + ch_sub_query += meta_condition + ch_sub_query.append("sessions_metadata.user_id IS NOT NULL") + ch_sub_query.append("not empty(sessions_metadata.user_id)") + with ch_client.ClickHouseClient() as ch: + ch_query = f"""SELECT SUM(count) / intDiv(%(endTimestamp)s - %(startTimestamp)s, %(step_size)s) AS avg + FROM (SELECT {period_function}(sessions_metadata.datetime) AS period, count(DISTINCT user_id) AS count + FROM sessions_metadata + WHERE {" AND ".join(ch_sub_query)} + GROUP BY period) AS daily_users;""" + params = {"step_size": TimeUTC.MS_DAY if period == "DAY" else TimeUTC.MS_WEEK, + "project_id": project_id, + "startTimestamp": TimeUTC.trunc_day(startTimestamp) if period == "DAY" else TimeUTC.trunc_week( + startTimestamp), "endTimestamp": endTimestamp, **__get_constraint_values(args), + **extra_values} + # print(ch_query % params) + # print("---------------------") + avg = ch.execute(ch_query, params) + if len(avg) == 0 or avg[0]["avg"] == 0: + return {"avg": 0, "chart": []} + avg = avg[0]["avg"] + # TODO: optimize this when DB structure changes, optimization from 3s to 1s + ch_query = f"""SELECT toUnixTimestamp(toDateTime(period))*1000 AS 
timestamp, count + FROM (SELECT {period_function}(sessions_metadata.datetime) AS period, count(DISTINCT user_id) AS count + FROM sessions_metadata + WHERE {" AND ".join(ch_sub_query)} + GROUP BY period + ORDER BY period) AS raw_results;""" + # print(ch_query % params) + # print("---------------------") + rows = ch.execute(ch_query, params) + return {"avg": avg, "chart": rows} + + +@dev.timed +def users_power(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), filters=[], **args): + ch_sub_query = __get_basic_constraints(table_name="sessions_metadata", data=args) + meta_condition = __get_meta_constraint(args) + ch_sub_query += meta_condition + ch_sub_query.append("sessions_metadata.user_id IS NOT NULL") + ch_sub_query.append("not empty(sessions_metadata.user_id)") + + with ch_client.ClickHouseClient() as ch: + ch_query = f"""SELECT ifNotFinite(AVG(count),0) AS avg + FROM(SELECT COUNT(user_id) AS count + FROM (SELECT user_id, COUNT(DISTINCT toStartOfDay(datetime)) AS number_of_days + FROM sessions_metadata + WHERE {" AND ".join(ch_sub_query)} + GROUP BY user_id) AS users_connexions + GROUP BY number_of_days + ORDER BY number_of_days) AS results;""" + params = {"project_id": project_id, + "startTimestamp": startTimestamp, "endTimestamp": endTimestamp, **__get_constraint_values(args)} + # print(ch_query % params) + # print("---------------------") + avg = ch.execute(ch_query, params) + if len(avg) == 0 or avg[0]["avg"] == 0: + return {"avg": 0, "partition": []} + avg = avg[0]["avg"] + ch_query = f"""SELECT number_of_days, COUNT(user_id) AS count + FROM (SELECT user_id, COUNT(DISTINCT toStartOfDay(datetime)) AS number_of_days + FROM sessions_metadata + WHERE {" AND ".join(ch_sub_query)} + GROUP BY user_id) AS users_connexions + GROUP BY number_of_days + ORDER BY number_of_days;""" + + # print(ch_query % params) + # print("---------------------") + rows = ch.execute(ch_query, params) + + return {"avg": avg, "partition": helper.list_to_camel_case(rows)} + + +@dev.timed +def users_slipping(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTimestamp=TimeUTC.now(), filters=[], + **args): + ch_sub_query = __get_basic_constraints(table_name="feature", data=args) + event_type = "PAGES" + event_value = "/" + extra_values = {} + default = True + meta_condition = [] + for f in filters: + if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]): + event_type = f["value"] + elif f["type"] == "EVENT_VALUE": + event_value = f["value"] + default = False + elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]: + meta_condition.append(f"sessions_metadata.user_id = %(user_id)s") + meta_condition.append("sessions_metadata.project_id = %(project_id)s") + meta_condition.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + meta_condition.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + extra_values["user_id"] = f["value"] + event_table = JOURNEY_TYPES[event_type]["table"] + event_column = JOURNEY_TYPES[event_type]["column"] + + meta_condition += __get_meta_constraint(args) + ch_sub_query += meta_condition + with ch_client.ClickHouseClient() as ch: + if default: + # get most used value + ch_query = f"""SELECT {event_column} AS value, COUNT(*) AS count + FROM {event_table} AS feature + {"INNER JOIN sessions_metadata USING (session_id)" if len(meta_condition) > 0 else ""} + WHERE {" AND ".join(ch_sub_query)} + GROUP BY value + ORDER BY count DESC + LIMIT 1;""" + params = {"project_id": 
project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + print(ch_query % params) + row = ch.execute(ch_query, params) + if len(row) > 0: + event_value = row[0]["value"] + else: + print(f"no {event_table} most used value") + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, + {"type": "EVENT_VALUE", "value": ""}], + "list": [] + } + extra_values["value"] = event_value + if len(meta_condition) == 0: + ch_sub_query.append("sessions_metadata.user_id IS NOT NULL") + ch_sub_query.append("not empty(sessions_metadata.user_id)") + ch_sub_query.append("sessions_metadata.project_id = %(project_id)s") + ch_sub_query.append("sessions_metadata.datetime >= toDateTime(%(startTimestamp)s/1000)") + ch_sub_query.append("sessions_metadata.datetime < toDateTime(%(endTimestamp)s/1000)") + ch_sub_query.append(f"feature.{event_column} = %(value)s") + ch_query = f"""SELECT user_id, + toUnixTimestamp(last_time)*1000 AS last_time, + interactions_count, + toUnixTimestamp(first_seen) * 1000 AS first_seen, + toUnixTimestamp(last_seen) * 1000 AS last_seen + FROM (SELECT user_id, last_time, interactions_count, MIN(datetime) AS first_seen, MAX(datetime) AS last_seen + FROM (SELECT user_id, MAX(datetime) AS last_time, COUNT(DISTINCT session_id) AS interactions_count + FROM {event_table} AS feature INNER JOIN sessions_metadata USING (session_id) + WHERE {" AND ".join(ch_sub_query)} + GROUP BY user_id ) AS user_last_usage INNER JOIN sessions_metadata USING (user_id) + WHERE now() - last_time > 7 + GROUP BY user_id, last_time, interactions_count + ORDER BY interactions_count DESC, last_time DESC + LIMIT 50) AS raw_results;""" + params = {"project_id": project_id, "startTimestamp": startTimestamp, + "endTimestamp": endTimestamp, **__get_constraint_values(args), **extra_values} + print(ch_query % params) + rows = ch.execute(ch_query, params) + return { + "startTimestamp": startTimestamp, + "filters": [{"type": "EVENT_TYPE", "value": event_type}, {"type": "EVENT_VALUE", "value": event_value}], + "list": helper.list_to_camel_case(rows) + } + + +@dev.timed +def search(text, feature_type, project_id, platform=None): + if not feature_type: + resource_type = "ALL" + data = search(text=text, feature_type=resource_type, project_id=project_id, platform=platform) + return data + args = {} if platform is None else {"platform": platform} + ch_sub_query = __get_basic_constraints(table_name="feature", data=args) + meta_condition = __get_meta_constraint(args) + ch_sub_query += meta_condition + params = {"startTimestamp": TimeUTC.now() - 1 * TimeUTC.MS_MONTH, + "endTimestamp": TimeUTC.now(), + "project_id": project_id, + "value": text.lower(), + "platform_0": platform} + if feature_type == "ALL": + with ch_client.ClickHouseClient() as ch: + sub_queries = [] + for e in JOURNEY_TYPES: + sub_queries.append(f"""(SELECT DISTINCT {JOURNEY_TYPES[e]["column"]} AS value, '{e}' AS "type" + FROM {JOURNEY_TYPES[e]["table"]} AS feature + WHERE {" AND ".join(ch_sub_query)} AND positionUTF8({JOURNEY_TYPES[e]["column"]},%(value)s)!=0 + LIMIT 10)""") + ch_query = "UNION ALL".join(sub_queries) + print(ch_query % params) + rows = ch.execute(ch_query, params) + elif JOURNEY_TYPES.get(feature_type) is not None: + with ch_client.ClickHouseClient() as ch: + ch_query = f"""SELECT DISTINCT {JOURNEY_TYPES[feature_type]["column"]} AS value, '{feature_type}' AS "type" + FROM {JOURNEY_TYPES[feature_type]["table"]} AS feature + WHERE {" AND 
".join(ch_sub_query)} AND positionUTF8({JOURNEY_TYPES[feature_type]["column"]},%(value)s)!=0 + LIMIT 10;""" + print(ch_query % params) + rows = ch.execute(ch_query, params) + else: + return [] + return [helper.dict_to_camel_case(row) for row in rows] \ No newline at end of file diff --git a/ee/api/chalicelib/core/license.py b/ee/api/chalicelib/core/license.py index 905c4f2ec..2423567de 100644 --- a/ee/api/chalicelib/core/license.py +++ b/ee/api/chalicelib/core/license.py @@ -1,6 +1,7 @@ -from chalicelib.utils.helper import environ -from chalicelib.utils import pg_client +from decouple import config + from chalicelib.core import unlock +from chalicelib.utils import pg_client def get_status(tenant_id): @@ -16,7 +17,7 @@ def get_status(tenant_id): "versionNumber": r.get("version_number", ""), "license": license[0:2] + "*" * (len(license) - 4) + license[-2:], "expirationDate": unlock.get_expiration_date(), - "teamMember": int(environ.get("numberOfSeats", 0)) + "teamMember": config("numberOfSeats", cast=int, default=0) }, "count": { "teamMember": r.get("t_users"), diff --git a/ee/api/chalicelib/core/metadata.py b/ee/api/chalicelib/core/metadata.py deleted file mode 100644 index 293a8cd4c..000000000 --- a/ee/api/chalicelib/core/metadata.py +++ /dev/null @@ -1,263 +0,0 @@ -from chalicelib.utils import pg_client, helper, dev - -from chalicelib.core import projects - -import re - -MAX_INDEXES = 10 - - -def _get_column_names(): - return [f"metadata_{i}" for i in range(1, MAX_INDEXES + 1)] - - -def get(project_id): - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify( - f"""\ - SELECT - {",".join(_get_column_names())} - FROM public.projects - WHERE project_id = %(project_id)s AND deleted_at ISNULL - LIMIT 1;""", {"project_id": project_id}) - ) - metas = cur.fetchone() - results = [] - if metas is not None: - for i, k in enumerate(metas.keys()): - if metas[k] is not None: - results.append({"key": metas[k], "index": i + 1}) - return results - - -regex = re.compile(r'^[a-z0-9_-]+$', re.IGNORECASE) - - -def index_to_colname(index): - if index <= 0 or index > MAX_INDEXES: - raise Exception("metadata index out or bound") - return f"metadata_{index}" - - -def __get_available_index(project_id): - used_indexs = get(project_id) - used_indexs = [i["index"] for i in used_indexs] - if len(used_indexs) >= MAX_INDEXES: - return -1 - i = 1 - while i in used_indexs: - i += 1 - return i - - -def __edit(project_id, col_index, colname, new_name): - if new_name is None or len(new_name) == 0: - return {"errors": ["key value invalid"]} - old_metas = get(project_id) - old_metas = {k["index"]: k for k in old_metas} - if col_index not in list(old_metas.keys()): - return {"errors": ["custom field not found"]} - - with pg_client.PostgresClient() as cur: - if old_metas[col_index]["key"].lower() != new_name: - cur.execute(cur.mogrify(f"""UPDATE public.projects - SET {colname} = %(value)s - WHERE project_id = %(project_id)s AND deleted_at ISNULL - RETURNING {colname};""", - {"project_id": project_id, "value": new_name})) - new_name = cur.fetchone()[colname] - old_metas[col_index]["key"] = new_name - return {"data": old_metas[col_index]} - - -def edit(tenant_id, project_id, index: int, new_name: str): - return __edit(project_id=project_id, col_index=index, colname=index_to_colname(index), new_name=new_name) - - -def delete(tenant_id, project_id, index: int): - index = int(index) - old_segments = get(project_id) - old_segments = [k["index"] for k in old_segments] - if index not in old_segments: - return {"errors": 
["custom field not found"]} - - with pg_client.PostgresClient() as cur: - colname = index_to_colname(index) - query = cur.mogrify(f"""UPDATE public.projects - SET {colname}= NULL - WHERE project_id = %(project_id)s AND deleted_at ISNULL;""", - {"project_id": project_id}) - cur.execute(query=query) - query = cur.mogrify(f"""UPDATE public.sessions - SET {colname}= NULL - WHERE project_id = %(project_id)s""", - {"project_id": project_id}) - cur.execute(query=query) - - return {"data": get(project_id)} - - -def add(tenant_id, project_id, new_name): - index = __get_available_index(project_id=project_id) - if index < 1: - return {"errors": ["maximum allowed metadata reached"]} - with pg_client.PostgresClient() as cur: - colname = index_to_colname(index) - cur.execute( - cur.mogrify( - f"""UPDATE public.projects SET {colname}= %(key)s WHERE project_id =%(project_id)s RETURNING {colname};""", - {"key": new_name, "project_id": project_id})) - col_val = cur.fetchone()[colname] - return {"data": {"key": col_val, "index": index}} - - -def search(tenant_id, project_id, key, value): - value = value + "%" - s_query = [] - for f in _get_column_names(): - s_query.append(f"CASE WHEN {f}=%(key)s THEN TRUE ELSE FALSE END AS {f}") - - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify( - f"""\ - SELECT - {",".join(s_query)} - FROM public.projects - WHERE - project_id = %(project_id)s AND deleted_at ISNULL - LIMIT 1;""", - {"key": key, "project_id": project_id}) - ) - all_metas = cur.fetchone() - key = None - for c in all_metas: - if all_metas[c]: - key = c - break - if key is None: - return {"errors": ["key not found"]} - cur.execute( - cur.mogrify( - f"""\ - SELECT - DISTINCT "{key}" AS "{key}" - FROM public.sessions - {f'WHERE "{key}"::text ILIKE %(value)s' if value is not None and len(value) > 0 else ""} - ORDER BY "{key}" - LIMIT 20;""", - {"value": value, "project_id": project_id}) - ) - value = cur.fetchall() - return {"data": [k[key] for k in value]} - - -def get_available_keys(project_id): - all_metas = get(project_id=project_id) - return [k["key"] for k in all_metas] - - -def get_by_session_id(project_id, session_id): - all_metas = get(project_id=project_id) - if len(all_metas) == 0: - return [] - keys = {index_to_colname(k["index"]): k["key"] for k in all_metas} - with pg_client.PostgresClient() as cur: - cur.execute( - cur.mogrify( - f"""\ - select {",".join(keys.keys())} - FROM public.sessions - WHERE project_id= %(project_id)s AND session_id=%(session_id)s;""", - {"session_id": session_id, "project_id": project_id}) - ) - session_metas = cur.fetchall() - results = [] - for m in session_metas: - r = {} - for k in m.keys(): - r[keys[k]] = m[k] - results.append(r) - return results - - -def get_keys_by_projects(project_ids): - if project_ids is None or len(project_ids) == 0: - return {} - with pg_client.PostgresClient() as cur: - query = cur.mogrify( - f"""\ - SELECT - project_id, - {",".join(_get_column_names())} - FROM public.projects - WHERE project_id IN %(project_ids)s AND deleted_at ISNULL;""", - {"project_ids": tuple(project_ids)}) - - cur.execute(query) - rows = cur.fetchall() - results = {} - for r in rows: - project_id = r.pop("project_id") - results[project_id] = {} - for m in r: - if r[m] is not None: - results[project_id][m] = r[m] - return results - - -def add_edit_delete(tenant_id, project_id, new_metas): - old_metas = get(project_id) - old_indexes = [k["index"] for k in old_metas] - new_indexes = [k["index"] for k in new_metas if "index" in k] - new_keys = [k["key"] 
for k in new_metas] - - add_metas = [k["key"] for k in new_metas - if "index" not in k] - new_metas = {k["index"]: {"key": k["key"]} for - k in new_metas if - "index" in k} - old_metas = {k["index"]: {"key": k["key"]} for k in old_metas} - - if len(new_keys) > 20: - return {"errors": ["you cannot add more than 20 key"]} - for k in new_metas.keys(): - if re.match(regex, new_metas[k]["key"]) is None: - return {"errors": [f"invalid key {k}"]} - for k in add_metas: - if re.match(regex, k) is None: - return {"errors": [f"invalid key {k}"]} - if len(new_indexes) > len(set(new_indexes)): - return {"errors": ["duplicate indexes"]} - if len(new_keys) > len(set(new_keys)): - return {"errors": ["duplicate keys"]} - to_delete = list(set(old_indexes) - set(new_indexes)) - - with pg_client.PostgresClient() as cur: - for d in to_delete: - delete(tenant_id=tenant_id, project_id=project_id, index=d) - - for k in add_metas: - add(tenant_id=tenant_id, project_id=project_id, new_name=k) - - for k in new_metas.keys(): - if new_metas[k]["key"].lower() != old_metas[k]["key"]: - edit(tenant_id=tenant_id, project_id=project_id, index=k, new_name=new_metas[k]["key"]) - - return {"data": get(project_id)} - - -@dev.timed -def get_remaining_metadata_with_count(tenant_id): - all_projects = projects.get_projects(tenant_id=tenant_id) - results = [] - for p in all_projects: - used_metas = get(p["projectId"]) - if MAX_INDEXES < 0: - remaining = -1 - else: - remaining = MAX_INDEXES - len(used_metas) - results.append({**p, "limit": MAX_INDEXES, "remaining": remaining, "count": len(used_metas)}) - - return results diff --git a/ee/api/chalicelib/core/projects.py b/ee/api/chalicelib/core/projects.py index cb1e7b1de..2728e5077 100644 --- a/ee/api/chalicelib/core/projects.py +++ b/ee/api/chalicelib/core/projects.py @@ -1,5 +1,6 @@ import json +import schemas from chalicelib.core import users from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC @@ -40,8 +41,28 @@ def __create(tenant_id, name): return get_project(tenant_id=tenant_id, project_id=project_id, include_gdpr=True) -def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, stack_integrations=False, version=False): +def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, stack_integrations=False, version=False, + last_tracker_version=None, user_id=None): with pg_client.PostgresClient() as cur: + tracker_query = "" + if last_tracker_version is not None and len(last_tracker_version) > 0: + tracker_query = cur.mogrify( + """,(SELECT tracker_version FROM public.sessions + WHERE sessions.project_id = s.project_id + AND tracker_version=%(version)s AND tracker_version IS NOT NULL LIMIT 1) AS tracker_version""", + {"version": last_tracker_version}).decode('UTF-8') + elif version: + tracker_query = ",(SELECT tracker_version FROM public.sessions WHERE sessions.project_id = s.project_id ORDER BY start_ts DESC LIMIT 1) AS tracker_version" + + role_query = """INNER JOIN LATERAL (SELECT 1 + FROM users + INNER JOIN roles USING (role_id) + LEFT JOIN roles_projects USING (role_id) + WHERE users.user_id = %(user_id)s + AND users.deleted_at ISNULL + AND users.tenant_id = %(tenant_id)s + AND (roles.all_projects OR roles_projects.project_id = s.project_id) + ) AS role_project ON (TRUE)""" cur.execute( cur.mogrify(f"""\ SELECT @@ -49,13 +70,14 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st {',s.gdpr' if gdpr else ''} {',COALESCE((SELECT TRUE FROM public.sessions WHERE 
sessions.project_id = s.project_id LIMIT 1), FALSE) AS recorded' if recorded else ''} {',stack_integrations.count>0 AS stack_integrations' if stack_integrations else ''} - {',(SELECT tracker_version FROM public.sessions WHERE sessions.project_id = s.project_id ORDER BY start_ts DESC LIMIT 1) AS tracker_version' if version else ''} + {tracker_query} FROM public.projects AS s {'LEFT JOIN LATERAL (SELECT COUNT(*) AS count FROM public.integrations WHERE s.project_id = integrations.project_id LIMIT 1) AS stack_integrations ON TRUE' if stack_integrations else ''} - where s.tenant_id =%(tenant_id)s + {role_query if user_id is not None else ""} + WHERE s.tenant_id =%(tenant_id)s AND s.deleted_at IS NULL ORDER BY s.project_id;""", - {"tenant_id": tenant_id}) + {"tenant_id": tenant_id, "user_id": user_id}) ) rows = cur.fetchall() if recording_state: @@ -77,15 +99,27 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st return helper.list_to_camel_case(rows) -def get_project(tenant_id, project_id, include_last_session=False, include_gdpr=None): +def get_project(tenant_id, project_id, include_last_session=False, include_gdpr=None, version=False, + last_tracker_version=None): with pg_client.PostgresClient() as cur: + tracker_query = "" + if last_tracker_version is not None and len(last_tracker_version) > 0: + tracker_query = cur.mogrify( + """,(SELECT tracker_version FROM public.sessions + WHERE sessions.project_id = s.project_id + AND tracker_version=%(version)s AND tracker_version IS NOT NULL LIMIT 1) AS tracker_version""", + {"version": last_tracker_version}).decode('UTF-8') + elif version: + tracker_query = ",(SELECT tracker_version FROM public.sessions WHERE sessions.project_id = s.project_id ORDER BY start_ts DESC LIMIT 1) AS tracker_version" + query = cur.mogrify(f"""\ SELECT s.project_id, - s.name, - s.project_key + s.project_key, + s.name {",(SELECT max(ss.start_ts) FROM public.sessions AS ss WHERE ss.project_id = %(project_id)s) AS last_recorded_session_at" if include_last_session else ""} {',s.gdpr' if include_gdpr else ''} + {tracker_query} FROM public.projects AS s where s.tenant_id =%(tenant_id)s AND s.project_id =%(project_id)s @@ -100,25 +134,52 @@ def get_project(tenant_id, project_id, include_last_session=False, include_gdpr= return helper.dict_to_camel_case(row) -def is_authorized(project_id, tenant_id): +def is_authorized(project_id, tenant_id, user_id=None): if project_id is None or not str(project_id).isdigit(): return False - return get_project(tenant_id=tenant_id, project_id=project_id) is not None + with pg_client.PostgresClient() as cur: + role_query = """INNER JOIN LATERAL (SELECT 1 + FROM users + INNER JOIN roles USING (role_id) + LEFT JOIN roles_projects USING (role_id) + WHERE users.user_id = %(user_id)s + AND users.deleted_at ISNULL + AND users.tenant_id = %(tenant_id)s + AND (roles.all_projects OR roles_projects.project_id = %(project_id)s) + ) AS role_project ON (TRUE)""" + + query = cur.mogrify(f"""\ + SELECT project_id + FROM public.projects AS s + {role_query if user_id is not None else ""} + where s.tenant_id =%(tenant_id)s + AND s.project_id =%(project_id)s + AND s.deleted_at IS NULL + LIMIT 1;""", + {"tenant_id": tenant_id, "project_id": project_id, "user_id": user_id}) + cur.execute( + query=query + ) + row = cur.fetchone() + return row is not None -def create(tenant_id, user_id, data): - admin = users.get(user_id=user_id, tenant_id=tenant_id) - if not admin["admin"] and not admin["superAdmin"]: - return {"errors": ["unauthorized"]} 
- return {"data": __create(tenant_id=tenant_id, name=data.get("name", "my first project"))} +def create(tenant_id, user_id, data: schemas.CreateProjectSchema, skip_authorization=False): + if not skip_authorization: + admin = users.get(user_id=user_id, tenant_id=tenant_id) + if not admin["admin"] and not admin["superAdmin"]: + return {"errors": ["unauthorized"]} + if admin["roleId"] is not None and not admin["allProjects"]: + return {"errors": ["unauthorized: you need allProjects permission to create a new project"]} + return {"data": __create(tenant_id=tenant_id, name=data.name)} -def edit(tenant_id, user_id, project_id, data): +def edit(tenant_id, user_id, project_id, data: schemas.CreateProjectSchema): admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} return {"data": __update(tenant_id=tenant_id, project_id=project_id, - changes={"name": data.get("name", "my first project")})} + changes={"name": data.name})} def delete(tenant_id, user_id, project_id): @@ -128,8 +189,7 @@ def delete(tenant_id, user_id, project_id): return {"errors": ["unauthorized"]} with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify("""\ - UPDATE public.projects + cur.mogrify("""UPDATE public.projects SET deleted_at = timezone('utc'::text, now()), active = FALSE @@ -228,3 +288,44 @@ def update_capture_status(project_id, changes): ) return changes + + +def get_project_by_key(tenant_id, project_key, include_last_session=False, include_gdpr=None): + with pg_client.PostgresClient() as cur: + query = cur.mogrify(f"""\ + SELECT + s.project_key, + s.name + {",(SELECT max(ss.start_ts) FROM public.sessions AS ss WHERE ss.project_key = %(project_key)s) AS last_recorded_session_at" if include_last_session else ""} + {',s.gdpr' if include_gdpr else ''} + FROM public.projects AS s + where s.project_key =%(project_key)s + AND s.tenant_id =%(tenant_id)s + AND s.deleted_at IS NULL + LIMIT 1;""", + {"project_key": project_key, "tenant_id": tenant_id}) + + cur.execute( + query=query + ) + row = cur.fetchone() + return helper.dict_to_camel_case(row) + + +def is_authorized_batch(project_ids, tenant_id): + if project_ids is None or not len(project_ids): + return False + with pg_client.PostgresClient() as cur: + query = cur.mogrify("""\ + SELECT project_id + FROM public.projects + where tenant_id =%(tenant_id)s + AND project_id IN %(project_ids)s + AND deleted_at IS NULL;""", + {"tenant_id": tenant_id, "project_ids": tuple(project_ids)}) + + cur.execute( + query=query + ) + rows = cur.fetchall() + return [r["project_id"] for r in rows] diff --git a/ee/api/chalicelib/core/reset_password.py b/ee/api/chalicelib/core/reset_password.py index e51816e85..194c37704 100644 --- a/ee/api/chalicelib/core/reset_password.py +++ b/ee/api/chalicelib/core/reset_password.py @@ -1,26 +1,25 @@ +import schemas from chalicelib.core import users from chalicelib.utils import email_helper, captcha, helper -def reset(data): +def reset(data: schemas.ForgetPasswordPayloadSchema): print("====================== reset password ===============") print(data) - if helper.allow_captcha() and not captcha.is_valid(data["g-recaptcha-response"]): + if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response): print("error: Invalid captcha.") return {"errors": ["Invalid captcha."]} - if "email" not in data: - return {"errors": ["email not found in body"]} if not helper.has_smtp(): return {"errors": ["no SMTP configuration found, you can ask your admin to 
reset your password"]} - a_user = users.get_by_email_only(data["email"]) + a_user = users.get_by_email_only(data.email) if a_user is not None: # ---FOR SSO if a_user.get("origin") is not None and a_user.get("hasPassword", False) is False: return {"errors": ["Please use your SSO to login"]} # ---------- invitation_link = users.generate_new_invitation(user_id=a_user["id"]) - email_helper.send_forgot_password(recipient=data["email"], invitation_link=invitation_link) + email_helper.send_forgot_password(recipient=data.email, invitation_link=invitation_link) else: - print(f"invalid email address [{data['email']}]") + print(f"invalid email address [{data.email}]") return {"errors": ["invalid email address"]} return {"data": {"state": "success"}} diff --git a/ee/api/chalicelib/core/roles.py b/ee/api/chalicelib/core/roles.py index 8ba62091a..5bd80dc06 100644 --- a/ee/api/chalicelib/core/roles.py +++ b/ee/api/chalicelib/core/roles.py @@ -1,64 +1,111 @@ -from chalicelib.core import users +import schemas_ee +from chalicelib.core import users, projects from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC -def update(tenant_id, user_id, role_id, changes): +def update(tenant_id, user_id, role_id, data: schemas_ee.RolePayloadSchema): admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} - if len(changes.keys()) == 0: - return None - ALLOW_EDIT = ["name", "description", "permissions"] - sub_query = [] - for key in changes.keys(): - if key in ALLOW_EDIT: - sub_query.append(f"{helper.key_to_snake_case(key)} = %({key})s") + if not data.all_projects and (data.projects is None or len(data.projects) == 0): + return {"errors": ["must specify a project or all projects"]} + if data.projects is not None and len(data.projects) > 0 and not data.all_projects: + data.projects = projects.is_authorized_batch(project_ids=data.projects, tenant_id=tenant_id) with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify(f"""\ + cur.mogrify("""SELECT 1 + FROM public.roles + WHERE role_id = %(role_id)s + AND tenant_id = %(tenant_id)s + AND protected = TRUE + LIMIT 1;""", + {"tenant_id": tenant_id, "role_id": role_id}) + ) + if cur.fetchone() is not None: + return {"errors": ["this role is protected"]} + cur.execute( + cur.mogrify("""\ UPDATE public.roles - SET {" ,".join(sub_query)} + SET name= %(name)s, + description= %(description)s, + permissions= %(permissions)s, + all_projects= %(all_projects)s WHERE role_id = %(role_id)s AND tenant_id = %(tenant_id)s AND deleted_at ISNULL AND protected = FALSE - RETURNING *;""", - {"tenant_id": tenant_id, "role_id": role_id, **changes}) + RETURNING *, COALESCE((SELECT ARRAY_AGG(project_id) + FROM roles_projects WHERE roles_projects.role_id=%(role_id)s),'{}') AS projects;""", + {"tenant_id": tenant_id, "role_id": role_id, **data.dict()}) ) row = cur.fetchone() row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"]) + if not data.all_projects: + d_projects = [i for i in row["projects"] if i not in data.projects] + if len(d_projects) > 0: + cur.execute( + cur.mogrify( + "DELETE FROM roles_projects WHERE role_id=%(role_id)s AND project_id IN %(project_ids)s", + {"role_id": role_id, "project_ids": tuple(d_projects)}) + ) + n_projects = [i for i in data.projects if i not in row["projects"]] + if len(n_projects) > 0: + cur.execute( + cur.mogrify( + f"""INSERT INTO roles_projects(role_id, project_id) + VALUES 
{",".join([f"(%(role_id)s,%(project_id_{i})s)" for i in range(len(n_projects))])}""", + {"role_id": role_id, **{f"project_id_{i}": p for i, p in enumerate(n_projects)}}) + ) + row["projects"] = data.projects + return helper.dict_to_camel_case(row) -def create(tenant_id, user_id, name, description, permissions): +def create(tenant_id, user_id, data: schemas_ee.RolePayloadSchema): admin = users.get(user_id=user_id, tenant_id=tenant_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} - + if not data.all_projects and (data.projects is None or len(data.projects) == 0): + return {"errors": ["must specify a project or all projects"]} + if data.projects is not None and len(data.projects) > 0 and not data.all_projects: + data.projects = projects.is_authorized_batch(project_ids=data.projects, tenant_id=tenant_id) with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify("""INSERT INTO roles(tenant_id, name, description, permissions) - VALUES (%(tenant_id)s, %(name)s, %(description)s, %(permissions)s::text[]) + cur.mogrify("""INSERT INTO roles(tenant_id, name, description, permissions, all_projects) + VALUES (%(tenant_id)s, %(name)s, %(description)s, %(permissions)s::text[], %(all_projects)s) RETURNING *;""", - {"tenant_id": tenant_id, "name": name, "description": description, "permissions": permissions}) + {"tenant_id": tenant_id, "name": data.name, "description": data.description, + "permissions": data.permissions, "all_projects": data.all_projects}) ) row = cur.fetchone() row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"]) + if not data.all_projects: + role_id = row["role_id"] + cur.execute( + cur.mogrify(f"""INSERT INTO roles_projects(role_id, project_id) + VALUES {",".join(f"(%(role_id)s,%(project_id_{i})s)" for i in range(len(data.projects)))};""", + {"role_id": role_id, **{f"project_id_{i}": p for i, p in enumerate(data.projects)}}) + ) return helper.dict_to_camel_case(row) def get_roles(tenant_id): with pg_client.PostgresClient() as cur: cur.execute( - cur.mogrify("""SELECT * - FROM public.roles - where tenant_id =%(tenant_id)s - AND deleted_at IS NULL - ORDER BY role_id;""", + cur.mogrify("""SELECT roles.*, COALESCE(projects, '{}') AS projects + FROM public.roles + LEFT JOIN LATERAL (SELECT array_agg(project_id) AS projects + FROM roles_projects + INNER JOIN projects USING (project_id) + WHERE roles_projects.role_id = roles.role_id + AND projects.deleted_at ISNULL ) AS role_projects ON (TRUE) + WHERE tenant_id =%(tenant_id)s + AND deleted_at IS NULL + ORDER BY role_id;""", {"tenant_id": tenant_id}) ) rows = cur.fetchall() @@ -71,11 +118,10 @@ def get_role_by_name(tenant_id, name): with pg_client.PostgresClient() as cur: cur.execute( cur.mogrify("""SELECT * - FROM public.roles - where tenant_id =%(tenant_id)s - AND deleted_at IS NULL - AND name ILIKE %(name)s - ;""", + FROM public.roles + where tenant_id =%(tenant_id)s + AND deleted_at IS NULL + AND name ILIKE %(name)s;""", {"tenant_id": tenant_id, "name": name}) ) row = cur.fetchone() @@ -92,11 +138,11 @@ def delete(tenant_id, user_id, role_id): with pg_client.PostgresClient() as cur: cur.execute( cur.mogrify("""SELECT 1 - FROM public.roles - WHERE role_id = %(role_id)s - AND tenant_id = %(tenant_id)s - AND protected = TRUE - LIMIT 1;""", + FROM public.roles + WHERE role_id = %(role_id)s + AND tenant_id = %(tenant_id)s + AND protected = TRUE + LIMIT 1;""", {"tenant_id": tenant_id, "role_id": role_id}) ) if cur.fetchone() is not None: diff --git 
a/ee/api/chalicelib/core/signup.py b/ee/api/chalicelib/core/signup.py index 4650736a5..aa4ba2af9 100644 --- a/ee/api/chalicelib/core/signup.py +++ b/ee/api/chalicelib/core/signup.py @@ -1,21 +1,24 @@ -from chalicelib.utils import helper -from chalicelib.utils import pg_client +import json + +from decouple import config + +import schemas from chalicelib.core import users, telemetry, tenants from chalicelib.utils import captcha -import json +from chalicelib.utils import helper +from chalicelib.utils import pg_client from chalicelib.utils.TimeUTC import TimeUTC -from chalicelib.utils.helper import environ -def create_step1(data): +def create_step1(data: schemas.UserSignupSchema): print(f"===================== SIGNUP STEP 1 AT {TimeUTC.to_human_readable(TimeUTC.now())} UTC") errors = [] if tenants.tenants_exists(): - return {"errors":["tenants already registered"]} + return {"errors": ["tenants already registered"]} - email = data.get("email") + email = data.email print(f"=====================> {email}") - password = data.get("password") + password = data.password print("Verifying email validity") if email is None or len(email) < 5 or not helper.is_valid_email(email): @@ -28,25 +31,25 @@ def create_step1(data): errors.append("Email address previously deleted.") print("Verifying captcha") - if helper.allow_captcha() and not captcha.is_valid(data["g-recaptcha-response"]): + if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response): errors.append("Invalid captcha.") print("Verifying password validity") - if len(data["password"]) < 6: + if len(password) < 6: errors.append("Password is too short, it must be at least 6 characters long.") print("Verifying fullname validity") - fullname = data.get("fullname") + fullname = data.fullname if fullname is None or len(fullname) < 1 or not helper.is_alphabet_space_dash(fullname): errors.append("Invalid full name.") print("Verifying company's name validity") - company_name = data.get("organizationName") + company_name = data.organizationName if company_name is None or len(company_name) < 1 or not helper.is_alphanumeric_space(company_name): errors.append("invalid organization's name") print("Verifying project's name validity") - project_name = data.get("projectName") + project_name = data.projectName if project_name is None or len(project_name) < 1: project_name = "my first project" @@ -60,7 +63,7 @@ def create_step1(data): params = {"email": email, "password": password, "fullname": fullname, "companyName": company_name, "projectName": project_name, - "versionNumber": environ["version_number"], + "versionNumber": config("version_number"), "data": json.dumps({"lastAnnouncementView": TimeUTC.now()})} query = """\ WITH t AS ( @@ -131,4 +134,4 @@ def create_step1(data): "user": r, "client": c, } - } \ No newline at end of file + } diff --git a/ee/api/chalicelib/core/tenants.py b/ee/api/chalicelib/core/tenants.py index eb827d827..45491f654 100644 --- a/ee/api/chalicelib/core/tenants.py +++ b/ee/api/chalicelib/core/tenants.py @@ -1,6 +1,6 @@ -from chalicelib.utils import pg_client -from chalicelib.utils import helper from chalicelib.core import users +from chalicelib.utils import helper +from chalicelib.utils import pg_client def get_by_tenant_key(tenant_key): diff --git a/ee/api/chalicelib/core/traces.py b/ee/api/chalicelib/core/traces.py new file mode 100644 index 000000000..fd0ae6c2b --- /dev/null +++ b/ee/api/chalicelib/core/traces.py @@ -0,0 +1,157 @@ +import json +import queue +import re +from typing import Optional, List + +from decouple 
import config +from fastapi import Request, Response +from pydantic import BaseModel, Field +from starlette.background import BackgroundTask + +import app as main_app +from chalicelib.utils import pg_client +from chalicelib.utils.TimeUTC import TimeUTC +from schemas import CurrentContext + +IGNORE_ROUTES = [ + {"method": ["*"], "path": "/notifications"}, + {"method": ["*"], "path": "/announcements"}, + {"method": ["*"], "path": "/client"}, + {"method": ["*"], "path": "/account"}, + {"method": ["GET"], "path": "/projects"}, + {"method": ["*"], "path": "/{projectId}/sessions/search2"}, + {"method": ["GET"], "path": "/{projectId}/sessions2/favorite"}, + {"method": ["GET"], "path": re.compile("^/{projectId}/sessions2/{sessionId}/.*")}, + {"method": ["GET"], "path": "/{projectId}/sample_rate"}, + {"method": ["GET"], "path": "/boarding"}, + {"method": ["GET"], "path": "/{projectId}/metadata"}, + {"method": ["GET"], "path": "/{projectId}/integration/sources"}, + {"method": ["GET"], "path": "/{projectId}/funnels"}, + {"method": ["GET"], "path": "/integrations/slack/channels"}, + {"method": ["GET"], "path": "/webhooks"}, + {"method": ["GET"], "path": "/{projectId}/alerts"}, + {"method": ["GET"], "path": "/client/members"}, + {"method": ["GET"], "path": "/client/roles"}, + {"method": ["GET"], "path": "/announcements/view"}, + {"method": ["GET"], "path": "/config/weekly_report"}, + {"method": ["GET"], "path": "/{projectId}/events/search"}, + {"method": ["POST"], "path": "/{projectId}/errors/search"}, + {"method": ["GET"], "path": "/{projectId}/errors/stats"}, + {"method": ["GET"], "path": re.compile("^/{projectId}/errors/{errorId}/.*")}, + {"method": ["GET"], "path": re.compile("^/integrations/.*")}, + {"method": ["*"], "path": re.compile("^/{projectId}/dashboard/.*")}, + {"method": ["*"], "path": re.compile("^/{projectId}/funnels$")}, + {"method": ["*"], "path": re.compile("^/{projectId}/funnels/.*")}, +] +IGNORE_IN_PAYLOAD = ["token", "password", "authorizationToken", "authHeader", "xQueryKey", "awsSecretAccessKey", + "serviceAccountCredentials", "accessKey", "applicationKey", "apiKey"] + + +class TraceSchema(BaseModel): + user_id: Optional[int] = Field(None) + tenant_id: int = Field(...) + auth: Optional[str] = Field(None) + action: str = Field(...) + method: str = Field(...) + path_format: str = Field(...) + endpoint: str = Field(...) + payload: Optional[dict] = Field(None) + parameters: Optional[dict] = Field(None) + status: Optional[int] = Field(None) + created_at: int = Field(...) 
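Before a request body is persisted by this tracing module, sensitive keys are blanked out against IGNORE_IN_PAYLOAD. A standalone sketch of that masking step, using a made-up payload and a subset of the real key list (the actual logic is inlined in process_trace below; the helper name mask_sensitive is illustrative only):

    def mask_sensitive(body: dict, ignored=("token", "password", "apiKey")) -> dict:
        # same set-intersection idea process_trace uses: any ignored key
        # present in the payload is overwritten with the literal "HIDDEN"
        for attribute in set(body.keys()) & set(ignored):
            body[attribute] = "HIDDEN"
        return body

    # mask_sensitive({"password": "s3cret", "page": 2})
    # -> {"password": "HIDDEN", "page": 2}
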
+ + +def __process_trace(trace: TraceSchema): + data = trace.dict() + data["parameters"] = json.dumps(trace.parameters) if trace.parameters is not None and len( + trace.parameters.keys()) > 0 else None + data["payload"] = json.dumps(trace.payload) if trace.payload is not None and len(trace.payload.keys()) > 0 else None + return data + + +async def write_trace(trace: TraceSchema): + data = __process_trace(trace) + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + f"""INSERT INTO traces(user_id, tenant_id, created_at, auth, action, method, path_format, endpoint, payload, parameters, status) + VALUES (%(user_id)s, %(tenant_id)s, %(created_at)s, %(auth)s, %(action)s, %(method)s, %(path_format)s, %(endpoint)s, %(payload)s::jsonb, %(parameters)s::jsonb, %(status)s);""", + data) + ) + + +async def write_traces_batch(traces: List[TraceSchema]): + if len(traces) == 0: + return + params = {} + values = [] + for i, t in enumerate(traces): + data = __process_trace(t) + for key in data.keys(): + params[f"{key}_{i}"] = data[key] + values.append( + f"(%(user_id_{i})s, %(tenant_id_{i})s, %(created_at_{i})s, %(auth_{i})s, %(action_{i})s, %(method_{i})s, %(path_format_{i})s, %(endpoint_{i})s, %(payload_{i})s::jsonb, %(parameters_{i})s::jsonb, %(status_{i})s)") + + with pg_client.PostgresClient() as cur: + cur.execute( + cur.mogrify( + f"""INSERT INTO traces(user_id, tenant_id, created_at, auth, action, method, path_format, endpoint, payload, parameters, status) + VALUES {" , ".join(values)};""", + params) + ) + + +async def process_trace(action: str, path_format: str, request: Request, response: Response): + if not hasattr(request.state, "currentContext"): + return + current_context: CurrentContext = request.state.currentContext + body: Optional[dict] = None + if request.method in ["POST", "PUT", "DELETE"]: + body = await request.json() + intersect = list(set(body.keys()) & set(IGNORE_IN_PAYLOAD)) + for attribute in intersect: + body[attribute] = "HIDDEN" + current_trace = TraceSchema(tenant_id=current_context.tenant_id, + user_id=current_context.user_id if isinstance(current_context, CurrentContext) \ + else None, + auth="jwt" if isinstance(current_context, CurrentContext) else "apiKey", + action=action, + endpoint=str(request.url.path), method=request.method, + payload=body, + parameters=dict(request.query_params), + status=response.status_code, + path_format=path_format, + created_at=TimeUTC.now()) + if not hasattr(main_app.app, "queue_system"): + main_app.app.queue_system = queue.Queue() + q: queue.Queue = main_app.app.queue_system + q.put(current_trace) + + +def trace(action: str, path_format: str, request: Request, response: Response): + for p in IGNORE_ROUTES: + if (isinstance(p["path"], str) and p["path"] == path_format \ + or isinstance(p["path"], re.Pattern) and re.search(p["path"], path_format)) \ + and (p["method"][0] == "*" or request.method in p["method"]): + return + background_task: BackgroundTask = BackgroundTask(process_trace, action, path_format, request, response) + if response.background is None: + response.background = background_task + else: + response.background.add_task(background_task.func, *background_task.args, **background_task.kwargs) + + +async def process_traces_queue(): + queue_system: queue.Queue = main_app.app.queue_system + traces = [] + while not queue_system.empty(): + obj = queue_system.get_nowait() + traces.append(obj) + if len(traces) > 0: + await write_traces_batch(traces) + + +cron_jobs = [ + {"func": process_traces_queue, "trigger": "interval",
"seconds": config("traces_period", cast=int, default=60), + "misfire_grace_time": 20} +] diff --git a/ee/api/chalicelib/core/unlock.py b/ee/api/chalicelib/core/unlock.py index f4d5da19c..d656edf8a 100644 --- a/ee/api/chalicelib/core/unlock.py +++ b/ee/api/chalicelib/core/unlock.py @@ -1,7 +1,10 @@ -from chalicelib.utils.helper import environ -from chalicelib.utils.TimeUTC import TimeUTC -import requests import uuid +from os import environ + +import requests +from decouple import config + +from chalicelib.utils.TimeUTC import TimeUTC def __get_mid(): @@ -9,7 +12,7 @@ def __get_mid(): def get_license(): - return environ.get("LICENSE_KEY", "") + return config("LICENSE_KEY", default="") def check(): @@ -33,10 +36,10 @@ def check(): def get_expiration_date(): - return int(environ.get("expiration", 0)) + return config("expiration", default=0, cast=int) def is_valid(): - if environ.get("lastCheck") is None: + if config("lastCheck", default=None) is None or (get_expiration_date() - TimeUTC.now()) <= 0: check() return get_expiration_date() - TimeUTC.now() > 0 diff --git a/ee/api/chalicelib/core/users.py b/ee/api/chalicelib/core/users.py index f82abd191..ce5bcca5d 100644 --- a/ee/api/chalicelib/core/users.py +++ b/ee/api/chalicelib/core/users.py @@ -1,13 +1,15 @@ import json import secrets -from chalicelib.core import authorizers, metadata, projects, assist -from chalicelib.core import tenants +from decouple import config +from fastapi import BackgroundTasks + +from chalicelib.core import authorizers, metadata, projects, roles +from chalicelib.core import tenants, assist from chalicelib.utils import dev, SAML2_helper -from chalicelib.utils import helper +from chalicelib.utils import helper, email_helper from chalicelib.utils import pg_client from chalicelib.utils.TimeUTC import TimeUTC -from chalicelib.utils.helper import environ def __generate_invitation_token(): @@ -20,7 +22,7 @@ def create_new_member(tenant_id, email, invitation_token, admin, name, owner=Fal WITH u AS ( INSERT INTO public.users (tenant_id, email, role, name, data, role_id) VALUES (%(tenantId)s, %(email)s, %(role)s, %(name)s, %(data)s, %(role_id)s) - RETURNING user_id,email,role,name,appearance, role_id + RETURNING tenant_id,user_id,email,role,name,appearance, role_id ), au AS (INSERT INTO public.basic_authentication (user_id, generated_password, invitation_token, invited_at) VALUES ((SELECT user_id FROM u), TRUE, %(invitation_token)s, timezone('utc'::text, now())) @@ -36,8 +38,11 @@ def create_new_member(tenant_id, email, invitation_token, admin, name, owner=Fal (CASE WHEN u.role = 'admin' THEN TRUE ELSE FALSE END) AS admin, (CASE WHEN u.role = 'member' THEN TRUE ELSE FALSE END) AS member, au.invitation_token, - u.role_id - FROM u,au;""", + u.role_id, + roles.name AS role_name, + roles.permissions, + TRUE AS has_password + FROM au,u LEFT JOIN roles USING(tenant_id) WHERE roles.role_id IS NULL OR roles.role_id = %(role_id)s;""", {"tenantId": tenant_id, "email": email, "role": "owner" if owner else "admin" if admin else "member", "name": name, "data": json.dumps({"lastAnnouncementView": TimeUTC.now()}), @@ -190,7 +195,7 @@ def update(tenant_id, user_id, changes): return helper.dict_to_camel_case(cur.fetchone()) -def create_member(tenant_id, user_id, data): +def create_member(tenant_id, user_id, data, background_tasks: BackgroundTasks): admin = get(tenant_id=tenant_id, user_id=user_id) if not admin["admin"] and not admin["superAdmin"]: return {"errors": ["unauthorized"]} @@ -205,6 +210,8 @@ def create_member(tenant_id, user_id, 
data): if name is None: name = data["email"] role_id = data.get("roleId") + if role_id is None: + role_id = roles.get_role_by_name(tenant_id=tenant_id, name="member").get("roleId") invitation_token = __generate_invitation_token() user = get_deleted_user_by_email(email=data["email"]) if user is not None: @@ -214,18 +221,24 @@ def create_member(tenant_id, user_id, data): new_member = create_new_member(tenant_id=tenant_id, email=data["email"], invitation_token=invitation_token, admin=data.get("admin", False), name=name, role_id=role_id) new_member["invitationLink"] = __get_invitation_link(new_member.pop("invitationToken")) - helper.async_post(environ['email_basic'] % 'member_invitation', - { - "email": data["email"], - "invitationLink": new_member["invitationLink"], - "clientId": tenants.get_by_tenant_id(tenant_id)["name"], - "senderName": admin["name"] - }) + # helper.async_post(config('email_basic') % 'member_invitation', + # { + # "email": data["email"], + # "invitationLink": new_member["invitationLink"], + # "clientId": tenants.get_by_tenant_id(tenant_id)["name"], + # "senderName": admin["name"] + # }) + background_tasks.add_task(email_helper.send_team_invitation, **{ + "recipient": data["email"], + "invitation_link": new_member["invitationLink"], + "client_id": tenants.get_by_tenant_id(tenant_id)["name"], + "sender_name": admin["name"] + }) return {"data": new_member} def __get_invitation_link(invitation_token): - return environ["SITE_URL"] + environ["invitation_link"] % invitation_token + return config("SITE_URL") + config("invitation_link") % invitation_token def allow_password_change(user_id, delta_min=10): @@ -261,6 +274,7 @@ def get(user_id, tenant_id): role_id, roles.name AS role_name, roles.permissions, + roles.all_projects, basic_authentication.password IS NOT NULL AS has_password FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id LEFT JOIN public.roles USING (role_id) @@ -299,12 +313,15 @@ def edit(user_id_to_update, tenant_id, changes, editor_id): admin = get(tenant_id=tenant_id, user_id=editor_id) if not admin["superAdmin"] and not admin["admin"]: return {"errors": ["unauthorized"]} - if user["superAdmin"] and "admin" in changes: - changes.pop("admin") + if editor_id == user_id_to_update: + if user["superAdmin"]: + changes.pop("admin") + elif user["admin"] != changes["admin"]: + return {"errors": ["cannot change your own role"]} keys = list(changes.keys()) for k in keys: - if k not in ALLOW_EDIT: + if k not in ALLOW_EDIT or changes[k] is None: changes.pop(k) keys = list(changes.keys()) @@ -466,7 +483,7 @@ def change_password(tenant_id, user_id, email, old_password, new_password): c = tenants.get_by_tenant_id(tenant_id) c.pop("createdAt") c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, - stack_integrations=True) + stack_integrations=True, user_id=user_id) c["smtp"] = helper.has_smtp() c["iceServers"] = assist.get_ice_servers() return { @@ -494,7 +511,7 @@ def set_password_invitation(tenant_id, user_id, new_password): c = tenants.get_by_tenant_id(tenant_id) c.pop("createdAt") c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, - stack_integrations=True) + stack_integrations=True, user_id=user_id) c["smtp"] = helper.has_smtp() c["iceServers"] = assist.get_ice_servers() return { @@ -719,3 +736,57 @@ def create_sso_user(tenant_id, email, admin, name, origin, role_id, internal_id= query ) return helper.dict_to_camel_case(cur.fetchone()) + + 
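(Editorial sketch, not part of the diff: create_member() above now hands the invitation e-mail to FastAPI's BackgroundTasks instead of the old fire-and-forget helper.async_post, so SMTP latency no longer delays the HTTP response. A minimal self-contained illustration of that mechanism, with a stand-in for email_helper.send_team_invitation:)

from fastapi import BackgroundTasks, FastAPI

demo = FastAPI()

def send_team_invitation(recipient: str, invitation_link: str,
                         client_id: str, sender_name: str) -> None:
    # stand-in for chalicelib.utils.email_helper.send_team_invitation
    print(f"emailing {recipient}: {invitation_link} ({client_id}/{sender_name})")

@demo.post("/members")
def add_member(email: str, background_tasks: BackgroundTasks):
    # tasks queued here run only after the response has been sent
    background_tasks.add_task(send_team_invitation, recipient=email,
                              invitation_link="https://site.example/invite/abc",
                              client_id="acme", sender_name="admin")
    return {"data": {"email": email}}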
+def restore_sso_user(user_id, tenant_id, email, admin, name, origin, role_id, internal_id=None): + with pg_client.PostgresClient() as cur: + query = cur.mogrify(f"""\ + WITH u AS ( + UPDATE public.users + SET tenant_id= %(tenantId)s, + role= %(role)s, + name= %(name)s, + data= %(data)s, + origin= %(origin)s, + internal_id= %(internal_id)s, + role_id= %(role_id)s, + deleted_at= NULL, + created_at= default, + api_key= default, + jwt_iat= NULL, + appearance= default, + weekly_report= default + WHERE user_id = %(user_id)s + RETURNING * + ), + au AS ( + UPDATE public.basic_authentication + SET password= default, + generated_password= default, + invitation_token= default, + invited_at= default, + change_pwd_token= default, + change_pwd_expire_at= default, + changed_at= NULL + WHERE user_id = %(user_id)s + RETURNING user_id + ) + SELECT u.user_id AS id, + u.email, + u.role, + u.name, + TRUE AS change_password, + (CASE WHEN u.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin, + (CASE WHEN u.role = 'admin' THEN TRUE ELSE FALSE END) AS admin, + (CASE WHEN u.role = 'member' THEN TRUE ELSE FALSE END) AS member, + u.appearance, + origin + FROM u;""", + {"tenantId": tenant_id, "email": email, "internal_id": internal_id, + "role": "admin" if admin else "member", "name": name, "origin": origin, + "role_id": role_id, "data": json.dumps({"lastAnnouncementView": TimeUTC.now()}), + "user_id": user_id}) + cur.execute( + query + ) + return helper.dict_to_camel_case(cur.fetchone()) diff --git a/ee/api/chalicelib/core/webhook.py b/ee/api/chalicelib/core/webhook.py index 20e873f5c..cb7cf509e 100644 --- a/ee/api/chalicelib/core/webhook.py +++ b/ee/api/chalicelib/core/webhook.py @@ -1,6 +1,7 @@ +import requests + from chalicelib.utils import pg_client, helper from chalicelib.utils.TimeUTC import TimeUTC -import requests def get_by_id(webhook_id): @@ -121,7 +122,7 @@ def add(tenant_id, endpoint, auth_header=None, webhook_type='webhook', name="", def add_edit(tenant_id, data, replace_none=None): - if "webhookId" in data: + if data.get("webhookId") is not None: return update(tenant_id=tenant_id, webhook_id=data["webhookId"], changes={"endpoint": data["endpoint"], "authHeader": None if "authHeader" not in data else data["authHeader"], diff --git a/ee/api/chalicelib/utils/SAML2_helper.py b/ee/api/chalicelib/utils/SAML2_helper.py index 25f279d3a..a2a4e1e6e 100644 --- a/ee/api/chalicelib/utils/SAML2_helper.py +++ b/ee/api/chalicelib/utils/SAML2_helper.py @@ -1,21 +1,22 @@ from http import cookies -from urllib.parse import urlparse, parse_qsl +from urllib.parse import urlparse +from decouple import config +from fastapi import Request from onelogin.saml2.auth import OneLogin_Saml2_Auth - -from chalicelib.utils.helper import environ +from starlette.datastructures import FormData SAML2 = { "strict": True, "debug": True, "sp": { - "entityId": environ["SITE_URL"] + "/api/sso/saml2/metadata/", + "entityId": config("SITE_URL") + "/api/sso/saml2/metadata/", "assertionConsumerService": { - "url": environ["SITE_URL"] + "/api/sso/saml2/acs", + "url": config("SITE_URL") + "/api/sso/saml2/acs", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST" }, "singleLogoutService": { - "url": environ["SITE_URL"] + "/api/sso/saml2/sls", + "url": config("SITE_URL") + "/api/sso/saml2/sls", "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, "NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress", @@ -26,28 +27,28 @@ SAML2 = { } idp = None # SAML2 config handler -if environ.get("SAML2_MD_URL") is not None 
and len(environ["SAML2_MD_URL"]) > 0: +if config("SAML2_MD_URL", default=None) is not None and len(config("SAML2_MD_URL")) > 0: print("SAML2_MD_URL provided, getting IdP metadata config") from onelogin.saml2.idp_metadata_parser import OneLogin_Saml2_IdPMetadataParser - idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(environ.get("SAML2_MD_URL")) + idp_data = OneLogin_Saml2_IdPMetadataParser.parse_remote(config("SAML2_MD_URL", default=None)) idp = idp_data.get("idp") if SAML2["idp"] is None: - if len(environ.get("idp_entityId", "")) > 0 \ - and len(environ.get("idp_sso_url", "")) > 0 \ - and len(environ.get("idp_x509cert", "")) > 0: + if len(config("idp_entityId", default="")) > 0 \ + and len(config("idp_sso_url", default="")) > 0 \ + and len(config("idp_x509cert", default="")) > 0: idp = { - "entityId": environ["idp_entityId"], + "entityId": config("idp_entityId"), "singleSignOnService": { - "url": environ["idp_sso_url"], + "url": config("idp_sso_url"), "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" }, - "x509cert": environ["idp_x509cert"] + "x509cert": config("idp_x509cert") } - if len(environ.get("idp_sls_url", "")) > 0: + if len(config("idp_sls_url", default="")) > 0: idp["singleLogoutService"] = { - "url": environ["idp_sls_url"], + "url": config("idp_sls_url"), "binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect" } @@ -67,10 +68,10 @@ def init_saml_auth(req): return auth -def prepare_request(request): +async def prepare_request(request: Request): request.args = dict(request.query_params).copy() if request.query_params else {} - request.form = dict(request.json_body).copy() if request.json_body else dict( - parse_qsl(request.raw_body.decode())) if request.raw_body else {} + form: FormData = await request.form() + request.form = dict(form) cookie_str = request.headers.get("cookie", "") if "session" in cookie_str: cookie = cookies.SimpleCookie() @@ -90,7 +91,7 @@ def prepare_request(request): 'https': 'on' if request.headers.get('x-forwarded-proto', 'http') == 'https' else 'off', 'http_host': request.headers['host'], 'server_port': url_data.port, - 'script_name': "/api" + request.path, + 'script_name': "/api" + request.url.path, 'get_data': request.args.copy(), # Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144 # 'lowercase_urlencoding': True, @@ -105,9 +106,9 @@ def is_saml2_available(): def get_saml2_provider(): - return environ.get("idp_name", "saml2") if is_saml2_available() and len( - environ.get("idp_name", "saml2")) > 0 else None + return config("idp_name", default="saml2") if is_saml2_available() and len( + config("idp_name", default="saml2")) > 0 else None def get_landing_URL(jwt): - return environ["SITE_URL"] + environ.get("sso_landing", "/login?jwt=%s") % jwt + return config("SITE_URL") + config("sso_landing", default="/login?jwt=%s") % jwt diff --git a/ee/api/chalicelib/utils/assist_helper.py b/ee/api/chalicelib/utils/assist_helper.py index d31cadd1f..913435bd4 100644 --- a/ee/api/chalicelib/utils/assist_helper.py +++ b/ee/api/chalicelib/utils/assist_helper.py @@ -3,14 +3,15 @@ import hashlib import hmac from time import time +from decouple import config + from chalicelib.core import assist from chalicelib.utils import helper -from chalicelib.utils.helper import environ def __get_secret(): - return environ["assist_secret"] if environ["assist_secret"] is not None and len( - environ["assist_secret"]) > 0 else None + return config("assist_secret") if config("assist_secret", default=None) is not None \ + and 
len(config("assist_secret")) > 0 else None def get_temporary_credentials(): @@ -18,7 +19,7 @@ def get_temporary_credentials(): if secret is None: return {"errors": ["secret not defined"]} user = helper.generate_salt() - ttl = int(environ.get("assist_ttl", 48)) * 3600 + ttl = config("assist_ttl", cast=int, default=48) * 3600 timestamp = int(time()) + ttl username = str(timestamp) + ':' + user dig = hmac.new(bytes(secret, 'utf-8'), bytes(username, 'utf-8'), hashlib.sha1) @@ -34,10 +35,12 @@ def get_full_config(): servers = servers.split("|") credentials = get_temporary_credentials() if __get_secret() is not None: - servers = [{"url": s.split(",")[0], **credentials} for s in servers] + for i in range(len(servers)): + url = servers[i].split(",")[0] + servers[i] = {"url": url} if url.lower().startswith("stun") else {"url": url, **credentials} else: for i in range(len(servers)): - s = servers[i].split("|") + s = servers[i].split(",") if len(s) == 3: servers[i] = {"url": s[0], "username": s[1], "credential": s[2]} else: diff --git a/ee/api/chalicelib/utils/ch_client.py b/ee/api/chalicelib/utils/ch_client.py index cfe635b4a..babdd669a 100644 --- a/ee/api/chalicelib/utils/ch_client.py +++ b/ee/api/chalicelib/utils/ch_client.py @@ -1,14 +1,14 @@ import clickhouse_driver -from chalicelib.utils.helper import environ +from decouple import config class ClickHouseClient: __client = None def __init__(self): - self.__client = clickhouse_driver.Client(host=environ["ch_host"], + self.__client = clickhouse_driver.Client(host=config("ch_host"), database="default", - port=int(environ["ch_port"])) \ + port=config("ch_port", cast=int)) \ if self.__client is None else self.__client def __enter__(self): diff --git a/ee/api/or_dependencies.py b/ee/api/or_dependencies.py new file mode 100644 index 000000000..ec0eb5d51 --- /dev/null +++ b/ee/api/or_dependencies.py @@ -0,0 +1,45 @@ +import json +from typing import Callable + +from fastapi.routing import APIRoute +from starlette import status +from starlette.exceptions import HTTPException +from starlette.requests import Request +from starlette.responses import Response, JSONResponse + +import schemas +from chalicelib.core import traces + + +async def OR_context(request: Request) -> schemas.CurrentContext: + if hasattr(request.state, "currentContext"): + return request.state.currentContext + else: + raise Exception("currentContext not found") + + +class ORRoute(APIRoute): + def get_route_handler(self) -> Callable: + original_route_handler = super().get_route_handler() + + async def custom_route_handler(request: Request) -> Response: + try: + response: Response = await original_route_handler(request) + except HTTPException as e: + if e.status_code // 100 == 4: + response = JSONResponse(content={"errors": [e.detail]}, status_code=e.status_code) + else: + raise e + + if isinstance(response, JSONResponse): + response: JSONResponse = response + body = json.loads(response.body.decode('utf8')) + if response.status_code == 200 and body is not None and body.get("errors") is not None: + if "not found" in body["errors"][0]: + response.status_code = status.HTTP_404_NOT_FOUND + else: + response.status_code = status.HTTP_400_BAD_REQUEST + traces.trace(action=self.name, path_format=self.path_format, request=request, response=response) + return response + + return custom_route_handler diff --git a/ee/api/prepare-local.sh b/ee/api/prepare-local.sh new file mode 100755 index 000000000..c0a3db182 --- /dev/null +++ b/ee/api/prepare-local.sh @@ -0,0 +1,2 @@ +#!/bin/bash +rsync -avr 
--exclude=".*" --ignore-existing ../../api/* ./ \ No newline at end of file diff --git a/ee/api/requirements.txt b/ee/api/requirements.txt index e241b5edd..82329a1f2 100644 --- a/ee/api/requirements.txt +++ b/ee/api/requirements.txt @@ -3,11 +3,14 @@ urllib3==1.26.6 boto3==1.16.1 pyjwt==1.7.1 psycopg2-binary==2.8.6 -pytz==2020.1 -sentry-sdk==0.19.1 elasticsearch==7.9.1 jira==2.0.0 -schedule==1.1.0 -croniter==1.0.12 -clickhouse-driver==0.1.5 -python3-saml==1.12.0 \ No newline at end of file +clickhouse-driver==0.2.2 +python3-saml==1.12.0 + +fastapi==0.70.1 +python-multipart==0.0.5 +uvicorn[standard]==0.16.0 +python-decouple==3.5 +pydantic[email]==1.8.2 +apscheduler==3.8.1 \ No newline at end of file diff --git a/ee/api/routers/app/v1_api_ee.py b/ee/api/routers/app/v1_api_ee.py new file mode 100644 index 000000000..3ca55d3e9 --- /dev/null +++ b/ee/api/routers/app/v1_api_ee.py @@ -0,0 +1,12 @@ +from chalicelib.utils import assist_helper +from routers.base import get_routers + +public_app, app, app_apikey = get_routers() + + +@app_apikey.get('/v1/assist/credentials', tags=["api"]) +def get_assist_credentials(): + credentials = assist_helper.get_temporary_credentials() + if "errors" in credentials: + return credentials + return {"data": credentials} diff --git a/ee/api/routers/base.py b/ee/api/routers/base.py new file mode 100644 index 000000000..5c665b2d1 --- /dev/null +++ b/ee/api/routers/base.py @@ -0,0 +1,14 @@ +from fastapi import APIRouter, Depends + +from auth.auth_apikey import APIKeyAuth +from auth.auth_jwt import JWTAuth +from auth.auth_project import ProjectAuthorizer +from or_dependencies import ORRoute + + +def get_routers() -> (APIRouter, APIRouter, APIRouter): + public_app = APIRouter(route_class=ORRoute) + app = APIRouter(dependencies=[Depends(JWTAuth()), Depends(ProjectAuthorizer("projectId"))], route_class=ORRoute) + app_apikey = APIRouter(dependencies=[Depends(APIKeyAuth()), Depends(ProjectAuthorizer("projectKey"))], + route_class=ORRoute) + return public_app, app, app_apikey diff --git a/ee/api/routers/core_dynamic.py b/ee/api/routers/core_dynamic.py new file mode 100644 index 000000000..cf5e7378f --- /dev/null +++ b/ee/api/routers/core_dynamic.py @@ -0,0 +1,238 @@ +from typing import Optional + +from decouple import config +from fastapi import Body, Depends, HTTPException, status, BackgroundTasks +from starlette.responses import RedirectResponse + +import schemas +import schemas_ee +from chalicelib.core import integrations_manager +from chalicelib.core import sessions +from chalicelib.core import tenants, users, metadata, projects, license, assist +from chalicelib.core import webhook +from chalicelib.core.collaboration_slack import Slack +from chalicelib.utils import captcha, SAML2_helper +from chalicelib.utils import helper +from or_dependencies import OR_context +from routers.base import get_routers + +public_app, app, app_apikey = get_routers() + + +@public_app.get('/signup', tags=['signup']) +def get_all_signup(): + return {"data": {"tenants": tenants.tenants_exists(), + "sso": SAML2_helper.is_saml2_available(), + "ssoProvider": SAML2_helper.get_saml2_provider(), + "edition": helper.get_edition()}} + + +@public_app.post('/login', tags=["authentication"]) +def login(data: schemas.UserLoginSchema = Body(...)): + if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response): + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="Invalid captcha." 
+ ) + + r = users.authenticate(data.email, data.password, for_plugin=False) + if r is None: + raise HTTPException( + status_code=status.HTTP_401_UNAUTHORIZED, + detail="You’ve entered invalid Email or Password." + ) + + tenant_id = r.pop("tenantId") + + r["limits"] = { + "teamMember": -1, + "projects": -1, + "metadata": metadata.get_remaining_metadata_with_count(tenant_id)} + + c = tenants.get_by_tenant_id(tenant_id) + c.pop("createdAt") + c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True, + stack_integrations=True, version=True, user_id=r["id"]) + c["smtp"] = helper.has_smtp() + c["iceServers"] = assist.get_ice_servers() + r["smtp"] = c["smtp"] + r["iceServers"] = c["iceServers"] + return { + 'jwt': r.pop('jwt'), + 'data': { + "user": r, + "client": c + } + } + + +@app.get('/account', tags=['accounts']) +def get_account(context: schemas.CurrentContext = Depends(OR_context)): + r = users.get(tenant_id=context.tenant_id, user_id=context.user_id) + return { + 'data': { + **r, + "limits": { + "teamMember": -1, + "projects": -1, + "metadata": metadata.get_remaining_metadata_with_count(context.tenant_id) + }, + **license.get_status(context.tenant_id), + "smtp": helper.has_smtp(), + "saml2": SAML2_helper.is_saml2_available(), + "iceServers": assist.get_ice_servers() + } + } + + +@app.get('/projects/limit', tags=['projects']) +def get_projects_limit(context: schemas.CurrentContext = Depends(OR_context)): + return {"data": { + "current": projects.count_by_tenant(tenant_id=context.tenant_id), + "remaining": -1 + }} + + +@app.get('/projects/{projectId}', tags=['projects']) +def get_project(projectId: int, last_tracker_version: Optional[str] = None, + context: schemas.CurrentContext = Depends(OR_context)): + data = projects.get_project(tenant_id=context.tenant_id, project_id=projectId, include_last_session=True, + include_gdpr=True, last_tracker_version=last_tracker_version) + if data is None: + return {"errors": ["project not found"]} + return {"data": data} + + +@app.put('/integrations/slack', tags=['integrations']) +@app.post('/integrations/slack', tags=['integrations']) +def add_slack_client(data: schemas.AddSlackSchema, context: schemas.CurrentContext = Depends(OR_context)): + n = Slack.add_channel(tenant_id=context.tenant_id, url=data.url, name=data.name) + if n is None: + return { + "errors": ["We couldn't send you a test message on your Slack channel. Please verify your webhook url."] + } + return {"data": n} + + +@app.put('/integrations/slack/{integrationId}', tags=['integrations']) +@app.post('/integrations/slack/{integrationId}', tags=['integrations']) +def edit_slack_integration(integrationId: int, data: schemas.EditSlackSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + if len(data.url) > 0: + old = webhook.get(tenant_id=context.tenant_id, webhook_id=integrationId) + if old["endpoint"] != data.url: + if not Slack.say_hello(data.url): + return { + "errors": [ + "We couldn't send you a test message on your Slack channel. 
Please verify your webhook url."] + } + return {"data": webhook.update(tenant_id=context.tenant_id, webhook_id=integrationId, + changes={"name": data.name, "endpoint": data.url})} + + +# this endpoint supports both jira & github based on `provider` attribute +@app.post('/integrations/issues', tags=["integrations"]) +def add_edit_jira_cloud_github(data: schemas.JiraGithubSchema, + context: schemas.CurrentContext = Depends(OR_context)): + provider = data.provider.upper() + error, integration = integrations_manager.get_integration(tool=provider, tenant_id=context.tenant_id, + user_id=context.user_id) + if error is not None: + return error + return {"data": integration.add_edit(data=data.dict())} + + +@app.post('/client/members', tags=["client"]) +@app.put('/client/members', tags=["client"]) +def add_member(background_tasks: BackgroundTasks, data: schemas_ee.CreateMemberSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + return users.create_member(tenant_id=context.tenant_id, user_id=context.user_id, data=data.dict(), + background_tasks=background_tasks) + + +@public_app.get('/users/invitation', tags=['users']) +def process_invitation_link(token: str): + if token is None or len(token) < 64: + return {"errors": ["please provide a valid invitation"]} + user = users.get_by_invitation_token(token) + if user is None: + return {"errors": ["invitation not found"]} + if user["expiredInvitation"]: + return {"errors": ["expired invitation, please ask your admin to send a new one"]} + if user["expiredChange"] is not None and not user["expiredChange"] \ + and user["changePwdToken"] is not None and user["changePwdAge"] < -5 * 60: + pass_token = user["changePwdToken"] + else: + pass_token = users.allow_password_change(user_id=user["userId"]) + return RedirectResponse(url=config("SITE_URL") + config("change_password_link") % (token, pass_token)) + + +@public_app.post('/password/reset', tags=["users"]) +@public_app.put('/password/reset', tags=["users"]) +def change_password_by_invitation(data: schemas.EditPasswordByInvitationSchema = Body(...)): + if data is None or len(data.invitation) < 64 or len(data.passphrase) < 8: + return {"errors": ["please provide a valid invitation & pass"]} + user = users.get_by_invitation_token(token=data.invitation, pass_token=data.passphrase) + if user is None: + return {"errors": ["invitation not found"]} + if user["expiredChange"]: + return {"errors": ["expired change, please re-use the invitation link"]} + + return users.set_password_invitation(new_password=data.password, user_id=user["userId"], tenant_id=user["tenantId"]) + + +@app.put('/client/members/{memberId}', tags=["client"]) +@app.post('/client/members/{memberId}', tags=["client"]) +def edit_member(memberId: int, data: schemas_ee.EditMemberSchema, + context: schemas.CurrentContext = Depends(OR_context)): + return users.edit(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data.dict(), + user_id_to_update=memberId) + + +@app.get('/metadata/session_search', tags=["metadata"]) +def search_sessions_by_metadata(key: str, value: str, projectId: Optional[int] = None, + context: schemas.CurrentContext = Depends(OR_context)): + if key is None or value is None or len(value) == 0 and len(key) == 0: + return {"errors": ["please provide a key&value for search"]} + + if projectId is not None and not projects.is_authorized(project_id=projectId, tenant_id=context.tenant_id, + user_id=context.user_id): + return {"errors": ["unauthorized project"]} + if len(value) == 0: + return {"errors": 
["please provide a value for search"]} + if len(key) == 0: + return {"errors": ["please provide a key for search"]} + return { + "data": sessions.search_by_metadata(tenant_id=context.tenant_id, user_id=context.user_id, m_value=value, + m_key=key, project_id=projectId)} + + +@app.get('/plans', tags=["plan"]) +def get_current_plan(context: schemas.CurrentContext = Depends(OR_context)): + return { + "data": license.get_status(context.tenant_id) + } + + +@public_app.get('/general_stats', tags=["private"], include_in_schema=False) +def get_general_stats(): + return {"data": {"sessions:": sessions.count_all()}} + + +@app.get('/client', tags=['projects']) +def get_client(context: schemas.CurrentContext = Depends(OR_context)): + r = tenants.get_by_tenant_id(context.tenant_id) + if r is not None: + r.pop("createdAt") + r["projects"] = projects.get_projects(tenant_id=context.tenant_id, recording_state=True, recorded=True, + stack_integrations=True, version=True, user_id=context.user_id) + return { + 'data': r + } + + +@app.get('/projects', tags=['projects']) +def get_projects(last_tracker_version: Optional[str] = None, context: schemas.CurrentContext = Depends(OR_context)): + return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True, + stack_integrations=True, version=True, + last_tracker_version=last_tracker_version, user_id=context.user_id)} diff --git a/ee/api/routers/crons/core_dynamic_crons.py b/ee/api/routers/crons/core_dynamic_crons.py new file mode 100644 index 000000000..bdde42a15 --- /dev/null +++ b/ee/api/routers/crons/core_dynamic_crons.py @@ -0,0 +1,18 @@ +from chalicelib.core import telemetry, unlock + + +def telemetry_cron() -> None: + telemetry.compute() + + +# @app.schedule(Cron('0/60', '*', '*', '*', '?', '*')) +def unlock_cron() -> None: + print("validating license") + unlock.check() + print(f"valid: {unlock.is_valid()}") + + +cron_jobs = [ + {"func": telemetry_cron, "trigger": "cron", "day_of_week": "*"}, + {"func": unlock_cron, "trigger": "cron", "hour": "*"} +] diff --git a/ee/api/routers/ee.py b/ee/api/routers/ee.py new file mode 100644 index 000000000..1a9589eaa --- /dev/null +++ b/ee/api/routers/ee.py @@ -0,0 +1,60 @@ +from chalicelib.core import roles +from chalicelib.core import unlock +from chalicelib.utils import assist_helper + +unlock.check() + +from or_dependencies import OR_context +from routers.base import get_routers +import schemas +import schemas_ee +from fastapi import Depends, Body + +public_app, app, app_apikey = get_routers() + + +@app.get('/client/roles', tags=["client", "roles"]) +def get_roles(context: schemas.CurrentContext = Depends(OR_context)): + return { + 'data': roles.get_roles(tenant_id=context.tenant_id) + } + + +@app.post('/client/roles', tags=["client", "roles"]) +@app.put('/client/roles', tags=["client", "roles"]) +def add_role(data: schemas_ee.RolePayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)): + data = roles.create(tenant_id=context.tenant_id, user_id=context.user_id, data=data) + if "errors" in data: + return data + + return { + 'data': data + } + + +@app.post('/client/roles/{roleId}', tags=["client", "roles"]) +@app.put('/client/roles/{roleId}', tags=["client", "roles"]) +def edit_role(roleId: int, data: schemas_ee.RolePayloadSchema = Body(...), + context: schemas.CurrentContext = Depends(OR_context)): + data = roles.update(tenant_id=context.tenant_id, user_id=context.user_id, role_id=roleId, data=data) + if "errors" in data: + return data + + return 
{ + 'data': data + } + + +@app.delete('/client/roles/{roleId}', tags=["client", "roles"]) +def delete_role(roleId: int, context: schemas.CurrentContext = Depends(OR_context)): + data = roles.delete(tenant_id=context.tenant_id, user_id=context.user_id, role_id=roleId) + if "errors" in data: + return data + return { + 'data': data + } + + +@app.get('/assist/credentials', tags=["assist"]) +def get_assist_credentials(): + return {"data": assist_helper.get_full_config()} diff --git a/ee/api/chalicelib/blueprints/bp_saml.py b/ee/api/routers/saml.py similarity index 64% rename from ee/api/chalicelib/blueprints/bp_saml.py rename to ee/api/routers/saml.py index 814d93a9c..50723a1db 100644 --- a/ee/api/chalicelib/blueprints/bp_saml.py +++ b/ee/api/routers/saml.py @@ -1,35 +1,32 @@ -from chalice import Blueprint +from fastapi import HTTPException +from fastapi import Request, Response -from chalicelib import _overrides from chalicelib.utils import SAML2_helper from chalicelib.utils.SAML2_helper import prepare_request, init_saml_auth +from routers.base import get_routers -app = Blueprint(__name__) -_overrides.chalice_app(app) - -from chalicelib.utils.helper import environ +public_app, app, app_apikey = get_routers() +from decouple import config from onelogin.saml2.auth import OneLogin_Saml2_Logout_Request -from chalice import Response from chalicelib.core import users, tenants, roles +from starlette.responses import RedirectResponse +from starlette import status -@app.route('/sso/saml2', methods=['GET'], authorizer=None) -def start_sso(): - app.current_request.path = '' - req = prepare_request(request=app.current_request) +@public_app.get("/sso/saml2", tags=["saml2"]) +async def start_sso(request: Request): + request.path = '' + req = await prepare_request(request=request) auth = init_saml_auth(req) sso_built_url = auth.login() - return Response( - status_code=307, - body='', - headers={'Location': sso_built_url, 'Content-Type': 'text/plain'}) + return RedirectResponse(url=sso_built_url) -@app.route('/sso/saml2/acs', methods=['POST'], content_types=['application/x-www-form-urlencoded'], authorizer=None) -def process_sso_assertion(): - req = prepare_request(request=app.current_request) +@public_app.post('/sso/saml2/acs', tags=["saml2"]) +async def process_sso_assertion(request: Request): + req = await prepare_request(request=request) session = req["cookie"]["session"] auth = init_saml_auth(req) @@ -79,11 +76,19 @@ def process_sso_assertion(): or admin_privileges[0].lower() == "false") if existing is None: - print("== new user ==") - users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=admin_privileges, - origin=SAML2_helper.get_saml2_provider(), - name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])), - internal_id=internal_id, role_id=role["roleId"]) + deleted = users.get_deleted_user_by_email(auth.get_nameid()) + if deleted is not None: + print("== restore deleted user ==") + users.restore_sso_user(user_id=deleted["userId"], tenant_id=t['tenantId'], email=email, + admin=admin_privileges, origin=SAML2_helper.get_saml2_provider(), + name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])), + internal_id=internal_id, role_id=role["roleId"]) + else: + print("== new user ==") + users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=admin_privileges, + origin=SAML2_helper.get_saml2_provider(), + name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])), + internal_id=internal_id, role_id=role["roleId"]) else: if 
t['tenantId'] != existing["tenantId"]: print("user exists for a different tenant") @@ -94,19 +99,18 @@ def process_sso_assertion(): changes={"origin": SAML2_helper.get_saml2_provider(), "internal_id": internal_id}) expiration = auth.get_session_expiration() expiration = expiration if expiration is not None and expiration > 10 * 60 \ - else int(environ.get("sso_exp_delta_seconds", 24 * 60 * 60)) + else int(config("sso_exp_delta_seconds", cast=int, default=24 * 60 * 60)) jwt = users.authenticate_sso(email=email, internal_id=internal_id, exp=expiration) if jwt is None: return {"errors": ["null JWT"]} return Response( - status_code=302, - body='', - headers={'Location': SAML2_helper.get_landing_URL(jwt), 'Content-Type': 'text/plain'}) + status_code=status.HTTP_302_FOUND, + headers={'Location': SAML2_helper.get_landing_URL(jwt)}) -@app.route('/sso/saml2/sls', methods=['GET'], authorizer=None) -def process_sls_assertion(): - req = prepare_request(request=app.current_request) +@public_app.get('/sso/saml2/sls', tags=["saml2"]) +async def process_sls_assertion(request: Request): + req = await prepare_request(request=request) session = req["cookie"]["session"] auth = init_saml_auth(req) request_id = None @@ -134,20 +138,14 @@ def process_sls_assertion(): print("Preprocessed SLS-Request by SP") if url is not None: - return Response( - status_code=307, - body='', - headers={'Location': url, 'Content-Type': 'text/plain'}) + return RedirectResponse(url=url) - return Response( - status_code=307, - body='', - headers={'Location': environ["SITE_URL"], 'Content-Type': 'text/plain'}) + return RedirectResponse(url=config("SITE_URL")) -@app.route('/sso/saml2/metadata', methods=['GET'], authorizer=None) -def saml2_metadata(): - req = prepare_request(request=app.current_request) +@public_app.get('/sso/saml2/metadata', tags=["saml2"]) +async def saml2_metadata(request: Request): + req = await prepare_request(request=request) auth = init_saml_auth(req) settings = auth.get_settings() metadata = settings.get_sp_metadata() @@ -155,10 +153,10 @@ def saml2_metadata(): if len(errors) == 0: return Response( - status_code=200, - body=metadata, + status_code=status.HTTP_200_OK, + content=metadata, headers={'Content-Type': 'text/xml'}) else: - return Response( - status_code=500, - body=', '.join(errors)) + raise HTTPException( + status_code=status.HTTP_500_INTERNAL_SERVER_ERROR, + detail=', '.join(errors)) diff --git a/ee/api/run-dev.sh b/ee/api/run-dev.sh new file mode 100755 index 000000000..76682286d --- /dev/null +++ b/ee/api/run-dev.sh @@ -0,0 +1,3 @@ +#!/bin/zsh + +uvicorn app:app --reload \ No newline at end of file diff --git a/ee/api/schemas_ee.py b/ee/api/schemas_ee.py new file mode 100644 index 000000000..59a58f94b --- /dev/null +++ b/ee/api/schemas_ee.py @@ -0,0 +1,24 @@ +from typing import Optional, List + +from pydantic import BaseModel, Field + +import schemas + + +class RolePayloadSchema(BaseModel): + name: str = Field(...) + description: Optional[str] = Field(None) + permissions: List[str] = Field(...) + all_projects: bool = Field(True) + projects: List[int] = Field([]) + + class Config: + alias_generator = schemas.attribute_to_camel_case + + +class CreateMemberSchema(schemas.CreateMemberSchema): + roleId: Optional[int] = Field(None) + + +class EditMemberSchema(schemas.EditMemberSchema): + roleId: int = Field(...) 
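(Editorial sketch, not part of the diff: RolePayloadSchema above accepts camelCase JSON while exposing snake_case attributes through its alias_generator. Assuming schemas.attribute_to_camel_case behaves like the helper below, parsing works as follows under pydantic 1.8.2 from requirements.txt:)

from typing import List, Optional
from pydantic import BaseModel, Field

def attribute_to_camel_case(snake_str: str) -> str:
    # assumed equivalent of schemas.attribute_to_camel_case
    first, *rest = snake_str.split("_")
    return first + "".join(part.title() for part in rest)

class RolePayloadSchema(BaseModel):
    name: str = Field(...)
    description: Optional[str] = Field(None)
    permissions: List[str] = Field(...)
    all_projects: bool = Field(True)
    projects: List[int] = Field([])

    class Config:
        alias_generator = attribute_to_camel_case

# camelCase keys from the frontend map onto snake_case attributes:
role = RolePayloadSchema.parse_obj({"name": "QA", "permissions": ["SESSION_REPLAY"],
                                    "allProjects": False, "projects": [1, 2]})
assert role.all_projects is False and role.projects == [1, 2]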
diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/1.4.0/1.4.0.sql b/ee/scripts/helm/db/init_dbs/clickhouse/1.4.0/1.4.0.sql new file mode 100644 index 000000000..e259bdf69 --- /dev/null +++ b/ee/scripts/helm/db/init_dbs/clickhouse/1.4.0/1.4.0.sql @@ -0,0 +1,4 @@ +ALTER TABLE sessions + ADD COLUMN IF NOT EXISTS utm_source Nullable(String), + ADD COLUMN IF NOT EXISTS utm_medium Nullable(String), + ADD COLUMN IF NOT EXISTS utm_campaign Nullable(String); diff --git a/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions.sql b/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions.sql index 77d430d85..54901bdc4 100644 --- a/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions.sql +++ b/ee/scripts/helm/db/init_dbs/clickhouse/create/sessions.sql @@ -1,22 +1,26 @@ -CREATE TABLE sessions ( - session_id UInt64, - project_id UInt32, - tracker_version String, - rev_id Nullable(String), - user_uuid UUID, - user_os String, - user_os_version Nullable(String), - user_browser String, - user_browser_version Nullable(String), - user_device Nullable(String), - user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), - user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 
'SS'=122), - datetime DateTime, - duration UInt32, - pages_count UInt16, - events_count UInt16, - errors_count UInt16 -) ENGINE = ReplacingMergeTree( duration ) -PARTITION BY toDate(datetime) -ORDER BY (project_id, datetime, session_id) -TTL datetime + INTERVAL 1 MONTH; +CREATE TABLE sessions +( + session_id UInt64, + project_id UInt32, + tracker_version String, + rev_id Nullable(String), + user_uuid UUID, + user_os String, + user_os_version Nullable(String), + user_browser String, + user_browser_version Nullable(String), + user_device Nullable(String), + user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2), + user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122), + datetime DateTime, + duration UInt32, + pages_count UInt16, + events_count UInt16, + errors_count UInt16, + utm_source Nullable(String), + utm_medium Nullable(String), + utm_campaign Nullable(String) +) ENGINE = ReplacingMergeTree(duration) + PARTITION BY toDate(datetime) + ORDER BY (project_id, datetime, session_id) + TTL datetime + INTERVAL 1 MONTH; diff --git a/ee/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql b/ee/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql new file 
mode 100644 index 000000000..94d5fced6 --- /dev/null +++ b/ee/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql @@ -0,0 +1,167 @@ +BEGIN; +CREATE OR REPLACE FUNCTION openreplay_version() + RETURNS text AS +$$ +SELECT 'v1.4.0-ee' +$$ LANGUAGE sql IMMUTABLE; + +CREATE TABLE IF NOT EXISTS traces +( + user_id integer NULL REFERENCES users (user_id) ON DELETE CASCADE, + tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, + created_at bigint NOT NULL DEFAULT (EXTRACT(EPOCH FROM now() at time zone 'utc') * 1000)::bigint, + auth text NULL, + action text NOT NULL, + method text NOT NULL, + path_format text NOT NULL, + endpoint text NOT NULL, + payload jsonb NULL, + parameters jsonb NULL, + status int NULL +); +CREATE INDEX IF NOT EXISTS traces_user_id_idx ON traces (user_id); +CREATE INDEX IF NOT EXISTS traces_tenant_id_idx ON traces (tenant_id); + +CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); + +CREATE INDEX IF NOT EXISTS pages_first_contentful_paint_time_idx ON events.pages (first_contentful_paint_time) WHERE first_contentful_paint_time > 0; +CREATE INDEX IF NOT EXISTS pages_dom_content_loaded_time_idx ON events.pages (dom_content_loaded_time) WHERE dom_content_loaded_time > 0; +CREATE INDEX IF NOT EXISTS pages_first_paint_time_idx ON events.pages (first_paint_time) WHERE first_paint_time > 0; +CREATE INDEX IF NOT EXISTS pages_ttfb_idx ON events.pages (ttfb) WHERE ttfb > 0; +CREATE INDEX IF NOT EXISTS pages_time_to_interactive_idx ON events.pages (time_to_interactive) WHERE time_to_interactive > 0; +CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_loadgt0NN_idx ON events.pages (session_id, timestamp) WHERE load_time > 0 AND load_time IS NOT NULL; +CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_visualgt0nn_idx ON events.pages (session_id, timestamp) WHERE visually_complete > 0 AND visually_complete IS NOT NULL; +CREATE INDEX IF NOT EXISTS pages_timestamp_metgt0_idx ON events.pages (timestamp) WHERE response_time > 0 OR + first_paint_time > 0 OR + dom_content_loaded_time > 0 OR + ttfb > 0 OR + time_to_interactive > 0; +CREATE INDEX IF NOT EXISTS pages_session_id_speed_indexgt0nn_idx ON events.pages (session_id, speed_index) WHERE speed_index > 0 AND speed_index IS NOT NULL; +CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_dom_building_timegt0nn_idx ON events.pages (session_id, timestamp, dom_building_time) WHERE dom_building_time > 0 AND dom_building_time IS NOT NULL; +CREATE INDEX IF NOT EXISTS issues_project_id_idx ON issues (project_id); + +CREATE INDEX IF NOT EXISTS errors_project_id_error_id_js_exception_idx ON public.errors (project_id, error_id) WHERE source = 'js_exception'; +CREATE INDEX IF NOT EXISTS errors_project_id_error_id_idx ON public.errors (project_id, error_id); +CREATE INDEX IF NOT EXISTS errors_project_id_error_id_integration_idx ON public.errors (project_id, error_id) WHERE source != 'js_exception'; + +CREATE INDEX IF NOT EXISTS sessions_start_ts_idx ON public.sessions (start_ts) WHERE duration > 0; +CREATE INDEX IF NOT EXISTS sessions_project_id_idx ON public.sessions (project_id) WHERE duration > 0; +CREATE INDEX IF NOT EXISTS sessions_session_id_project_id_start_ts_idx ON sessions (session_id, project_id, start_ts) WHERE duration > 0; + +CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); +CREATE INDEX IF NOT EXISTS jobs_project_id_idx ON jobs (project_id); +CREATE INDEX IF NOT EXISTS 
errors_session_id_timestamp_error_id_idx ON events.errors (session_id, timestamp, error_id); +CREATE INDEX IF NOT EXISTS errors_error_id_timestamp_idx ON events.errors (error_id, timestamp); +CREATE INDEX IF NOT EXISTS errors_timestamp_error_id_session_id_idx ON events.errors (timestamp, error_id, session_id); +CREATE INDEX IF NOT EXISTS errors_error_id_timestamp_session_id_idx ON events.errors (error_id, timestamp, session_id); +CREATE INDEX IF NOT EXISTS resources_timestamp_idx ON events.resources (timestamp); +CREATE INDEX IF NOT EXISTS resources_success_idx ON events.resources (success); +CREATE INDEX IF NOT EXISTS projects_project_key_idx ON public.projects (project_key); +CREATE INDEX IF NOT EXISTS resources_timestamp_type_durationgt0NN_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL; +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_idx ON events.resources (session_id, timestamp); +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_type_idx ON events.resources (session_id, timestamp, type); +CREATE INDEX IF NOT EXISTS resources_timestamp_type_durationgt0NN_noFetch_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL AND type != 'fetch'; +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_url_host_fail_idx ON events.resources (session_id, timestamp, url_host) WHERE success = FALSE; +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_url_host_firstparty_idx ON events.resources (session_id, timestamp, url_host) WHERE type IN ('fetch', 'script'); +CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_duration_durationgt0NN_img_idx ON events.resources (session_id, timestamp, duration) WHERE duration > 0 AND duration IS NOT NULL AND type = 'img'; +CREATE INDEX IF NOT EXISTS resources_timestamp_session_id_idx ON events.resources (timestamp, session_id); + +DROP TRIGGER IF EXISTS on_insert_or_update ON projects; +CREATE TRIGGER on_insert_or_update + AFTER INSERT OR UPDATE + ON projects + FOR EACH ROW +EXECUTE PROCEDURE notify_project(); + +UPDATE tenants +SET name='' +WHERE name ISNULL; +ALTER TABLE tenants + ALTER COLUMN name SET NOT NULL; + +ALTER TABLE sessions + ADD COLUMN IF NOT EXISTS utm_source text NULL DEFAULT NULL, + ADD COLUMN IF NOT EXISTS utm_medium text NULL DEFAULT NULL, + ADD COLUMN IF NOT EXISTS utm_campaign text NULL DEFAULT NULL; + +CREATE INDEX IF NOT EXISTS sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops); +CREATE INDEX IF NOT EXISTS sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops); +CREATE INDEX IF NOT EXISTS sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops); +CREATE INDEX IF NOT EXISTS requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE; + +DROP INDEX IF EXISTS sessions_project_id_user_browser_idx1; +DROP INDEX IF EXISTS sessions_project_id_user_country_idx1; +ALTER INDEX IF EXISTS platform_idx RENAME TO sessions_platform_idx; +ALTER INDEX IF EXISTS events.resources_duration_idx RENAME TO resources_duration_durationgt0_idx; +DROP INDEX IF EXISTS projects_project_key_idx1; +CREATE INDEX IF NOT EXISTS errors_parent_error_id_idx ON errors (parent_error_id); + +CREATE INDEX IF NOT EXISTS performance_session_id_idx ON events.performance (session_id); +CREATE INDEX IF NOT EXISTS performance_timestamp_idx ON events.performance (timestamp); +CREATE INDEX IF NOT EXISTS performance_session_id_timestamp_idx ON 
events.performance (session_id, timestamp); +CREATE INDEX IF NOT EXISTS performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0; +CREATE INDEX IF NOT EXISTS performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0; + +CREATE TABLE IF NOT EXISTS metrics +( + metric_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer REFERENCES users (user_id) ON DELETE SET NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp +); +CREATE INDEX IF NOT EXISTS metrics_user_id_is_public_idx ON public.metrics (user_id, is_public); +CREATE TABLE IF NOT EXISTS metric_series +( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp +); +CREATE INDEX IF NOT EXISTS metric_series_metric_id_idx ON public.metric_series (metric_id); +CREATE INDEX IF NOT EXISTS funnels_project_id_idx ON public.funnels (project_id); + + +CREATE TABLE IF NOT EXISTS searches +( + search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False +); + +CREATE INDEX IF NOT EXISTS searches_user_id_is_public_idx ON public.searches (user_id, is_public); +CREATE INDEX IF NOT EXISTS searches_project_id_idx ON public.searches (project_id); +CREATE INDEX IF NOT EXISTS alerts_project_id_idx ON alerts (project_id); + +ALTER TABLE alerts + ADD COLUMN IF NOT EXISTS series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE; + +CREATE INDEX IF NOT EXISTS alerts_series_id_idx ON alerts (series_id); +UPDATE alerts +SET options=jsonb_set(options, '{change}', '"change"') +WHERE detection_method = 'change' + AND options -> 'change' ISNULL; + +ALTER TABLE roles + ADD COLUMN IF NOT EXISTS all_projects bool NOT NULL DEFAULT TRUE; + +CREATE TABLE IF NOT EXISTS roles_projects +( + role_id integer NOT NULL REFERENCES roles (role_id) ON DELETE CASCADE, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + CONSTRAINT roles_projects_pkey PRIMARY KEY (role_id, project_id) +); +CREATE INDEX IF NOT EXISTS roles_projects_role_id_idx ON roles_projects (role_id); +CREATE INDEX IF NOT EXISTS roles_projects_project_id_idx ON roles_projects (project_id); + +COMMIT; \ No newline at end of file diff --git a/ee/scripts/helm/db/init_dbs/postgresql/init_schema.sql b/ee/scripts/helm/db/init_dbs/postgresql/init_schema.sql index 0b7e8fa40..35f3a5dd4 100644 --- a/ee/scripts/helm/db/init_dbs/postgresql/init_schema.sql +++ b/ee/scripts/helm/db/init_dbs/postgresql/init_schema.sql @@ -3,6 +3,12 @@ BEGIN; CREATE SCHEMA IF NOT EXISTS events_common; CREATE SCHEMA IF NOT EXISTS events; +CREATE OR REPLACE FUNCTION openreplay_version() + RETURNS text AS +$$ +SELECT 'v1.4.0-ee' +$$ LANGUAGE sql IMMUTABLE; + -- --- accounts.sql --- CREATE OR REPLACE FUNCTION generate_api_key(length 
integer) RETURNS text AS @@ -114,7 +120,7 @@ $$ ( tenant_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, user_id text NOT NULL DEFAULT generate_api_key(20), - name text, + name text NOT NULL, api_key text UNIQUE default generate_api_key(20) not null, created_at timestamp without time zone NOT NULL DEFAULT (now() at time zone 'utc'), deleted_at timestamp without time zone NULL DEFAULT NULL, @@ -131,14 +137,15 @@ $$ CREATE TABLE roles ( - role_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, - tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, - name text NOT NULL, - description text DEFAULT NULL, - permissions text[] NOT NULL DEFAULT '{}', - protected bool NOT NULL DEFAULT FALSE, - created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), - deleted_at timestamp NULL DEFAULT NULL + role_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, + name text NOT NULL, + description text DEFAULT NULL, + permissions text[] NOT NULL DEFAULT '{}', + protected bool NOT NULL DEFAULT FALSE, + all_projects bool NOT NULL DEFAULT TRUE, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + deleted_at timestamp NULL DEFAULT NULL ); CREATE TYPE user_role AS ENUM ('owner', 'admin', 'member'); @@ -280,35 +287,21 @@ $$ }'::jsonb -- ?????? ); - CREATE INDEX ON public.projects (project_key); - --- --- alerts.sql --- - - CREATE TYPE alert_detection_method AS ENUM ('threshold', 'change'); - - CREATE TABLE alerts - ( - alert_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, - project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, - name text NOT NULL, - description text NULL DEFAULT NULL, - active boolean NOT NULL DEFAULT TRUE, - detection_method alert_detection_method NOT NULL, - query jsonb NOT NULL, - deleted_at timestamp NULL DEFAULT NULL, - created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), - options jsonb NOT NULL DEFAULT '{ - "renotifyInterval": 1440 - }'::jsonb - ); - - - CREATE TRIGGER on_insert_or_update_or_delete - AFTER INSERT OR UPDATE OR DELETE - ON alerts + CREATE INDEX projects_project_key_idx ON public.projects (project_key); + CREATE TRIGGER on_insert_or_update + AFTER INSERT OR UPDATE + ON projects FOR EACH ROW - EXECUTE PROCEDURE notify_alert(); + EXECUTE PROCEDURE notify_project(); + CREATE TABLE roles_projects + ( + role_id integer NOT NULL REFERENCES roles (role_id) ON DELETE CASCADE, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + CONSTRAINT roles_projects_pkey PRIMARY KEY (role_id, project_id) + ); + CREATE INDEX roles_projects_role_id_idx ON roles_projects (role_id); + CREATE INDEX roles_projects_project_id_idx ON roles_projects (project_id); -- --- webhooks.sql --- @@ -350,10 +343,10 @@ $$ CONSTRAINT notification_tenant_xor_user CHECK ( tenant_id NOTNULL AND user_id ISNULL OR tenant_id ISNULL AND user_id NOTNULL ) ); - CREATE INDEX notifications_user_id_index ON public.notifications (user_id); - CREATE INDEX notifications_tenant_id_index ON public.notifications (tenant_id); - CREATE INDEX notifications_created_at_index ON public.notifications (created_at DESC); - CREATE INDEX notifications_created_at_epoch_idx ON public.notifications (CAST(EXTRACT(EPOCH FROM created_at) * 1000 AS BIGINT) DESC); + CREATE INDEX notifications_user_id_index ON notifications (user_id); + CREATE INDEX notifications_tenant_id_index ON notifications (tenant_id); + CREATE INDEX 
notifications_created_at_index ON notifications (created_at DESC); + CREATE INDEX notifications_created_at_epoch_idx ON notifications (CAST(EXTRACT(EPOCH FROM created_at) * 1000 AS BIGINT) DESC); CREATE TABLE user_viewed_notifications ( @@ -376,7 +369,8 @@ $$ is_public boolean NOT NULL DEFAULT False ); - CREATE INDEX ON public.funnels (user_id, is_public); + CREATE INDEX funnels_user_id_is_public_idx ON public.funnels (user_id, is_public); + CREATE INDEX funnels_project_id_idx ON public.funnels (project_id); -- --- announcements.sql --- @@ -461,9 +455,10 @@ $$ context_string text NOT NULL, context jsonb DEFAULT NULL ); - CREATE INDEX ON issues (issue_id, type); + CREATE INDEX issues_issue_id_type_idx ON issues (issue_id, type); CREATE INDEX issues_context_string_gin_idx ON public.issues USING GIN (context_string gin_trgm_ops); CREATE INDEX issues_project_id_issue_id_idx ON public.issues (project_id, issue_id); + CREATE INDEX issues_project_id_idx ON issues (project_id); -- --- errors.sql --- @@ -482,12 +477,16 @@ $$ stacktrace jsonb, --to save the stacktrace and not query S3 another time stacktrace_parsed_at timestamp ); - CREATE INDEX errors_error_id_idx ON errors (error_id); - CREATE INDEX ON errors (project_id, source); + CREATE INDEX errors_project_id_source_idx ON errors (project_id, source); CREATE INDEX errors_message_gin_idx ON public.errors USING GIN (message gin_trgm_ops); CREATE INDEX errors_name_gin_idx ON public.errors USING GIN (name gin_trgm_ops); CREATE INDEX errors_project_id_idx ON public.errors (project_id); CREATE INDEX errors_project_id_status_idx ON public.errors (project_id, status); + CREATE INDEX errors_project_id_error_id_js_exception_idx ON public.errors (project_id, error_id) WHERE source = 'js_exception'; + CREATE INDEX errors_project_id_error_id_idx ON public.errors (project_id, error_id); + CREATE INDEX errors_project_id_error_id_integration_idx ON public.errors (project_id, error_id) WHERE source != 'js_exception'; + CREATE INDEX errors_error_id_idx ON errors (error_id); + CREATE INDEX errors_parent_error_id_idx ON errors (parent_error_id); CREATE TABLE user_favorite_errors ( @@ -540,6 +539,9 @@ $$ watchdogs_score bigint NOT NULL DEFAULT 0, issue_score bigint NOT NULL DEFAULT 0, issue_types issue_type[] NOT NULL DEFAULT '{}'::issue_type[], + utm_source text NULL DEFAULT NULL, + utm_medium text NULL DEFAULT NULL, + utm_campaign text NULL DEFAULT NULL, metadata_1 text DEFAULT NULL, metadata_2 text DEFAULT NULL, metadata_3 text DEFAULT NULL, @@ -553,25 +555,24 @@ $$ -- , -- rehydration_id integer REFERENCES rehydrations(rehydration_id) ON DELETE SET NULL ); - CREATE INDEX ON sessions (project_id, start_ts); - CREATE INDEX ON sessions (project_id, user_id); - CREATE INDEX ON sessions (project_id, user_anonymous_id); - CREATE INDEX ON sessions (project_id, user_device); - CREATE INDEX ON sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_browser); - CREATE INDEX ON sessions (project_id, metadata_1); - CREATE INDEX ON sessions (project_id, metadata_2); - CREATE INDEX ON sessions (project_id, metadata_3); - CREATE INDEX ON sessions (project_id, metadata_4); - CREATE INDEX ON sessions (project_id, metadata_5); - CREATE INDEX ON sessions (project_id, metadata_6); - CREATE INDEX ON sessions (project_id, metadata_7); - CREATE INDEX ON sessions (project_id, metadata_8); - CREATE INDEX ON sessions (project_id, metadata_9); - CREATE INDEX ON sessions (project_id, metadata_10); --- CREATE INDEX ON sessions (rehydration_id); - CREATE INDEX ON 
sessions (project_id, watchdogs_score DESC); - CREATE INDEX platform_idx ON public.sessions (platform); + CREATE INDEX sessions_project_id_start_ts_idx ON sessions (project_id, start_ts); + CREATE INDEX sessions_project_id_user_id_idx ON sessions (project_id, user_id); + CREATE INDEX sessions_project_id_user_anonymous_id_idx ON sessions (project_id, user_anonymous_id); + CREATE INDEX sessions_project_id_user_device_idx ON sessions (project_id, user_device); + CREATE INDEX sessions_project_id_user_country_idx ON sessions (project_id, user_country); + CREATE INDEX sessions_project_id_user_browser_idx ON sessions (project_id, user_browser); + CREATE INDEX sessions_project_id_metadata_1_idx ON sessions (project_id, metadata_1); + CREATE INDEX sessions_project_id_metadata_2_idx ON sessions (project_id, metadata_2); + CREATE INDEX sessions_project_id_metadata_3_idx ON sessions (project_id, metadata_3); + CREATE INDEX sessions_project_id_metadata_4_idx ON sessions (project_id, metadata_4); + CREATE INDEX sessions_project_id_metadata_5_idx ON sessions (project_id, metadata_5); + CREATE INDEX sessions_project_id_metadata_6_idx ON sessions (project_id, metadata_6); + CREATE INDEX sessions_project_id_metadata_7_idx ON sessions (project_id, metadata_7); + CREATE INDEX sessions_project_id_metadata_8_idx ON sessions (project_id, metadata_8); + CREATE INDEX sessions_project_id_metadata_9_idx ON sessions (project_id, metadata_9); + CREATE INDEX sessions_project_id_metadata_10_idx ON sessions (project_id, metadata_10); + CREATE INDEX sessions_project_id_watchdogs_score_idx ON sessions (project_id, watchdogs_score DESC); + CREATE INDEX sessions_platform_idx ON public.sessions (platform); CREATE INDEX sessions_metadata1_gin_idx ON public.sessions USING GIN (metadata_1 gin_trgm_ops); CREATE INDEX sessions_metadata2_gin_idx ON public.sessions USING GIN (metadata_2 gin_trgm_ops); @@ -589,12 +590,15 @@ $$ CREATE INDEX sessions_user_id_gin_idx ON public.sessions USING GIN (user_id gin_trgm_ops); CREATE INDEX sessions_user_anonymous_id_gin_idx ON public.sessions USING GIN (user_anonymous_id gin_trgm_ops); CREATE INDEX sessions_user_country_gin_idx ON public.sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_browser); + CREATE INDEX sessions_start_ts_idx ON public.sessions (start_ts) WHERE duration > 0; + CREATE INDEX sessions_project_id_idx ON public.sessions (project_id) WHERE duration > 0; + CREATE INDEX sessions_session_id_project_id_start_ts_idx ON sessions (session_id, project_id, start_ts) WHERE duration > 0; CREATE INDEX sessions_session_id_project_id_start_ts_durationNN_idx ON sessions (session_id, project_id, start_ts) WHERE duration IS NOT NULL; CREATE INDEX sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL; CREATE INDEX sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0; - + CREATE INDEX sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops); + CREATE INDEX sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops); + CREATE INDEX sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops); ALTER TABLE public.sessions ADD CONSTRAINT web_browser_constraint CHECK ( @@ -623,7 +627,7 @@ $$ session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, PRIMARY KEY (user_id, session_id) ); - + CREATE 
INDEX user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); -- --- assignments.sql --- @@ -636,7 +640,7 @@ $$ created_at timestamp default timezone('utc'::text, now()) NOT NULL, provider_data jsonb default '{}'::jsonb NOT NULL ); - CREATE INDEX ON assigned_sessions (session_id); + CREATE INDEX assigned_sessions_session_id_idx ON assigned_sessions (session_id); -- --- events_common.sql --- @@ -654,9 +658,9 @@ $$ level events_common.custom_level NOT NULL DEFAULT 'info', PRIMARY KEY (session_id, timestamp, seq_index) ); - CREATE INDEX ON events_common.customs (name); + CREATE INDEX customs_name_idx ON events_common.customs (name); CREATE INDEX customs_name_gin_idx ON events_common.customs USING GIN (name gin_trgm_ops); - CREATE INDEX ON events_common.customs (timestamp); + CREATE INDEX customs_timestamp_idx ON events_common.customs (timestamp); CREATE TABLE events_common.issues @@ -682,10 +686,10 @@ $$ success boolean NOT NULL, PRIMARY KEY (session_id, timestamp, seq_index) ); - CREATE INDEX ON events_common.requests (url); - CREATE INDEX ON events_common.requests (duration); + CREATE INDEX requests_url_idx ON events_common.requests (url); + CREATE INDEX requests_duration_idx ON events_common.requests (duration); CREATE INDEX requests_url_gin_idx ON events_common.requests USING GIN (url gin_trgm_ops); - CREATE INDEX ON events_common.requests (timestamp); + CREATE INDEX requests_timestamp_idx ON events_common.requests (timestamp); CREATE INDEX requests_url_gin_idx2 ON events_common.requests USING GIN (RIGHT(url, length(url) - (CASE WHEN url LIKE 'http://%' THEN 7 @@ -693,6 +697,7 @@ $$ THEN 8 ELSE 0 END)) gin_trgm_ops); + CREATE INDEX requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE; -- --- events.sql --- CREATE SCHEMA IF NOT EXISTS events; @@ -720,10 +725,11 @@ $$ ttfb integer DEFAULT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.pages (session_id); + CREATE INDEX pages_session_id_idx ON events.pages (session_id); CREATE INDEX pages_base_path_gin_idx ON events.pages USING GIN (base_path gin_trgm_ops); CREATE INDEX pages_base_referrer_gin_idx ON events.pages USING GIN (base_referrer gin_trgm_ops); - CREATE INDEX ON events.pages (timestamp); + CREATE INDEX pages_timestamp_idx ON events.pages (timestamp); + CREATE INDEX pages_session_id_timestamp_idx ON events.pages (session_id, timestamp); CREATE INDEX pages_base_path_gin_idx2 ON events.pages USING GIN (RIGHT(base_path, length(base_path) - 1) gin_trgm_ops); CREATE INDEX pages_base_path_idx ON events.pages (base_path); CREATE INDEX pages_base_path_idx2 ON events.pages (RIGHT(base_path, length(base_path) - 1)); @@ -736,15 +742,28 @@ $$ THEN 8 ELSE 0 END)) gin_trgm_ops); - CREATE INDEX ON events.pages (response_time); - CREATE INDEX ON events.pages (response_end); + CREATE INDEX pages_response_time_idx ON events.pages (response_time); + CREATE INDEX pages_response_end_idx ON events.pages (response_end); CREATE INDEX pages_path_gin_idx ON events.pages USING GIN (path gin_trgm_ops); CREATE INDEX pages_path_idx ON events.pages (path); CREATE INDEX pages_visually_complete_idx ON events.pages (visually_complete) WHERE visually_complete > 0; CREATE INDEX pages_dom_building_time_idx ON events.pages (dom_building_time) WHERE dom_building_time > 0; CREATE INDEX pages_load_time_idx ON events.pages (load_time) WHERE load_time > 0; + CREATE INDEX pages_first_contentful_paint_time_idx ON events.pages (first_contentful_paint_time) 
WHERE first_contentful_paint_time > 0; + CREATE INDEX pages_dom_content_loaded_time_idx ON events.pages (dom_content_loaded_time) WHERE dom_content_loaded_time > 0; + CREATE INDEX pages_first_paint_time_idx ON events.pages (first_paint_time) WHERE first_paint_time > 0; + CREATE INDEX pages_ttfb_idx ON events.pages (ttfb) WHERE ttfb > 0; + CREATE INDEX pages_time_to_interactive_idx ON events.pages (time_to_interactive) WHERE time_to_interactive > 0; + CREATE INDEX pages_session_id_timestamp_loadgt0NN_idx ON events.pages (session_id, timestamp) WHERE load_time > 0 AND load_time IS NOT NULL; + CREATE INDEX pages_session_id_timestamp_visualgt0nn_idx ON events.pages (session_id, timestamp) WHERE visually_complete > 0 AND visually_complete IS NOT NULL; + CREATE INDEX pages_timestamp_metgt0_idx ON events.pages (timestamp) WHERE response_time > 0 OR + first_paint_time > 0 OR + dom_content_loaded_time > 0 OR + ttfb > 0 OR + time_to_interactive > 0; + CREATE INDEX pages_session_id_speed_indexgt0nn_idx ON events.pages (session_id, speed_index) WHERE speed_index > 0 AND speed_index IS NOT NULL; + CREATE INDEX pages_session_id_timestamp_dom_building_timegt0nn_idx ON events.pages (session_id, timestamp, dom_building_time) WHERE dom_building_time > 0 AND dom_building_time IS NOT NULL; CREATE INDEX pages_base_path_session_id_timestamp_idx ON events.pages (base_path, session_id, timestamp); - CREATE INDEX pages_session_id_timestamp_idx ON events.pages (session_id, timestamp); CREATE INDEX pages_base_path_base_pathLNGT2_idx ON events.pages (base_path) WHERE length(base_path) > 2; CREATE TABLE events.clicks @@ -757,10 +776,10 @@ $$ selector text DEFAULT '' NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.clicks (session_id); - CREATE INDEX ON events.clicks (label); + CREATE INDEX clicks_session_id_idx ON events.clicks (session_id); + CREATE INDEX clicks_label_idx ON events.clicks (label); CREATE INDEX clicks_label_gin_idx ON events.clicks USING GIN (label gin_trgm_ops); - CREATE INDEX ON events.clicks (timestamp); + CREATE INDEX clicks_timestamp_idx ON events.clicks (timestamp); CREATE INDEX clicks_label_session_id_timestamp_idx ON events.clicks (label, session_id, timestamp); CREATE INDEX clicks_url_idx ON events.clicks (url); CREATE INDEX clicks_url_gin_idx ON events.clicks USING GIN (url gin_trgm_ops); @@ -777,11 +796,11 @@ $$ value text DEFAULT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.inputs (session_id); - CREATE INDEX ON events.inputs (label, value); + CREATE INDEX inputs_session_id_idx ON events.inputs (session_id); + CREATE INDEX inputs_label_value_idx ON events.inputs (label, value); CREATE INDEX inputs_label_gin_idx ON events.inputs USING GIN (label gin_trgm_ops); CREATE INDEX inputs_label_idx ON events.inputs (label); - CREATE INDEX ON events.inputs (timestamp); + CREATE INDEX inputs_timestamp_idx ON events.inputs (timestamp); CREATE INDEX inputs_label_session_id_timestamp_idx ON events.inputs (label, session_id, timestamp); CREATE TABLE events.errors @@ -792,8 +811,12 @@ $$ error_id text NOT NULL REFERENCES errors (error_id) ON DELETE CASCADE, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.errors (session_id); - CREATE INDEX ON events.errors (timestamp); + CREATE INDEX errors_session_id_idx ON events.errors (session_id); + CREATE INDEX errors_timestamp_idx ON events.errors (timestamp); + CREATE INDEX errors_session_id_timestamp_error_id_idx ON events.errors (session_id, timestamp, error_id); + CREATE INDEX 
errors_error_id_timestamp_idx ON events.errors (error_id, timestamp); + CREATE INDEX errors_timestamp_error_id_session_id_idx ON events.errors (timestamp, error_id, session_id); + CREATE INDEX errors_error_id_timestamp_session_id_idx ON events.errors (error_id, timestamp, session_id); CREATE INDEX errors_error_id_idx ON events.errors (error_id); @@ -805,9 +828,9 @@ $$ name text NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.graphql (name); + CREATE INDEX graphql_name_idx ON events.graphql (name); CREATE INDEX graphql_name_gin_idx ON events.graphql USING GIN (name gin_trgm_ops); - CREATE INDEX ON events.graphql (timestamp); + CREATE INDEX graphql_timestamp_idx ON events.graphql (timestamp); CREATE TABLE events.state_actions ( @@ -817,9 +840,9 @@ $$ name text NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.state_actions (name); + CREATE INDEX state_actions_name_idx ON events.state_actions (name); CREATE INDEX state_actions_name_gin_idx ON events.state_actions USING GIN (name gin_trgm_ops); - CREATE INDEX ON events.state_actions (timestamp); + CREATE INDEX state_actions_timestamp_idx ON events.state_actions (timestamp); CREATE TYPE events.resource_type AS ENUM ('other', 'script', 'stylesheet', 'fetch', 'img', 'media'); CREATE TYPE events.resource_method AS ENUM ('GET' , 'HEAD' , 'POST' , 'PUT' , 'DELETE' , 'CONNECT' , 'OPTIONS' , 'TRACE' , 'PATCH' ); @@ -842,19 +865,26 @@ $$ decoded_body_size integer NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.resources (session_id); - CREATE INDEX ON events.resources (timestamp); - CREATE INDEX ON events.resources (success); - CREATE INDEX ON events.resources (status); - CREATE INDEX ON events.resources (type); - CREATE INDEX ON events.resources (duration) WHERE duration > 0; - CREATE INDEX ON events.resources (url_host); + CREATE INDEX resources_session_id_idx ON events.resources (session_id); + CREATE INDEX resources_status_idx ON events.resources (status); + CREATE INDEX resources_type_idx ON events.resources (type); + CREATE INDEX resources_duration_durationgt0_idx ON events.resources (duration) WHERE duration > 0; + CREATE INDEX resources_url_host_idx ON events.resources (url_host); + CREATE INDEX resources_timestamp_idx ON events.resources (timestamp); + CREATE INDEX resources_success_idx ON events.resources (success); CREATE INDEX resources_url_gin_idx ON events.resources USING GIN (url gin_trgm_ops); CREATE INDEX resources_url_idx ON events.resources (url); CREATE INDEX resources_url_hostpath_gin_idx ON events.resources USING GIN (url_hostpath gin_trgm_ops); CREATE INDEX resources_url_hostpath_idx ON events.resources (url_hostpath); - + CREATE INDEX resources_timestamp_type_durationgt0NN_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL; + CREATE INDEX resources_session_id_timestamp_idx ON events.resources (session_id, timestamp); + CREATE INDEX resources_session_id_timestamp_type_idx ON events.resources (session_id, timestamp, type); + CREATE INDEX resources_timestamp_type_durationgt0NN_noFetch_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL AND type != 'fetch'; + CREATE INDEX resources_session_id_timestamp_url_host_fail_idx ON events.resources (session_id, timestamp, url_host) WHERE success = FALSE; + CREATE INDEX resources_session_id_timestamp_url_host_firstparty_idx ON events.resources (session_id, timestamp, url_host) WHERE type IN ('fetch', 'script'); + CREATE INDEX 
resources_session_id_timestamp_duration_durationgt0NN_img_idx ON events.resources (session_id, timestamp, duration) WHERE duration > 0 AND duration IS NOT NULL AND type = 'img'; + CREATE INDEX resources_timestamp_session_id_idx ON events.resources (timestamp, session_id); CREATE TABLE events.performance ( @@ -875,6 +905,11 @@ $$ max_used_js_heap_size bigint NOT NULL, PRIMARY KEY (session_id, message_id) ); + CREATE INDEX performance_session_id_idx ON events.performance (session_id); + CREATE INDEX performance_timestamp_idx ON events.performance (timestamp); + CREATE INDEX performance_session_id_timestamp_idx ON events.performance (session_id, timestamp); + CREATE INDEX performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0; + CREATE INDEX performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0; -- --- autocomplete.sql --- @@ -906,10 +941,92 @@ $$ start_at timestamp NOT NULL, errors text NULL ); - CREATE INDEX ON jobs (status); - CREATE INDEX ON jobs (start_at); + CREATE INDEX jobs_status_idx ON jobs (status); + CREATE INDEX jobs_start_at_idx ON jobs (start_at); + CREATE INDEX jobs_project_id_idx ON jobs (project_id); + CREATE TABLE traces + ( + user_id integer NULL REFERENCES users (user_id) ON DELETE CASCADE, + tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE, + created_at bigint NOT NULL DEFAULT (EXTRACT(EPOCH FROM now() at time zone 'utc') * 1000)::bigint, + auth text NULL, + action text NOT NULL, + method text NOT NULL, + path_format text NOT NULL, + endpoint text NOT NULL, + payload jsonb NULL, + parameters jsonb NULL, + status int NULL + ); + CREATE INDEX traces_user_id_idx ON traces (user_id); + CREATE INDEX traces_tenant_id_idx ON traces (tenant_id); + + CREATE TABLE metrics + ( + metric_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer REFERENCES users (user_id) ON DELETE SET NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp + ); + CREATE INDEX metrics_user_id_is_public_idx ON public.metrics (user_id, is_public); + CREATE TABLE metric_series + ( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp + ); + CREATE INDEX metric_series_metric_id_idx ON public.metric_series (metric_id); + + CREATE TABLE searches + ( + search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False + ); + + CREATE INDEX searches_user_id_is_public_idx ON public.searches (user_id, is_public); + CREATE INDEX searches_project_id_idx ON public.searches (project_id); + + CREATE TYPE alert_detection_method AS ENUM ('threshold', 'change'); + + CREATE TABLE alerts + ( + alert_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE 
CASCADE, + series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE, + name text NOT NULL, + description text NULL DEFAULT NULL, + active boolean NOT NULL DEFAULT TRUE, + detection_method alert_detection_method NOT NULL, + query jsonb NOT NULL, + deleted_at timestamp NULL DEFAULT NULL, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + options jsonb NOT NULL DEFAULT '{ + "renotifyInterval": 1440 + }'::jsonb + ); + CREATE INDEX alerts_project_id_idx ON alerts (project_id); + CREATE INDEX alerts_series_id_idx ON alerts (series_id); + CREATE TRIGGER on_insert_or_update_or_delete + AFTER INSERT OR UPDATE OR DELETE + ON alerts + FOR EACH ROW + EXECUTE PROCEDURE notify_alert(); + raise notice 'DB created'; END IF; END; diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/.helmignore b/ee/scripts/helm/helm/databases/charts/clickhouse/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/Chart.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/Chart.yaml new file mode 100644 index 000000000..c7a0eb3d6 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: clickhouse +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 1.16.0 diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/templates/_helpers.tpl b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/_helpers.tpl new file mode 100644 index 000000000..44cfadff0 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "clickhouse.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "clickhouse.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "clickhouse.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "clickhouse.labels" -}} +helm.sh/chart: {{ include "clickhouse.chart" . }} +{{ include "clickhouse.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "clickhouse.selectorLabels" -}} +app.kubernetes.io/name: {{ include "clickhouse.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "clickhouse.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "clickhouse.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/templates/service.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/service.yaml new file mode 100644 index 000000000..4496f556c --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: clickhouse + labels: + {{- include "clickhouse.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.service.webPort }} + targetPort: web + protocol: TCP + name: web + - port: {{ .Values.service.dataPort }} + targetPort: data + protocol: TCP + name: data + selector: + {{- include "clickhouse.selectorLabels" . | nindent 4 }} diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/templates/serviceaccount.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/serviceaccount.yaml new file mode 100644 index 000000000..1f1183598 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "clickhouse.serviceAccountName" . }} + labels: + {{- include "clickhouse.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/templates/statefulset.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/statefulset.yaml new file mode 100644 index 000000000..392976eec --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/templates/statefulset.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "clickhouse.fullname" . }} + labels: + {{- include "clickhouse.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + serviceName: {{ include "clickhouse.fullname" . }} + selector: + matchLabels: + {{- include "clickhouse.selectorLabels" . 
| nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "clickhouse.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "clickhouse.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + env: + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 9000 + name: web + - containerPort: 8123 + name: data + volumeMounts: + - name: ch-volume + mountPath: /var/lib/mydata + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: ch-volume + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storageSize }} diff --git a/ee/scripts/helm/helm/databases/charts/clickhouse/values.yaml b/ee/scripts/helm/helm/databases/charts/clickhouse/values.yaml new file mode 100644 index 000000000..4cba1c1f8 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/clickhouse/values.yaml @@ -0,0 +1,62 @@ +# Default values for clickhouse. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: yandex/clickhouse-server + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "20.9" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +env: {} + +service: + webPort: 9000 + dataPort: 8123 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} +storageSize: 8G diff --git a/ee/scripts/helm/helm/databases/charts/kafka/.helmignore b/ee/scripts/helm/helm/databases/charts/kafka/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ee/scripts/helm/helm/databases/charts/kafka/Chart.yaml b/ee/scripts/helm/helm/databases/charts/kafka/Chart.yaml new file mode 100755 index 000000000..165e70d55 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 2.6.0 +description: Apache Kafka is a distributed streaming platform. +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/kafka +icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-110x117.png +keywords: +- kafka +- zookeeper +- streaming +- producer +- consumer +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: kafka +sources: +- https://github.com/bitnami/bitnami-docker-kafka +- https://kafka.apache.org/ +version: 11.8.6 diff --git a/ee/scripts/helm/helm/databases/charts/kafka/README.md b/ee/scripts/helm/helm/databases/charts/kafka/README.md new file mode 100755 index 000000000..5584bd43d --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/README.md @@ -0,0 +1,737 @@ +# Kafka + +[Kafka](https://kafka.apache.org/) is a distributed streaming platform used for building real-time data pipelines and streaming apps. It is horizontally scalable, fault-tolerant, wicked fast, and runs in production in thousands of companies. + +## TL;DR + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +## Introduction + +This chart bootstraps a [Kafka](https://github.com/bitnami/bitnami-docker-kafka) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release.
+ +## Parameters + +The following tables list the configurable parameters of the Kafka chart and their default values per section/component: + +### Global parameters + +| Parameter | Description | Default | +|---|---|---| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | + +### Common parameters + +| Parameter | Description | Default | +|---|---|---| +| `nameOverride` | String to partially override kafka.fullname | `nil` | +| `fullnameOverride` | String to fully override kafka.fullname | `nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `nil` (evaluated as a template) | + +### Kafka parameters + +| Parameter | Description | Default | +|---|---|---| +| `image.registry` | Kafka image registry | `docker.io` | +| `image.repository` | Kafka image name | `bitnami/kafka` | +| `image.tag` | Kafka image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | +| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `nil` | +| `existingConfigmap` | Name of existing ConfigMap with Kafka configuration | `nil` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers. | `nil` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file. | `nil` | +| `heapOpts` | Kafka's Java Heap size | `-Xmx1024m -Xms1024m` | +| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | +| `autoCreateTopicsEnable` | Switch to enable auto creation of topics.
Enabling auto creation of topics is not recommended for production or similar environments | `false` | +| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `10000` | +| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | +| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | +| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | +| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | +| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` | +| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` | +| `maxMessageBytes` | The largest record batch size allowed by Kafka | `1000012` | +| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | +| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | +| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | +| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | +| `numIoThreads` | The number of threads doing disk I/O | `8` | +| `numNetworkThreads` | The number of threads handling network requests | `3` | +| `numPartitions` | The default number of log partitions per topic | `1` | +| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | +| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | +| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | +| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | +| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to Zookeeper | `6000` | +| `extraEnvVars` | Extra environment variables to add to kafka pods | `[]` | +| `extraVolumes` | Extra volume(s) to add to Kafka statefulset | `[]` | +| `extraVolumeMounts` | Extra volumeMount(s) to add to Kafka containers | `[]` | +| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.saslMechanisms` | SASL mechanisms when either `auth.interBrokerProtocol` or `auth.clientProtocol` are `sasl`.
Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | +| `auth.saslInterBrokerMechanism` | SASL mechanism to use as inter broker protocol, it must be included in `auth.saslMechanisms` | `plain` | +| `auth.jksSecret` | Name of the existing secret containing the truststore and one keystore per Kafka broker you have in the cluster | `nil` | +| `auth.jksPassword` | Password to access the JKS files when they are password-protected | `nil` | +| `auth.tlsEndpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `auth.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | +| `auth.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `nil` | +| `auth.jaas.zookeeperUser` | Kafka Zookeeper user for SASL authentication | `nil` | +| `auth.jaas.zookeeperPassword` | Kafka Zookeeper password for SASL authentication | `nil` | +| `auth.jaas.existingSecret` | Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser | `nil` | +| `auth.jaas.clientUsers` | List of Kafka client users to be created, separated by commas. These values will override `auth.jaas.clientUser` | `[]` | +| `auth.jaas.clientPasswords` | List of passwords for `auth.jaas.clientUsers`. It is mandatory to provide the passwords when using `auth.jaas.clientUsers` | `[]` | +| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` | +| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array | `[]` | +| `listenerSecurityProtocolMap` | The protocol->listener mapping.
Auto-calculated it's set to nil | `nil` | +| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | +| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | + +### Statefulset parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `replicaCount` | Number of Kafka nodes | `1` | +| `updateStrategy` | Update strategy for the stateful set | `RollingUpdate` | +| `rollingUpdatePartition` | Partition update strategy | `nil` | +| `podLabels` | Kafka pod labels | `{}` (evaluated as a template) | +| `podAnnotations` | Kafka Pod annotations | `{}` (evaluated as a template) | +| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) | +| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) | +| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) | +| `podSecurityContext` | Kafka pods' Security Context | `{}` | +| `containerSecurityContext` | Kafka containers' Security Context | `{}` | +| `resources.limits` | The resources limits for Kafka containers | `{}` | +| `resources.requests` | The requested resources for Kafka containers | `{}` | +| `livenessProbe` | Liveness probe configuration for Kafka | `Check values.yaml file` | +| `readinessProbe` | Readiness probe configuration for Kafka | `Check values.yaml file` | +| `customLivenessProbe` | Custom Liveness probe configuration for Kafka | `{}` | +| `customReadinessProbe` | Custom Readiness probe configuration for Kafka | `{}` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `nil` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `1` | +| `command` | Override kafka container command | `['/scripts/setup.sh']` (evaluated as a template) | +| `args` | Override kafka container arguments | `[]` (evaluated as a template) | +| `sidecars` | Attach additional sidecar containers to the Kafka pod | `{}` | + +### Exposure parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | Kafka port for client connections | `9092` | +| `service.internalPort` | Kafka port for inter-broker connections | `9093` | +| `service.externalPort` | Kafka port for external connections | `9094` | +| `service.nodePorts.client` | Nodeport for client connections | `""` | +| `service.nodePorts.external` | Nodeport for external connections | `""` | +| `service.loadBalancerIP` | loadBalancerIP for Kafka Service | `nil` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `service.annotations` | Service annotations | `{}`(evaluated as a template) | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable 
using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry (kubectl) | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image name (kubectl) | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (kubectl) | `{TAG_NAME}` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy (kubectl) | `Always` | +| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort or LoadBalancer | `LoadBalancer` | +| `externalAccess.service.port` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for Kafka brokers | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.domain` | Domain or external IP used to configure Kafka external listener when service type is NodePort | `nil` | +| `externalAccess.service.nodePorts` | Array of node ports used to configure Kafka external listener when service type is NodePort | `[]` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}`(evaluated as a template) | + +### Persistence parameters + +| Parameter | Description | Default | +|---|---|---| +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that Zookeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template | `nil` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `nil` | +| `persistence.accessMode` | PVC Access Mode for Kafka data volume | `ReadWriteOnce` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}`(evaluated as a template) | + +### RBAC parameters + +| Parameter | Description | Default | +|---|---|---| +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | Generated using the `kafka.fullname` template | +| `rbac.create` | Whether to create & use RBAC resources or not | `false` | + +### Volume Permissions parameters + +| Parameter | Description | Default | +|---|---|---| +| `volumePermissions.enabled` | Enable init container that changes the
owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + +### Metrics parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image name | `bitnami/kafka-exporter` | +| `metrics.kafka.image.tag` | Kafka exporter image tag | `{TAG_NAME}` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `nil` | +| `metrics.kafka.resources.limits` | Kafka Exporter container resource limits | `{}` | +| `metrics.kafka.resources.requests` | Kafka Exporter container resource requests | `{}` | +| `metrics.kafka.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Kafka Exporter | `ClusterIP` | +| `metrics.kafka.service.port` | Kafka Exporter Prometheus port | `9308` | +| `metrics.kafka.service.nodePort` | Kubernetes HTTP node port | `""` | +| `metrics.kafka.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | +| `metrics.kafka.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image name | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag | `{TAG_NAME}` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` | +| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` | +| `metrics.jmx.service.type` | Kubernetes service type 
(`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter | `ClusterIP` | +| `metrics.jmx.service.port` | JMX Exporter Prometheus port | `5556` | +| `metrics.jmx.service.nodePort` | Kubernetes HTTP node port | `""` | +| `metrics.jmx.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | +| `metrics.jmx.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose via JMX stats to the JMX Exporter | (see `values.yaml`) | +| `metrics.jmx.config` | Configuration file for JMX exporter | (see `values.yaml`) | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `nil` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `monitoring` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `nil` (Prometheus Operator default value) | + +### Zookeeper chart parameters + +| Parameter | Description | Default | +|---|---|---| +| `zookeeper.enabled` | Switch to enable or disable the Zookeeper helm chart | `true` | +| `zookeeper.persistence.enabled` | Enable Zookeeper persistence using PVC | `true` | +| `externalZookeeper.servers` | Server or list of external Zookeeper servers to use | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set replicaCount=3 \ + bitnami/kafka +``` + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml bitnami/kafka +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
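+ +For instance, a minimal sketch of such an install, assuming you have copied `values-production.yaml` out of the chart (e.g. from the Bitnami charts repository) into your working directory: + +```console +helm install my-release -f values-production.yaml bitnami/kafka +``` + +Compared with the regular values, it adjusts parameters such as the following: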
+
+- Number of Kafka nodes:
+
+```diff
+- replicaCount: 1
++ replicaCount: 3
+```
+
+- Disallow the use of the `PLAINTEXT` listener:
+
+```diff
+- allowPlaintextListener: true
++ allowPlaintextListener: false
+```
+
+- Default replication factors for automatically created topics:
+
+```diff
+- defaultReplicationFactor: 1
++ defaultReplicationFactor: 3
+```
+
+- Disable auto creation of topics:
+
+```diff
+- autoCreateTopicsEnable: true
++ autoCreateTopicsEnable: false
+```
+
+- The replication factor for the offsets topic:
+
+```diff
+- offsetsTopicReplicationFactor: 1
++ offsetsTopicReplicationFactor: 3
+```
+
+- The replication factor for the transaction topic:
+
+```diff
+- transactionStateLogReplicationFactor: 1
++ transactionStateLogReplicationFactor: 3
+```
+
+- Overridden min.insync.replicas config for the transaction topic:
+
+```diff
+- transactionStateLogMinIsr: 1
++ transactionStateLogMinIsr: 3
+```
+
+- Switch to enable SASL authentication on client and inter-broker communications:
+
+```diff
+- auth.clientProtocol: plaintext
++ auth.clientProtocol: sasl
+- auth.interBrokerProtocol: plaintext
++ auth.interBrokerProtocol: sasl
+```
+
+- Enable Zookeeper authentication:
+
+```diff
++ auth.jaas.zookeeperUser: zookeeperUser
++ auth.jaas.zookeeperPassword: zookeeperPassword
+- zookeeper.auth.enabled: false
++ zookeeper.auth.enabled: true
++ zookeeper.auth.clientUser: zookeeperUser
++ zookeeper.auth.clientPassword: zookeeperPassword
++ zookeeper.auth.serverUsers: zookeeperUser
++ zookeeper.auth.serverPasswords: zookeeperPassword
+```
+
+- Enable Pod Disruption Budget:
+
+```diff
+- pdb.create: false
++ pdb.create: true
+```
+
+- Create a separate Kafka metrics exporter:
+
+```diff
+- metrics.kafka.enabled: false
++ metrics.kafka.enabled: true
+```
+
+- Expose JMX metrics to Prometheus:
+
+```diff
+- metrics.jmx.enabled: false
++ metrics.jmx.enabled: true
+```
+
+- Enable Zookeeper metrics:
+
+```diff
++ zookeeper.metrics.enabled: true
+```
+
+To horizontally scale this chart once it has been deployed, you can upgrade the statefulset using a new value for the `replicaCount` parameter. Please note that, when enabling TLS encryption, you must update your JKS secret to include the keystores for the new replicas.
+
+### Setting custom parameters
+
+Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables, use the `extraEnvVars` property.
+
+### Listeners configuration
+
+This chart allows you to automatically configure Kafka with 3 listeners:
+
+- One for inter-broker communications.
+- A second one for communications with clients within the K8s cluster.
+- (optional) A third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-cluster) for more information.
+
+For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed.
+
+### Enable security for Kafka and Zookeeper
+
+You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. This table shows the available protocols and the security they provide:
+
+| Method    | Authentication                | Encryption via TLS |
+|-----------|-------------------------------|--------------------|
+| plaintext | None                          | No                 |
+| tls       | None                          | Yes                |
+| mtls      | Yes (two-way authentication)  | Yes                |
+| sasl      | Yes (via SASL)                | No                 |
+| sasl_tls  | Yes (via SASL)                | Yes                |
+
+If you enable SASL authentication on any listener, you can set the SASL credentials using the parameters below:
+
+- `auth.jaas.clientUsers`/`auth.jaas.clientPasswords`: when enabling SASL authentication for communications with clients.
+- `auth.jaas.interBrokerUser`/`auth.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications.
+- `auth.jaas.zookeeperUser`/`auth.jaas.zookeeperPassword`: in the case that the Zookeeper chart is deployed with SASL authentication enabled.
+
+In order to configure TLS authentication/encryption, you **must** create a secret containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. Then, you need to pass the secret name with the `auth.jksSecret` parameter when deploying the chart.
+
+> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.jksPassword` parameter to provide your password.
+
+For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers use the command below to create the secret:
+
+```console
+kubectl create secret generic kafka-jks --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks
+```
+
+> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS files generation.
+
+As an alternative to manually creating the secret before installing the chart, you can put your JKS files inside the chart folder `files/jks`, and a secret including them will be generated. Please note this alternative requires you to have the chart downloaded locally, so you will have to clone this repository or fetch the chart before installing it.
+
+You can deploy the chart with authentication using the following parameters:
+
+```console
+replicaCount=2
+auth.clientProtocol=sasl
+auth.interBrokerProtocol=tls
+auth.jksSecret=kafka-jks
+auth.jksPassword=jksPassword
+auth.jaas.clientUsers[0]=brokerUser
+auth.jaas.clientPasswords[0]=brokerPassword
+auth.jaas.zookeeperUser=zookeeperUser
+auth.jaas.zookeeperPassword=zookeeperPassword
+zookeeper.auth.enabled=true
+zookeeper.auth.serverUsers=zookeeperUser
+zookeeper.auth.serverPasswords=zookeeperPassword
+zookeeper.auth.clientUser=zookeeperUser
+zookeeper.auth.clientPassword=zookeeperPassword
+```
+
+If you also enable exposing metrics using the Kafka exporter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificate used to sign the broker certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.kafka.certificatesSecret` parameter.
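+
+For instance, assuming the CA certificate is stored in a local file named `ca-cert` (both this file name and the secret name below are illustrative), you could create the secret with:
+
+```console
+kubectl create secret generic kafka-exporter-ca --from-file=./ca-cert
+```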
+As an alternative, you can skip TLS validation using extra flags:
+
+```console
+metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""}
+```
+
+### Accessing Kafka brokers from outside the cluster
+
+In order to access Kafka brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per Kafka pod will be created.
+
+There are two ways of configuring external access: using LoadBalancer services or using NodePort services.
+
+#### Using LoadBalancer services
+
+You have two alternatives to use LoadBalancer services:
+
+- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discovers them automatically.
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.port=9094
+externalAccess.autoDiscovery.enabled=true
+serviceAccount.create=true
+rbac.create=true
+```
+
+Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the load balancer IPs:
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=LoadBalancer
+externalAccess.service.port=9094
+externalAccess.service.loadBalancerIPs[0]='external-ip-1'
+externalAccess.service.loadBalancerIPs[1]='external-ip-2'
+```
+
+Note: You need to know the load balancer IPs in advance so that each Kafka broker's advertised listener is configured with them.
+
+#### Using NodePort services
+
+You have two alternatives to use NodePort services:
+
+- Option A) Use random node ports using an **initContainer** that discovers them automatically.
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=NodePort
+externalAccess.autoDiscovery.enabled=true
+serviceAccount.create=true
+rbac.create=true
+```
+
+Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
+
+- Option B) Manually specify the node ports:
+
+```console
+externalAccess.enabled=true
+externalAccess.service.type=NodePort
+externalAccess.service.nodePorts[0]='node-port-1'
+externalAccess.service.nodePorts[1]='node-port-2'
+```
+
+Note: You need to know in advance the node ports that will be exposed so that each Kafka broker's advertised listener is configured with them.
+
+The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` is provided.
+
+Following the aforementioned steps will also allow you to connect to the brokers from outside the cluster using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+sidecars:
+  - name: your-image-name
+    image: your-image
+    imagePullPolicy: Always
+    ports:
+      - name: portname
+        containerPort: 1234
+```
+
+### Deploying extra resources
+
+There are cases where you may want to deploy extra objects, such as Kafka Connect. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example deploys Kafka Connect (a Deployment, its ConfigMap and a Service) so you can connect Kafka with MongoDB:
+
+```yaml
+## Extra objects to deploy (value evaluated as a template)
+##
+extraDeploy: |-
+  - apiVersion: apps/v1
+    kind: Deployment
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "kafka.labels" . | nindent 6 }}
+        app.kubernetes.io/component: connector
+    spec:
+      replicas: 1
+      selector:
+        matchLabels: {{- include "kafka.matchLabels" . | nindent 8 }}
+          app.kubernetes.io/component: connector
+      template:
+        metadata:
+          labels: {{- include "kafka.labels" . | nindent 10 }}
+            app.kubernetes.io/component: connector
+        spec:
+          containers:
+            - name: connect
+              image: KAFKA-CONNECT-IMAGE
+              imagePullPolicy: IfNotPresent
+              ports:
+                - name: connector
+                  containerPort: 8083
+              volumeMounts:
+                - name: configuration
+                  mountPath: /opt/bitnami/kafka/config
+          volumes:
+            - name: configuration
+              configMap:
+                name: {{ include "kafka.fullname" . }}-connect
+  - apiVersion: v1
+    kind: ConfigMap
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "kafka.labels" . | nindent 6 }}
+        app.kubernetes.io/component: connector
+    data:
+      connect-standalone.properties: |-
+        bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }}
+        ...
+      mongodb.properties: |-
+        connection.uri=mongodb://root:password@mongodb-hostname:27017
+        ...
+  - apiVersion: v1
+    kind: Service
+    metadata:
+      name: {{ include "kafka.fullname" . }}-connect
+      labels: {{- include "kafka.labels" . | nindent 6 }}
+        app.kubernetes.io/component: connector
+    spec:
+      ports:
+        - protocol: TCP
+          port: 8083
+          targetPort: connector
+      selector: {{- include "kafka.matchLabels" . | nindent 6 }}
+        app.kubernetes.io/component: connector
+```
+
+You can create the Kafka Connect image using the Dockerfile below:
+
+```Dockerfile
+FROM bitnami/kafka:latest
+# Download MongoDB Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb
+RUN mkdir -p /opt/bitnami/kafka/plugins && \
+    cd /opt/bitnami/kafka/plugins && \
+    curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar
+CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties
+```
+
+## Persistence
+
+The [Bitnami Kafka](https://github.com/bitnami/bitnami-docker-kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
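+
+For example, a minimal sketch of a fresh installation with the init container enabled:
+
+```console
+helm install my-release bitnami/kafka --set volumePermissions.enabled=true
+```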
+
+## Upgrading
+
+### To 11.8.0
+
+External access to brokers can now be achieved through the cluster's Kafka service.
+
+- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external`
+
+### To 11.7.0
+
+The way to configure the users and passwords changed. Now it is possible to create multiple users during the installation by providing lists of users and passwords.
+
+- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array).
+- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array).
+
+### To 11.0.0
+
+The way to configure listeners and authentication on Kafka is totally refactored, allowing users to configure different authentication protocols on different listeners. Please check the sections [Listeners configuration](#listeners-configuration) and [Enable security for Kafka and Zookeeper](#enable-security-for-kafka-and-zookeeper) for more information.
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or disappeared in favor of new ones on this major version:
+
+- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
+- `auth.certificatesSecret` -> renamed to `auth.jksSecret`.
+- `auth.certificatesPassword` -> renamed to `auth.jksPassword`.
+- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`.
+- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser`
+- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword`
+- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser`
+- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword`
+- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret`
+- `service.sslPort` -> deprecated in favor of `service.internalPort`
+- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort`
+- `metrics.kafka.extraFlags` -> new parameter
+- `metrics.kafka.certificatesSecret` -> new parameter
+
+### To 10.0.0
+
+If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later.
+
+### To 9.0.0
+
+Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed on this major version:
+
+```diff
+- securityContext.enabled
+- securityContext.fsGroup
+- securityContext.runAsUser
++ podSecurityContext
+- externalAccess.service.loadBalancerIP
++ externalAccess.service.loadBalancerIPs
+- externalAccess.service.nodePort
++ externalAccess.service.nodePorts
+- metrics.jmx.configMap.enabled
+- metrics.jmx.configMap.overrideConfig
++ metrics.jmx.config
+- metrics.jmx.configMap.overrideName
++ metrics.jmx.existingConfigmap
+```
+
+Port names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/).
+
+### To 8.0.0
+
+There is no backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
+ +### To 7.0.0 + +Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments. +Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka: + +```console +helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false +helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true +``` + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/.helmignore b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/Chart.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/Chart.yaml new file mode 100755 index 000000000..c3b15dc5c --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 3.6.2 +description: A centralized service for maintaining configuration information, naming, + providing distributed synchronization, and providing group services for distributed + applications. +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-110x117.png +keywords: +- zookeeper +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: zookeeper +sources: +- https://github.com/bitnami/bitnami-docker-zookeeper +- https://zookeeper.apache.org/ +version: 5.21.9 diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/README.md b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/README.md new file mode 100755 index 000000000..0291875ed --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/README.md @@ -0,0 +1,297 @@ +# ZooKeeper + +[ZooKeeper](https://zookeeper.apache.org/) is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. All of these kinds of services are used in some form or other by distributed applications. 
+
+## TL;DR
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/zookeeper
+```
+
+## Introduction
+
+This chart bootstraps a [ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.12+ or Helm 3.0-beta3+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/zookeeper
+```
+
+These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+The following tables list the configurable parameters of the ZooKeeper chart and their default values per section/component:
+
+| Parameter | Description | Default |
+|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
+| `global.imageRegistry` | Global Docker image registry | `nil` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
+| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
+
+### Common parameters
+
+| Parameter | Description | Default |
+|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
+| `nameOverride` | String to partially override zookeeper.fullname | `nil` |
+| `fullnameOverride` | String to fully override zookeeper.fullname | `nil` |
+| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
+| `commonLabels` | Labels to add to all deployed objects | `{}` |
+| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
+| `schedulerName` | Kubernetes pod scheduler registry | `nil` (use the default-scheduler) |
+
+### Zookeeper chart parameters
+
+| Parameter | Description | Default |
+|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
+| `image.registry` | ZooKeeper image registry | `docker.io` |
+| `image.repository` | ZooKeeper image name | `bitnami/zookeeper` |
+| `image.tag` | ZooKeeper image tag | `{TAG_NAME}` |
+| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `tickTime` | Basic time unit in milliseconds used by ZooKeeper for heartbeats | `2000` |
+| `initLimit` | Time the ZooKeeper servers in quorum have to connect to a leader | `10` |
+| `syncLimit` | How far out of date a server can be from a leader | `5` |
+| `maxClientCnxns` | Number of concurrent connections that a single client may make to a single member | `60` |
+| `maxSessionTimeout` | Maximum session timeout in milliseconds that the server will allow the client to negotiate. | `40000` |
+| `autopurge.snapRetainCount` | Number of snapshots to retain for autopurge | `3` |
+| `autopurge.purgeInterval` | The time interval in hours for which the purge task has to be triggered | `0` |
+| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands to use | `srvr, mntr` |
+| `listenOnAllIPs` | Allow Zookeeper to listen for connections from its peers on all available IP addresses. | `false` |
+| `allowAnonymousLogin` | Allow to accept connections from unauthenticated users | `yes` |
+| `auth.existingSecret` | Use existing secret (ignores previous password) | `nil` |
+| `auth.enabled` | Enable ZooKeeper auth | `false` |
+| `auth.clientUser` | User that ZooKeeper clients will use to authenticate | `nil` |
+| `auth.clientPassword` | Password that ZooKeeper clients will use to authenticate | `nil` |
+| `auth.serverUsers` | List of users to be created | `nil` |
+| `auth.serverPasswords` | List of passwords to assign to users when created | `nil` |
+| `heapSize` | Size in MB for the Java Heap options (Xmx and Xms) | `[]` |
+| `logLevel` | Log level of ZooKeeper server | `ERROR` |
+| `jvmFlags` | Default JVMFLAGS for the ZooKeeper process | `nil` |
+| `config` | Configure ZooKeeper with a custom zoo.cfg file | `nil` |
+| `dataLogDir` | Data log directory | `""` |
+
+### Statefulset parameters
+
+| Parameter | Description | Default |
+|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
+| `replicaCount` | Number of ZooKeeper nodes | `1` |
+| `updateStrategy` | Update strategy for the statefulset | `RollingUpdate` |
+| `rollingUpdatePartition` | Partition update strategy | `nil` |
+| `podManagementPolicy` | Pod management policy | `Parallel` |
+| `podLabels` | ZooKeeper pod labels | `{}` (evaluated as a template) |
+| `podAnnotations` | ZooKeeper pod annotations | `{}` (evaluated as a template) |
+| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) |
+| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) |
+| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) |
+| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods | `""` |
+| `securityContext.enabled` | Enable security context (ZooKeeper master pod) | `true` |
+| `securityContext.fsGroup` | Group ID for the container (ZooKeeper master pod) | `1001` |
+| `securityContext.runAsUser` | User ID for the container (ZooKeeper master pod) | `1001` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` |
+| `livenessProbe` | Liveness probe configuration for ZooKeeper | Check `values.yaml`
file | +| `readinessProbe` | Readiness probe configuration for ZooKeeper | Check `values.yaml` file | +| `extraVolumes` | Extra volumes | `nil` | +| `extraVolumeMounts` | Mount extra volume(s) | `nil` | +| `podDisruptionBudget.maxUnavailable` | Max number of pods down simultaneously | `1` | + +### Exposure parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | ZooKeeper port | `2181` | +| `service.followerPort` | ZooKeeper follower port | `2888` | +| `service.electionPort` | ZooKeeper election port | `3888` | +| `service.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` | +| `serviceAccount.create` | Enable creation of ServiceAccount for zookeeper pod | `false` | +| `serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | Generated using the `zookeeper.fullname` template | +| `service.tls.client_enable` | Enable tls for client connections | `false` | +| `service.tls.quorum_enable` | Enable tls for quorum protocol | `false` | +| `service.tls.disable_base_client_port` | Remove client port from service definitions. | `false` | +| `service.tls.client_port` | Service port for tls client connections | `3181` | +| `service.tls.client_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` | +| `service.tls.client_keystore_password` | KeyStore password. You can use environment variables. | `nil` | +| `service.tls.client_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` | +| `service.tls.client_truststore_password` | TrustStore password. You can use environment variables. | `nil` | +| `service.tls.quorum_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` | +| `service.tls.quorum_keystore_password` | KeyStore password. You can use environment variables. | `nil` | +| `service.tls.quorum_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` | +| `service.tls.quorum_truststore_password` | TrustStore password. You can use environment variables. 
| `nil` |
+| `service.annotations` | Annotations for the Service | `{}` |
+| `service.headless.annotations` | Annotations for the Headless Service | `{}` |
+| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+
+### Persistence parameters
+
+| Parameter | Description | Default |
+|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
+| `persistence.enabled` | Enable Zookeeper data persistence using PVC | `true` |
+| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` (evaluated as a template) |
+| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `nil` |
+| `persistence.accessMode` | PVC Access Mode for ZooKeeper data volume | `ReadWriteOnce` |
+| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` |
+| `persistence.annotations` | Annotations for the PVC | `{}` (evaluated as a template) |
+| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's Data log directory | `8Gi` |
+| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for Zookeeper's Data log directory | `nil` (evaluated as a template) |
+
+### Volume Permissions parameters
+
+| Parameter | Description | Default |
+|---------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
+| `volumePermissions.resources` | Init container resource requests/limits | `nil` |
+
+### Metrics parameters
+
+| Parameter | Description | Default |
+|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------|
+| `metrics.enabled` | Enable Prometheus to access ZooKeeper metrics endpoint | `false` |
+| `metrics.containerPort` | Port where a Jetty server will expose Prometheus metrics | `9141` |
+| `metrics.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Jetty server exposing Prometheus metrics | `ClusterIP` |
+| `metrics.service.port` | Prometheus metrics service port | `9141` |
+| `metrics.service.annotations` | Service annotations for Prometheus to auto-discover the metrics endpoint | `{prometheus.io/scrape: "true", prometheus.io/port: "9141"}` |
+| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource | The Release Namespace |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `nil` (Prometheus Operator default value) |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `nil` |
+| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource | The Release Namespace |
+| `metrics.prometheusRule.selector` | Prometheus instance selector labels | `nil` |
+| `metrics.prometheusRule.rules` | Prometheus Rule definitions (see values.yaml for examples) | `[]` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+  --set auth.clientUser=newUser \
+  bitnami/zookeeper
+```
+
+The above command sets the ZooKeeper user to `newUser`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/zookeeper
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Number of ZooKeeper nodes:
+
+```diff
+- replicaCount: 1
++ replicaCount: 3
+```
+
+- Enable Prometheus metrics:
+
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+### Log level
+
+You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable. By default, it is set to `ERROR` because each readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection.
+
+## Persistence
+
+The [Bitnami ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Data Log Directory
+
+You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter with the path to be used for writing transaction logs. Alternatively, set this parameter with an empty string and it will result in the log being written to the data directory (ZooKeeper's default behavior).
+
+When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
+
+## Upgrading
+
+### To 5.21.0
+
+A couple of parameters related to ZooKeeper metrics were renamed or disappeared in favor of new ones:
+
+- `metrics.port` is renamed to `metrics.containerPort`.
+- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`.
+
+### To 3.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade
+of the application, each node will need to have at least one snapshot file created in the data directory. If not, the
+new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056)
+in order to find ways to workaround this issue in case you are facing it.
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`:
+
+```console
+$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is zookeeper:
+
+```console
+$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt
new file mode 100755
index 000000000..3cc2edbed
--- /dev/null
+++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt
@@ -0,0 +1,57 @@
+{{- if contains .Values.service.type "LoadBalancer" }}
+{{- if not .Values.auth.clientPassword }}
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "serviceType=LoadBalancer" and not specifying "auth.enabled=true"
+    you have most likely exposed the ZooKeeper service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As an alternative, you can also specify a valid password on the
+    "auth.clientPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+ZooKeeper can be accessed via port 2181 on the following DNS name from within your cluster:
+
+    {{ template "zookeeper.fullname" .
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To connect to your ZooKeeper server run the following commands: + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "zookeeper.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}") + kubectl exec -it $POD_NAME -- zkCli.sh + +To connect to your ZooKeeper server from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "zookeeper.fullname" . }}) + zkCli.sh $NODE_IP:$NODE_PORT + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "zookeeper.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + zkCli.sh $SERVICE_IP:2181 + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "zookeeper.fullname" . }} 2181:2181 & + zkCli.sh 127.0.0.1:2181 + +{{- end }} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl new file mode 100755 index 000000000..f82502d69 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,212 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "zookeeper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "zookeeper.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "zookeeper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "zookeeper.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Zookeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "zookeeper.labels" -}} +app.kubernetes.io/name: {{ include "zookeeper.name" . }} +helm.sh/chart: {{ include "zookeeper.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "zookeeper.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "zookeeper.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "zookeeper.matchLabels" -}} +app.kubernetes.io/name: {{ include "zookeeper.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Return ZooKeeper Client Password +*/}} +{{- define "zookeeper.clientPassword" -}} +{{- if .Values.auth.clientPassword -}} + {{- .Values.auth.clientPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return ZooKeeper Servers Passwords +*/}} +{{- define "zookeeper.serverPasswords" -}} +{{- if .Values.auth.serverPasswords -}} + {{- .Values.auth.serverPasswords -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "zookeeper.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml new file mode 100755 index 000000000..1a4061565 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if .Values.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- +{{ .Values.config | indent 4 }} +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml new file mode 100755 index 000000000..3e26ed6c8 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{ include "zookeeper.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "zookeeper.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml new file mode 100755 index 000000000..f7e30b4bc --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,43 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections to zookeeper + - ports: + - port: {{ .Values.service.port }} + from: + {{- if not .Values.networkPolicy.allowExternal }} + - podSelector: + matchLabels: + {{ include "zookeeper.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 14 }} + {{- else }} + - podSelector: + matchLabels: {} + {{- end }} + # Internal ports + - ports: &intranodes_ports + - port: {{ .Values.service.followerPort }} + - port: {{ .Values.service.electionPort }} + from: + - podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . 
| nindent 14 }} + egress: + - ports: *intranodes_ports + # Allow outbound connections from zookeeper nodes + +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..818950c66 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if gt $replicaCount 1 }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml new file mode 100755 index 000000000..9cda3985c --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "zookeeper.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- range $key, $value := .Values.metrics.prometheusRule.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "zookeeper.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 6 }} +{{- end }} + diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml new file mode 100755 index 000000000..b3d727fec --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + client-password: {{ include "zookeeper.clientPassword" . | b64enc | quote }} + server-password: {{ include "zookeeper.serverPasswords" . | b64enc | quote }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml new file mode 100755 index 000000000..3f7ef39fd --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml new file mode 100755 index 000000000..5782dad59 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "zookeeper.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "zookeeper.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: zookeeper + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml new file mode 100755 index 000000000..fa1e5231f --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,334 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "zookeeper.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if (eq "Recreate" .Values.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + template: + metadata: + name: {{ template "zookeeper.fullname" . }} + labels: {{- include "zookeeper.labels" . | nindent 8 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.podLabels }} + {{- include "zookeeper.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.podAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} + serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "zookeeper.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "zookeeper.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "zookeeper.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . 
}}
+ imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
+ command:
+ - chown
+ args:
+ - -R
+ - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}
+ - /bitnami/zookeeper
+ {{- if .Values.dataLogDir }}
+ - {{ .Values.dataLogDir }}
+ {{- end }}
+ securityContext:
+ runAsUser: 0
+ {{- if .Values.volumePermissions.resources }}
+ resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ - name: data
+ mountPath: /bitnami/zookeeper
+ {{- if .Values.dataLogDir }}
+ - name: data-log
+ mountPath: {{ .Values.dataLogDir }}
+ {{- end }}
+ {{- end }}
+ containers:
+ - name: zookeeper
+ image: {{ template "zookeeper.image" . }}
+ imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+ {{- if .Values.securityContext.enabled }}
+ securityContext:
+ runAsUser: {{ .Values.securityContext.runAsUser }}
+ {{- end }}
+ command:
+ - bash
+ - -ec
+ - |
+ # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname
+ HOSTNAME=`hostname -s`
+ if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+ ORD=${BASH_REMATCH[2]}
+ export ZOO_SERVER_ID=$((ORD+1))
+ else
+ echo "Failed to get index from hostname $HOSTNAME"
+ exit 1
+ fi
+ exec /entrypoint.sh /run.sh
+ {{- if .Values.resources }}
+ resources: {{- toYaml .Values.resources | nindent 12 }}
+ {{- end }}
+ env:
+ - name: ZOO_DATA_LOG_DIR
+ value: {{ .Values.dataLogDir | quote }}
+ - name: ZOO_PORT_NUMBER
+ value: {{ .Values.service.port | quote }}
+ - name: ZOO_TICK_TIME
+ value: {{ .Values.tickTime | quote }}
+ - name: ZOO_INIT_LIMIT
+ value: {{ .Values.initLimit | quote }}
+ - name: ZOO_SYNC_LIMIT
+ value: {{ .Values.syncLimit | quote }}
+ - name: ZOO_MAX_CLIENT_CNXNS
+ value: {{ .Values.maxClientCnxns | quote }}
+ - name: ZOO_4LW_COMMANDS_WHITELIST
+ value: {{ .Values.fourlwCommandsWhitelist | quote }}
+ - name: ZOO_LISTEN_ALLIPS_ENABLED
+ value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }}
+ - name: ZOO_AUTOPURGE_INTERVAL
+ value: {{ .Values.autopurge.purgeInterval | quote }}
+ - name: ZOO_AUTOPURGE_RETAIN_COUNT
+ value: {{ .Values.autopurge.snapRetainCount | quote }}
+ - name: ZOO_MAX_SESSION_TIMEOUT
+ value: {{ .Values.maxSessionTimeout | quote }}
+ - name: ZOO_SERVERS
+ {{- $replicaCount := int .Values.replicaCount }}
+ {{- $followerPort := int .Values.service.followerPort }}
+ {{- $electionPort := int .Values.service.electionPort }}
+ {{- $releaseNamespace := .Release.Namespace }}
+ {{- $zookeeperFullname := include "zookeeper.fullname" . }}
+ {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }}
+ {{- $clusterDomain := .Values.clusterDomain }}
+ value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }} {{ end }}
+ - name: ZOO_ENABLE_AUTH
+ value: {{ ternary "yes" "no" .Values.auth.enabled | quote }}
+ {{- if .Values.auth.enabled }}
+ - name: ZOO_CLIENT_USER
+ value: {{ .Values.auth.clientUser | quote }}
+ - name: ZOO_CLIENT_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" .
}}{{ end }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" . }}{{ end }} + key: server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "yes" "no" .Values.allowAnonymousLogin | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.service.tls.client_enable }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.service.tls.client_enable | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.service.tls.client_keystore_path | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + value: {{ .Values.service.tls.client_keystore_password | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.service.tls.client_truststore_path | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + value: {{ .Values.service.tls.client_truststore_password | quote }} + {{ end }} + {{- if .Values.service.tls.quorum_enable }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.service.tls.quorum_enable | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.service.tls.quorum_keystore_path | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + value: {{ .Values.service.tls.quorum_keystore_password | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.service.tls.quorum_truststore_path | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + value: {{ .Values.service.tls.quorum_truststore_password | quote }} + {{ end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- toYaml .Values.extraEnvVars | nindent 12 }} + {{- end }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} + - name: client + containerPort: {{ .Values.service.port }} + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: client-tls + containerPort: {{ .Values.service.tls.client_port }} + {{ end }} + - name: follower + containerPort: {{ .Values.service.followerPort }} + - name: election + containerPort: {{ .Values.service.electionPort }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + {{- if not .Values.service.tls.disable_base_client_port }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ 
.Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + {{- if not .Values.service.tls.disable_base_client_port }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + {{- end }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if .Values.config }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + volumes: + {{- if .Values.config }} + - name: config + configMap: + name: {{ template "zookeeper.fullname" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) )}} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "zookeeper.storageClass" . | nindent 8 }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "zookeeper.storageClass" . 
| nindent 8 }}
+ {{- end }}
+{{- end }}
diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml
new file mode 100755
index 000000000..972efb51d
--- /dev/null
+++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml
@@ -0,0 +1,42 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "zookeeper.fullname" . }}-headless
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "zookeeper.labels" . | nindent 4 }}
+ app.kubernetes.io/component: zookeeper
+ {{- if .Values.commonLabels }}
+ {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.service.headless.annotations }}
+ annotations:
+ {{- if .Values.service.headless.annotations }}
+ {{- include "zookeeper.tplValue" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }}
+ ports:
+ {{ if not .Values.service.tls.disable_base_client_port }}
+ - name: tcp-client
+ port: 2181
+ targetPort: client
+ {{ end }}
+ {{ if .Values.service.tls.client_enable }}
+ - name: tcp-client-tls
+ port: {{ .Values.service.tls.client_port }}
+ targetPort: client-tls
+ {{ end }}
+ - name: follower
+ port: 2888
+ targetPort: follower
+ - name: tcp-election
+ port: 3888
+ targetPort: election
+ selector: {{- include "zookeeper.matchLabels" . | nindent 4 }}
+ app.kubernetes.io/component: zookeeper
diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc.yaml
new file mode 100755
index 000000000..da3a2895a
--- /dev/null
+++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/templates/svc.yaml
@@ -0,0 +1,40 @@
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ template "zookeeper.fullname" . }}
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "zookeeper.labels" . | nindent 4 }}
+ app.kubernetes.io/component: zookeeper
+ {{- if .Values.commonLabels }}
+ {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if or .Values.commonAnnotations .Values.service.annotations }}
+ annotations:
+ {{- if .Values.service.annotations }}
+ {{- include "zookeeper.tplValue" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- if .Values.commonAnnotations }}
+ {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+ {{- end }}
+ {{- end }}
+spec:
+ type: {{ .Values.service.type }}
+ ports:
+ {{ if not .Values.service.tls.disable_base_client_port }}
+ - name: tcp-client
+ port: 2181
+ targetPort: client
+ {{ end }}
+ {{ if .Values.service.tls.client_enable }}
+ - name: tcp-client-tls
+ port: {{ .Values.service.tls.client_port }}
+ targetPort: client-tls
+ {{ end }}
+ - name: follower
+ port: 2888
+ targetPort: follower
+ - name: tcp-election
+ port: 3888
+ targetPort: election
+ selector: {{- include "zookeeper.matchLabels" .
| nindent 4 }}
+ app.kubernetes.io/component: zookeeper
diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values-production.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values-production.yaml
new file mode 100755
index 000000000..7d678603f
--- /dev/null
+++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values-production.yaml
@@ -0,0 +1,430 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+# imageRegistry: myRegistryName
+# imagePullSecrets:
+# - myRegistryKeySecretName
+# storageClass: myStorageClass
+
+## Bitnami Zookeeper image version
+## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/
+##
+image:
+ registry: docker.io
+ repository: bitnami/zookeeper
+ tag: 3.6.2-debian-10-r10
+
+ ## Specify an imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+ ## Set to true if you would like to see extra information on logs
+ ## It turns on BASH and NAMI debugging in minideb
+ ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+ ##
+ debug: false
+
+## String to partially override zookeeper.fullname template (will maintain the release name)
+# nameOverride:
+
+## String to fully override zookeeper.fullname template
+# fullnameOverride:
+
+## Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+
+## Add labels to all the deployed resources
+##
+commonLabels: {}
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+## Init container parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+ enabled: false
+ image:
+ registry: docker.io
+ repository: bitnami/minideb
+ tag: buster
+ pullPolicy: Always
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+ resources: {}
+
+## extraVolumes and extraVolumeMounts allow you to mount other volumes
+## Example Use Cases:
+## mount certificates to enable tls
+# extraVolumes:
+# - name: zookeeper-keystore
+# secret:
+# defaultMode: 288
+# secretName: zookeeper-keystore
+# - name: zookeeper-truststore
+# secret:
+# defaultMode: 288
+# secretName: zookeeper-truststore
+# extraVolumeMounts:
+# - name: zookeeper-keystore
+# mountPath: /certs/keystore
+# readOnly: true
+# - name: zookeeper-truststore
+# mountPath: /certs/truststore
+# readOnly: true
+
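+## As a hedged illustration only: if you mount keystores via the extra volumes above,
+## the service.tls.* settings further below must point at those mount paths. The file
+## names shown here are assumptions for the sketch, not chart defaults:
+# service:
+# tls:
+# client_enable: true
+# client_keystore_path: /certs/keystore/zookeeper.keystore.jks
+# client_truststore_path: /certs/truststore/zookeeper.truststore.jks
+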
+## StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+##
+updateStrategy: RollingUpdate
+
+## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions
+## The PDB will only be created if replicaCount is greater than 1
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions
+##
+podDisruptionBudget:
+ maxUnavailable: 1
+
+## Partition update strategy
+## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
+##
+# rollingUpdatePartition:
+
+## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+
+## Number of ZooKeeper nodes
+##
+replicaCount: 3
+
+## Basic time unit in milliseconds used by ZooKeeper for heartbeats
+##
+tickTime: 2000
+
+## Limits the length of time the ZooKeeper servers in quorum have to connect to a leader
+##
+initLimit: 10
+
+## How far out of date a server can be from a leader
+##
+syncLimit: 5
+
+## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
+##
+maxClientCnxns: 60
+
+## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime.
+##
+maxSessionTimeout: 40000
+
+## A list of comma-separated Four Letter Words commands to use
+##
+fourlwCommandsWhitelist: srvr, mntr, ruok
+
+## Allow zookeeper to listen for peers on all IPs
+##
+listenOnAllIPs: false
+
+## Whether to accept connections from unauthenticated users
+##
+allowAnonymousLogin: true
+
+autopurge:
+ ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest
+ ##
+ snapRetainCount: 3
+ ## The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable the auto purging.
+ ##
+ purgeInterval: 0
+
+auth:
+ ## Use existing secret (ignores previous password)
+ ##
+ # existingSecret:
+ ## Enable Zookeeper auth. It uses SASL/Digest-MD5
+ ##
+ enabled: false
+ ## User that Zookeeper clients will use to authenticate
+ ##
+ clientUser:
+ ## Password that Zookeeper clients will use to authenticate
+ ##
+ clientPassword:
+ ## Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
+ ##
+ serverUsers:
+ ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+ ##
+ serverPasswords:
+
+## Size in MB for the Java Heap options (Xmx and Xms). This env var is ignored if Xmx and Xms are configured via JVMFLAGS
+##
+heapSize: 1024
+
+## Log level for the Zookeeper server. ERROR by default. Bear in mind that if you set it to INFO or WARN, the readiness probe will produce a lot of logs.
+##
+logLevel: ERROR
+
+## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir.
+## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
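+## The matching "data-log" PVC is sized via persistence.dataLogDir.size further below;
+## a hedged sketch of the value pair (the 10Gi size is an illustrative assumption, not
+## a chart default):
+# dataLogDir: /bitnami/zookeeper/dataLog
+# persistence:
+# dataLogDir:
+# size: 10Gi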
+## Example: +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" + +## Default JVMFLAGS for the ZooKeeper process +## +# jvmFlags: + +## Configure ZooKeeper with a custom zoo.cfg file +## +# config: + +## Kubernetes configuration +## For minikube, set this to NodePort, elsewhere use LoadBalancer +## +service: + type: ClusterIP + port: 2181 + followerPort: 2888 + electionPort: 3888 + publishNotReadyAddresses: true + tls: + client_enable: true + quorum_enable: true + disable_base_client_port: true + + client_port: 3181 + + client_keystore_path: /tls_key_store/key_store_file + client_keystore_password: "" + client_truststore_path: /tls_trust_store/trust_store_file + client_truststore_password: "" + + quorum_keystore_path: /tls_key_store/key_store_file + quorum_keystore_password: "" + quorum_truststore_path: /tls_trust_store/trust_store_file + quorum_truststore_password: "" + annotations: {} + headless: + annotations: {} + +## Service account for Zookeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the zookeeper.fullname template + # name: + +## Zookeeper Pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Zookeeper data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + dataLogDir: + size: 8Gi + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Labels +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Annotations +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Configure extra options for liveness and readiness probes +## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ probeCommandTimeout: 2
+
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ probeCommandTimeout: 2
+
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+ ## Specifies whether a NetworkPolicy should be created
+ ##
+ enabled: true
+
+ ## The Policy model to apply. When set to false, only pods with the correct
+ ## client label will have network access to the port ZooKeeper is listening
+ ## on. When true, ZooKeeper accepts connections from any source
+ ## (with the correct destination port).
+ ##
+ allowExternal: true
+
+## Zookeeper Prometheus Exporter configuration
+##
+metrics:
+ enabled: false
+
+ ## Zookeeper Prometheus Exporter container port
+ ##
+ containerPort: 9141
+
+ ## Service configuration
+ ##
+ service:
+ ## Zookeeper Prometheus Exporter service type
+ ##
+ type: ClusterIP
+ ## Zookeeper Prometheus Exporter service port
+ ##
+ port: 9141
+ ## Annotations for the Zookeeper Prometheus Exporter metrics service
+ ##
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metrics.service.port }}"
+ prometheus.io/path: "/metrics"
+
+ ## Prometheus Operator ServiceMonitor configuration
+ ##
+ serviceMonitor:
+ enabled: false
+ ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+ ##
+ namespace:
+
+ ## Interval at which metrics should be scraped.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ # interval: 10s
+
+ ## Timeout after which the scrape is ended
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ # scrapeTimeout: 10s
+
+ ## ServiceMonitor selector labels
+ ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+ ##
+ # selector:
+ # prometheus: my-prometheus
+
+ ## Prometheus Operator PrometheusRule configuration
+ ##
+ prometheusRule:
+ enabled: false
+ ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+ ##
+ namespace:
+
+ ## PrometheusRule selector labels
+ ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+ ##
+ # selector:
+ # prometheus: my-prometheus
+
+ ## Some example rules.
+ rules: []
+ # - alert: ZookeeperSyncedFollowers
+ # annotations:
+ # message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+ # expr: max(synced_followers{service="my-release-metrics"}) < 2
+ # for: 5m
+ # labels:
+ # severity: critical
+ # - alert: ZookeeperOutstandingRequests
+ # annotations:
+ # message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+ # expr: outstanding_requests{service="my-release-metrics"} > 10
+ # for: 5m
+ # labels:
+ # severity: critical
diff --git a/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values.yaml b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values.yaml
new file mode 100755
index 000000000..a40decb54
--- /dev/null
+++ b/ee/scripts/helm/helm/databases/charts/kafka/charts/zookeeper/values.yaml
@@ -0,0 +1,430 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+# imageRegistry: myRegistryName
+# imagePullSecrets:
+# - myRegistryKeySecretName
+# storageClass: myStorageClass
+
+## Bitnami Zookeeper image version
+## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/
+##
+image:
+ registry: docker.io
+ repository: bitnami/zookeeper
+ tag: 3.6.2-debian-10-r10
+
+ ## Specify an imagePullPolicy
+ ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+ ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+ ##
+ pullPolicy: IfNotPresent
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+ ## Set to true if you would like to see extra information on logs
+ ## It turns on BASH and NAMI debugging in minideb
+ ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+ ##
+ debug: false
+
+## String to partially override zookeeper.fullname template (will maintain the release name)
+# nameOverride:
+
+## String to fully override zookeeper.fullname template
+# fullnameOverride:
+
+## Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+
+## Add labels to all the deployed resources
+##
+commonLabels: {}
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+## Init container parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+ enabled: false
+ image:
+ registry: docker.io
+ repository: bitnami/minideb
+ tag: buster
+ pullPolicy: Always
+ ## Optionally specify an array of imagePullSecrets.
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+ resources: {}
+
+## extraVolumes and extraVolumeMounts allow you to mount other volumes
+## Example Use Cases:
+## mount certificates to enable tls
+# extraVolumes:
+# - name: zookeeper-keystore
+# secret:
+# defaultMode: 288
+# secretName: zookeeper-keystore
+# - name: zookeeper-truststore
+# secret:
+# defaultMode: 288
+# secretName: zookeeper-truststore
+# extraVolumeMounts:
+# - name: zookeeper-keystore
+# mountPath: /certs/keystore
+# readOnly: true
+# - name: zookeeper-truststore
+# mountPath: /certs/truststore
+# readOnly: true
+
+## StatefulSet controller supports automated updates.
There are two valid update strategies: RollingUpdate and OnDelete
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+##
+updateStrategy: RollingUpdate
+
+## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions
+## The PDB will only be created if replicaCount is greater than 1
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions
+##
+podDisruptionBudget:
+ maxUnavailable: 1
+
+## Partition update strategy
+## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
+##
+# rollingUpdatePartition:
+
+## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+
+## Number of ZooKeeper nodes
+##
+replicaCount: 1
+
+## Basic time unit in milliseconds used by ZooKeeper for heartbeats
+##
+tickTime: 2000
+
+## Limits the length of time the ZooKeeper servers in quorum have to connect to a leader
+##
+initLimit: 10
+
+## How far out of date a server can be from a leader
+##
+syncLimit: 5
+
+## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
+##
+maxClientCnxns: 60
+
+## A list of comma-separated Four Letter Words commands to use
+##
+fourlwCommandsWhitelist: srvr, mntr, ruok
+
+## Allow zookeeper to listen for peers on all IPs
+##
+listenOnAllIPs: false
+
+## Whether to accept connections from unauthenticated users
+##
+allowAnonymousLogin: true
+
+autopurge:
+ ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest
+ ##
+ snapRetainCount: 3
+ ## The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable the auto purging.
+ ##
+ purgeInterval: 0
+
+## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime.
+##
+maxSessionTimeout: 40000
+
+auth:
+ ## Use existing secret (ignores previous password)
+ ##
+ # existingSecret:
+ ## Enable Zookeeper auth. It uses SASL/Digest-MD5
+ ##
+ enabled: false
+ ## User that Zookeeper clients will use to authenticate
+ ##
+ clientUser:
+ ## Password that Zookeeper clients will use to authenticate
+ ##
+ clientPassword:
+ ## Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
+ ##
+ serverUsers:
+ ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+ ##
+ serverPasswords:
+
+## Size in MB for the Java Heap options (Xmx and Xms). This env var is ignored if Xmx and Xms are configured via JVMFLAGS
+##
+heapSize: 1024
+
+## Log level for the Zookeeper server. ERROR by default. Bear in mind that if you set it to INFO or WARN, the readiness probe will produce a lot of logs.
+##
+logLevel: ERROR
+
+## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir.
+## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
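+## When set, the statefulset exports this path to the container as ZOO_DATA_LOG_DIR
+## and mounts a dedicated "data-log" volume there; a hedged sketch of the resulting
+## rendered env entry (the path shown is the Example value below, an assumption):
+# - name: ZOO_DATA_LOG_DIR
+# value: "/bitnami/zookeeper/dataLog"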
+## Example: +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" + +## Default JVMFLAGS for the ZooKeeper process +## +# jvmFlags: + +## Configure ZooKeeper with a custom zoo.cfg file +## +# config: + +## Kubernetes configuration +## For minikube, set this to NodePort, elsewhere use LoadBalancer +## +service: + type: ClusterIP + port: 2181 + followerPort: 2888 + electionPort: 3888 + publishNotReadyAddresses: true + tls: + client_enable: false + quorum_enable: false + disable_base_client_port: false + + client_port: 3181 + + client_keystore_path: /tls_key_store/key_store_file + client_keystore_password: "" + client_truststore_path: /tls_trust_store/trust_store_file + client_truststore_password: "" + + quorum_keystore_path: /tls_key_store/key_store_file + quorum_keystore_password: "" + quorum_truststore_path: /tls_trust_store/trust_store_file + quorum_truststore_password: "" + annotations: {} + headless: + annotations: {} + +## Service account for Zookeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the zookeeper.fullname template + # name: + +## Zookeeper Pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Zookeeper data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + enabled: true + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + dataLogDir: + size: 8Gi + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Labels +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Annotations +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Configure extra options for liveness and readiness probes +## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+livenessProbe:
+ enabled: true
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ probeCommandTimeout: 2
+
+readinessProbe:
+ enabled: true
+ initialDelaySeconds: 5
+ periodSeconds: 10
+ timeoutSeconds: 5
+ failureThreshold: 6
+ successThreshold: 1
+ probeCommandTimeout: 2
+
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+ ## Specifies whether a NetworkPolicy should be created
+ ##
+ enabled: false
+
+ ## The Policy model to apply. When set to false, only pods with the correct
+ ## client label will have network access to the port ZooKeeper is listening
+ ## on. When true, ZooKeeper accepts connections from any source
+ ## (with the correct destination port).
+ ##
+ # allowExternal: true
+
+## Zookeeper Prometheus Exporter configuration
+##
+metrics:
+ enabled: false
+
+ ## Zookeeper Prometheus Exporter container port
+ ##
+ containerPort: 9141
+
+ ## Service configuration
+ ##
+ service:
+ ## Zookeeper Prometheus Exporter service type
+ ##
+ type: ClusterIP
+ ## Zookeeper Prometheus Exporter service port
+ ##
+ port: 9141
+ ## Annotations for the Zookeeper Prometheus Exporter metrics service
+ ##
+ annotations:
+ prometheus.io/scrape: "true"
+ prometheus.io/port: "{{ .Values.metrics.service.port }}"
+ prometheus.io/path: "/metrics"
+
+ ## Prometheus Operator ServiceMonitor configuration
+ ##
+ serviceMonitor:
+ enabled: false
+ ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+ ##
+ namespace:
+
+ ## Interval at which metrics should be scraped.
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ # interval: 10s
+
+ ## Timeout after which the scrape is ended
+ ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+ ##
+ # scrapeTimeout: 10s
+
+ ## ServiceMonitor selector labels
+ ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+ ##
+ # selector:
+ # prometheus: my-prometheus
+
+ ## Prometheus Operator PrometheusRule configuration
+ ##
+ prometheusRule:
+ enabled: false
+ ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+ ##
+ namespace:
+
+ ## PrometheusRule selector labels
+ ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+ ##
+ # selector:
+ # prometheus: my-prometheus
+
+ ## Some example rules.
+ rules: []
+ # - alert: ZookeeperSyncedFollowers
+ # annotations:
+ # message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+ # expr: max(synced_followers{service="my-release-metrics"}) < 2
+ # for: 5m
+ # labels:
+ # severity: critical
+ # - alert: ZookeeperOutstandingRequests
+ # annotations:
+ # message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+ # expr: outstanding_requests{service="my-release-metrics"} > 10 + # for: 5m + # labels: + # severity: critical diff --git a/ee/scripts/helm/helm/databases/charts/kafka/files/jks/README.md b/ee/scripts/helm/helm/databases/charts/kafka/files/jks/README.md new file mode 100755 index 000000000..e110a8825 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/files/jks/README.md @@ -0,0 +1,10 @@ +# Java Key Stores + +You can copy here your Java Key Stores (JKS) files so a secret is created including them. Remember to use a truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. For instance, if you have 3 brokers you need to copy here the following files: + +- kafka.truststore.jks +- kafka-0.keystore.jks +- kafka-1.keystore.jks +- kafka-2.keystore.jks + +Find more info in [this section](https://github.com/bitnami/charts/tree/master/bitnami/kafka#enable-security-for-kafka-and-zookeeper) of the README.md file. diff --git a/ee/scripts/helm/helm/databases/charts/kafka/kafka.yaml b/ee/scripts/helm/helm/databases/charts/kafka/kafka.yaml new file mode 100644 index 000000000..acd718957 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/kafka.yaml @@ -0,0 +1,521 @@ +--- +# Source: kafka/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-scripts + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm +data: + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"kafka-"}" + export KAFKA_CFG_BROKER_ID="$ID" + + exec /entrypoint.sh /run.sh +--- +# Source: kafka/charts/zookeeper/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-zookeeper-headless + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + + - name: tcp-client + port: 2181 + targetPort: client + + + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper +--- +# Source: kafka/charts/zookeeper/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-zookeeper + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper +spec: + type: ClusterIP + ports: + + - name: tcp-client + port: 2181 + targetPort: client + + + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper +--- +# Source: kafka/templates/kafka-metrics-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-metrics + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + 
app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + annotations: + + prometheus.io/path: /metrics + prometheus.io/port: '9308' + prometheus.io/scrape: "true" +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 9308 + protocol: TCP + targetPort: metrics + nodePort: null + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: metrics +--- +# Source: kafka/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: 9092 + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: 9093 + protocol: TCP + targetPort: kafka-internal + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + type: ClusterIP + ports: + - name: tcp-client + port: 9092 + protocol: TCP + targetPort: kafka-client + nodePort: null + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/kafka-metrics-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-exporter + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: metrics + template: + metadata: + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + spec: + containers: + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.2.0-debian-10-r220 + imagePullPolicy: "IfNotPresent" + command: + - /bin/bash + - -ec + - | + read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" + kafka_exporter \ + --kafka.server=kafka-0.kafka-headless.db.svc.cluster.local:9092 \ + --kafka.server=kafka-1.kafka-headless.db.svc.cluster.local:9092 \ + --web.listen-address=:9308 + ports: + - name: metrics + containerPort: 9308 + resources: + limits: {} + requests: {} +--- +# Source: kafka/charts/zookeeper/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka-zookeeper + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper + role: zookeeper +spec: + serviceName: kafka-zookeeper-headless + replicas: 1 + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper + template: + metadata: + name: kafka-zookeeper + labels: + app.kubernetes.io/name: 
zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper + spec: + + serviceAccountName: default + securityContext: + fsGroup: 1001 + containers: + - name: zookeeper + image: docker.io/bitnami/zookeeper:3.6.2-debian-10-r10 + imagePullPolicy: "IfNotPresent" + securityContext: + runAsUser: 1001 + command: + - bash + - -ec + - | + # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname + HOSTNAME=`hostname -s` + if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then + ORD=${BASH_REMATCH[2]} + export ZOO_SERVER_ID=$((ORD+1)) + else + echo "Failed to get index from hostname $HOST" + exit 1 + fi + exec /entrypoint.sh /run.sh + resources: + requests: + cpu: 250m + memory: 256Mi + env: + - name: ZOO_DATA_LOG_DIR + value: "" + - name: ZOO_PORT_NUMBER + value: "2181" + - name: ZOO_TICK_TIME + value: "2000" + - name: ZOO_INIT_LIMIT + value: "10" + - name: ZOO_SYNC_LIMIT + value: "5" + - name: ZOO_MAX_CLIENT_CNXNS + value: "60" + - name: ZOO_4LW_COMMANDS_WHITELIST + value: "srvr, mntr, ruok" + - name: ZOO_LISTEN_ALLIPS_ENABLED + value: "no" + - name: ZOO_AUTOPURGE_INTERVAL + value: "0" + - name: ZOO_AUTOPURGE_RETAIN_COUNT + value: "3" + - name: ZOO_MAX_SESSION_TIMEOUT + value: "40000" + - name: ZOO_SERVERS + value: kafka-zookeeper-0.kafka-zookeeper-headless.db.svc.cluster.local:2888:3888 + - name: ZOO_ENABLE_AUTH + value: "no" + - name: ZOO_HEAP_SIZE + value: "1024" + - name: ZOO_LOG_LEVEL + value: "ERROR" + - name: ALLOW_ANONYMOUS_LOGIN + value: "yes" + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + ports: + + - name: client + containerPort: 2181 + + + - name: follower + containerPort: 2888 + - name: election + containerPort: 3888 + livenessProbe: + exec: + command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok'] + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok'] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + volumes: + volumeClaimTemplates: + - metadata: + name: data + annotations: + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" +--- +# Source: kafka/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + podManagementPolicy: Parallel + replicas: 2 + selector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka + serviceName: kafka-headless + updateStrategy: + type: "RollingUpdate" + template: + metadata: + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka + spec: + securityContext: + fsGroup: 1001 + runAsUser: 1001 + serviceAccountName: kafka + containers: + - name: kafka + image: docker.io/bitnami/kafka:2.6.0-debian-10-r30 + imagePullPolicy: "IfNotPresent" + command: + - /scripts/setup.sh + env: + - name: BITNAMI_DEBUG + value: "false" + - name: MY_POD_IP + 
valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + value: "kafka-zookeeper" + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: "INTERNAL" + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + value: "INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT" + - name: KAFKA_CFG_LISTENERS + value: "INTERNAL://:9093,CLIENT://:9092" + - name: KAFKA_CFG_ADVERTISED_LISTENERS + value: "INTERNAL://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9093,CLIENT://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9092" + - name: ALLOW_PLAINTEXT_LISTENER + value: "yes" + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: "false" + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: "true" + - name: KAFKA_HEAP_OPTS + value: "-Xmx1024m -Xms1024m" + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: "10000" + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: "1000" + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: "1073741824" + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS + value: "300000" + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: "168" + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: "1000012" + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: "1073741824" + - name: KAFKA_CFG_LOG_DIRS + value: "/bitnami/kafka/data" + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: "1" + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: "1" + - name: KAFKA_CFG_NUM_IO_THREADS + value: "8" + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: "3" + - name: KAFKA_CFG_NUM_PARTITIONS + value: "1" + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: "1" + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: "102400" + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: "104857600" + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: "102400" + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: "6000" + ports: + - name: kafka-client + containerPort: 9092 + - name: kafka-internal + containerPort: 9093 + livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 10 + timeoutSeconds: 5 + failureThreshold: + periodSeconds: + successThreshold: + readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 5 + timeoutSeconds: 5 + failureThreshold: 6 + periodSeconds: + successThreshold: + resources: + limits: {} + requests: {} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + volumes: + - name: scripts + configMap: + name: kafka-scripts + defaultMode: 0755 + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" diff --git a/ee/scripts/helm/helm/databases/charts/kafka/requirements.lock b/ee/scripts/helm/helm/databases/charts/kafka/requirements.lock new file mode 100755 index 000000000..115d0b229 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: zookeeper + repository: https://charts.bitnami.com/bitnami + version: 5.21.9 +digest: sha256:2f3c43ce02e3966648b8c89be121fe39537f62ea1d161ad908f51ddc90e4243e +generated: "2020-09-29T07:43:56.483358254Z" diff --git a/ee/scripts/helm/helm/databases/charts/kafka/requirements.yaml b/ee/scripts/helm/helm/databases/charts/kafka/requirements.yaml new file mode 100755 index 
000000000..533875258 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/requirements.yaml @@ -0,0 +1,5 @@ +dependencies: + - name: zookeeper + version: 5.x.x + repository: https://charts.bitnami.com/bitnami + condition: zookeeper.enabled diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/NOTES.txt b/ee/scripts/helm/helm/databases/charts/kafka/templates/NOTES.txt new file mode 100755 index 000000000..0347c21c4 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/NOTES.txt @@ -0,0 +1,181 @@ +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "kafka.fullname" . -}} +{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}} +{{- $servicePort := int .Values.service.port -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}} +{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }} + +############################################################################### +### ERROR: You enabled external access to Kafka brokers without specifying ### +### the array of load balancer IPs for Kafka brokers. ### +############################################################################### + +This deployment will be incomplete until you configure the array of load balancer +IPs for Kafka brokers. To complete your deployment, follow the steps below: + +1. Wait for the load balancer IPs (it may take a few minutes for them to be available): + + kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w + +2. Obtain the load balancer IPs and upgrade your chart: + + {{- range $i, $e := until $replicaCount }} + LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')" + {{- end }} + +3. Upgrade your chart: + + helm upgrade {{ .Release.Name }} bitnami/{{ .Chart.Name }} \ + --set replicaCount={{ $replicaCount }} \ + --set externalAccess.enabled=true \ + {{- range $i, $e := until $replicaCount }} + --set externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \ + {{- end }} + --set externalAccess.service.type=LoadBalancer + +{{- else }} + +{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq .Values.auth.clientProtocol "plaintext") }} +--------------------------------------------------------------------------------------------- + WARNING + + By specifying "serviceType=LoadBalancer" and not configuring the authentication + you have most likely exposed the Kafka service externally without any + authentication mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". As an alternative, you can also configure the Kafka authentication. + +--------------------------------------------------------------------------------------------- +{{- end }}
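
A concrete version of steps 1-3 above, as a sketch against the values used elsewhere in this diff (release name `kafka`, namespace `db`, 2 replicas; the per-broker services follow the chart's `<fullname>-<i>-external` naming):

    LOAD_BALANCER_IP_1="$(kubectl get svc --namespace db kafka-0-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
    LOAD_BALANCER_IP_2="$(kubectl get svc --namespace db kafka-1-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
    helm upgrade kafka bitnami/kafka \
      --set replicaCount=2 \
      --set externalAccess.enabled=true \
      --set "externalAccess.service.loadBalancerIPs[0]=$LOAD_BALANCER_IP_1" \
      --set "externalAccess.service.loadBalancerIPs[1]=$LOAD_BALANCER_IP_2" \
      --set externalAccess.service.type=LoadBalancer
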
+ +** Please be patient while the chart is being deployed ** + +Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster: + + {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }} + +Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster: + +{{- $brokerList := list }} +{{- range $e, $i := until $replicaCount }} +{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }} +{{- end }} +{{ join "\n" $brokerList | nindent 4 }} + + +{{- if (include "kafka.client.saslAuthentication" .) }} + +You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files by executing these commands: + + - kafka_jaas.conf: + +cat > kafka_jaas.conf <<EOF +[... heredoc body truncated in this dump ...] + + - client.properties: + +cat > client.properties <<EOF +[... the remainder of NOTES.txt and the intervening template files are missing from this dump; the patch resumes inside templates/jmx-configmap.yaml ...] + - pattern: kafka.controller<type=(\w+), name=(\w+), broker-id=(\d+)><>(Value) + name: kafka_controller_$1_$2_$4 + labels: + broker_id: "$3" + - pattern: kafka.controller<type=(ControllerChannelManager), name=(TotalQueueSize)><>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<type=(KafkaController), name=(.+)><>(Value) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.controller<type=(ControllerStats), name=(.+)><>(Count) + name: kafka_controller_$1_$2_$3 + - pattern: kafka.server<type=(ReplicaFetcherManager), name=(MaxLag), clientId=(.+)><>(Value) + name: kafka_server_$1_$2_$4 + labels: + client_id: "$3" + - pattern : kafka.network<type=(Processor), name=(IdlePercent), networkProcessor=(.+)><>(Value) + name: kafka_network_$1_$2_$4 + labels: + network_processor: $3 + - pattern : kafka.network<type=(RequestMetrics), name=(RequestsPerSec), request=(.+)><>(Count) + name: kafka_network_$1_$2_$4 + labels: + request: $3 + - pattern: kafka.server<type=(.+), name=(.+), topic=(.+)><>(Count|OneMinuteRate) + name: kafka_server_$1_$2_$4 + labels: + topic: $3 + - pattern: kafka.server<type=(.+), name=(.+), delayedOperation=(.+)><>(Value) + name: kafka_server_$1_$2_$3_$4 + - pattern: kafka.server<type=(.+), name=(.+)><>(Count|Value|OneMinuteRate) + name: kafka_server_$1_total_$2_$3 + - pattern: kafka.server<type=(.+)><>(queue-size) + name: kafka_server_$1_$2 + - pattern: java.lang<type=(.+), name=(.+)><(.+)>(\w+) + name: java_lang_$1_$4_$3_$2 + - pattern: java.lang<type=(.+), name=(.+)><>(\w+) + name: java_lang_$1_$3_$2 + - pattern : java.lang<type=(.*)> + - pattern: kafka.log<type=(.+), name=(.+), topic=(.+), partition=(.+)><>Value + name: kafka_log_$1_$2 + labels: + topic: $3 + partition: $4 +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/jmx-metrics-svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/jmx-metrics-svc.yaml new file mode 100755 index 000000000..83edd8422 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/jmx-metrics-svc.yaml @@ -0,0 +1,45 @@ +{{- if .Values.metrics.jmx.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-jmx-metrics + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.jmx.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.jmx.service.type }} + {{- if eq .Values.metrics.jmx.service.type "LoadBalancer" }} + {{- if .Values.metrics.jmx.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.jmx.service.loadBalancerIP }} + {{- end }} + {{- if .Values.metrics.jmx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.jmx.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.metrics.jmx.service.type "ClusterIP") .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.port }} + protocol: TCP + targetPort: metrics + {{- if and (or (eq .Values.metrics.jmx.service.type "NodePort") (eq .Values.metrics.jmx.service.type "LoadBalancer")) (not (empty .Values.metrics.jmx.service.nodePort)) }} + nodePort: {{ .Values.metrics.jmx.service.nodePort }} + {{- else if eq .Values.metrics.jmx.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-deployment.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-deployment.yaml new file mode 100755 index 000000000..c547fbb39 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-deployment.yaml @@ -0,0 +1,87 @@ +{{- if .Values.metrics.kafka.enabled }} +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "kafka.fullname" . -}} +{{- $servicePort := int .Values.service.port -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kafka.fullname" . }}-exporter + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + template: + metadata: + labels: {{- include "kafka.labels" . | nindent 8 }} + app.kubernetes.io/component: metrics + spec: +{{- include "kafka.imagePullSecrets" . | indent 6 }} + containers: + - name: kafka-exporter + image: {{ include "kafka.metrics.kafka.image" . 
}} + imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" + kafka_exporter \ + {{- range $i, $e := until $replicaCount }} + --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + --sasl.enabled \ + --sasl.username="$SASL_USERNAME" \ + --sasl.password="${sasl_passwords[0]}" \ + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + --tls.enabled \ + {{- if .Values.metrics.kafka.certificatesSecret }} + --tls.ca-file="/opt/bitnami/kafka-exporter/certs/ca-file" \ + --tls.cert-file="/opt/bitnami/kafka-exporter/certs/cert-file" \ + --tls.key-file="/opt/bitnami/kafka-exporter/certs/key-file" \ + {{- end }} + {{- end }} + {{- range $key, $value := .Values.metrics.kafka.extraFlags }} + --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ + {{- end }} + --web.listen-address=:9308 + {{- if (include "kafka.client.saslAuthentication" .) }} + env: + - name: SASL_USERNAME + value: {{ index .Values.auth.jaas.clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + ports: + - name: metrics + containerPort: 9308 + {{- if .Values.metrics.kafka.resources }} + resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} + {{- end }} + {{- if and (include "kafka.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} + volumeMounts: + - name: kafka-exporter-certificates + mountPath: /opt/bitnami/kafka-exporter/certs/ + readOnly: true + volumes: + - name: kafka-exporter-certificates + secret: + secretName: {{ .Values.metrics.kafka.certificatesSecret }} + defaultMode: 0440 + {{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-svc.yaml new file mode 100755 index 000000000..54a4ccb0b --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/kafka-metrics-svc.yaml @@ -0,0 +1,45 @@ +{{- if .Values.metrics.kafka.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-metrics + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.kafka.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.kafka.service.type }} + {{- if eq .Values.metrics.kafka.service.type "LoadBalancer" }} + {{- if .Values.metrics.kafka.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.kafka.service.loadBalancerIP }} + {{- end }} + {{- if .Values.metrics.kafka.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.kafka.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.metrics.kafka.service.type "ClusterIP") .Values.metrics.kafka.service.clusterIP }} + clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.kafka.service.port }} + protocol: TCP + targetPort: metrics + {{- if and (or (eq .Values.metrics.kafka.service.type "NodePort") (eq .Values.metrics.kafka.service.type "LoadBalancer")) (not (empty .Values.metrics.kafka.service.nodePort)) }} + nodePort: {{ .Values.metrics.kafka.service.nodePort }} + {{- else if eq .Values.metrics.kafka.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/log4j-configmap.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/log4j-configmap.yaml new file mode 100755 index 000000000..0a34d50dd --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/log4j-configmap.yaml @@ -0,0 +1,16 @@ +{{- if (include "kafka.log4j.createConfigMap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka.log4j.configMapName" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + log4j.properties: |- + {{ .Values.log4j | nindent 4 }} +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/poddisruptionbudget.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..cf515becb --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/role.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/role.yaml new file mode 100755 index 000000000..943c5bf3c --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/role.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end -}} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/rolebinding.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/rolebinding.yaml new file mode 100755 index 000000000..78f940f85 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "kafka.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/scripts-configmap.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/scripts-configmap.yaml new file mode 100755 index 000000000..705545a61 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/scripts-configmap.yaml @@ -0,0 +1,118 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kafka.fullname" . }}-scripts + labels: {{- include "kafka.labels" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + {{- $fullname := include "kafka.fullname" . }} + {{- $releaseNamespace := .Release.Namespace }} + {{- $clusterDomain := .Values.clusterDomain }} + {{- $interBrokerPort := .Values.service.internalPort }} + {{- $clientPort := .Values.service.port }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + auto-discovery.sh: |- + #!/bin/bash + + SVC_NAME="${MY_POD_NAME}-external" + + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + # Auxiliary functions + retry_while() { + local -r cmd="${1:?cmd is missing}" + local -r retries="${2:-12}" + local -r sleep_time="${3:-5}" + local return_value=1 + + read -r -a command <<< "$cmd" + for ((i = 1 ; i <= retries ; i+=1 )); do + "${command[@]}" && return_value=0 && break + sleep "$sleep_time" + done + return $return_value + } + k8s_svc_lb_ip() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}") + local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}") + + if [[ -n ${service_ip} ]]; then + echo "${service_ip}" + else + echo "${service_hostname}" + fi + } + k8s_svc_lb_ip_ready() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]] + } + # Wait until LoadBalancer IP is ready + retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1 + # Obtain LoadBalancer external IP + k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + k8s_svc_node_port() { + local namespace=${1:?namespace is missing} + local service=${2:?service is missing} + local index=${3:-0} + local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")" + echo "$node_port" + } + k8s_svc_node_port "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE" + {{- end }} + {{- end }} + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"{{ $fullname }}-"}" + export KAFKA_CFG_BROKER_ID="$ID" + + {{- if .Values.externalAccess.enabled }} + # Configure external ip and port + {{- if eq .Values.externalAccess.service.type "LoadBalancer" }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_IP="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_IP=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} + export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.port }} + {{- else if eq .Values.externalAccess.service.type "NodePort" }} + {{- if .Values.externalAccess.service.domain }} + export EXTERNAL_ACCESS_IP={{ .Values.externalAccess.service.domain }} + {{- else }} + export EXTERNAL_ACCESS_IP=$(curl -s https://ipinfo.io/ip) + {{- end }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})" + {{- else }} + export EXTERNAL_ACCESS_PORT=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))") + {{- end }} +
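+    # Illustrative note: in this NodePort branch the two exports above resolve to
+    # something like EXTERNAL_ACCESS_IP=203.0.113.10 (node public IP via ipinfo.io,
+    # or externalAccess.service.domain when set) and EXTERNAL_ACCESS_PORT=30001
+    # (this broker's entry in externalAccess.service.nodePorts); both values feed
+    # the EXTERNAL advertised listener configured just below.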
{{- end }} + + # Configure Kafka advertised listeners + {{- if .Values.advertisedListeners }} + export KAFKA_CFG_ADVERTISED_LISTENERS={{ .Values.advertisedListeners }} + {{- else }} + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_IP}:${EXTERNAL_ACCESS_PORT}" + {{- end }} + {{- end }} + + {{- if (include "kafka.tlsEncryption" .) }} + if [[ -f "/certs/kafka.truststore.jks" ]] && [[ -f "/certs/kafka-${ID}.keystore.jks" ]]; then + mkdir -p /opt/bitnami/kafka/config/certs + cp "/certs/kafka.truststore.jks" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + cp "/certs/kafka-${ID}.keystore.jks" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled." + exit 1 + fi + {{- end }} + + exec /entrypoint.sh /run.sh diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/serviceaccount.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/serviceaccount.yaml new file mode 100755 index 000000000..790790b3f --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml new file mode 100755 index 000000000..250bb5306 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kafka.fullname" . }}-jmx-metrics + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "kafka.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: kafka + endpoints: + - port: http-metrics + path: "/" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-metrics.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-metrics.yaml new file mode 100755 index 000000000..951bf7c41 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/servicemonitor-metrics.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kafka.fullname" . }}-metrics + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/statefulset.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/statefulset.yaml new file mode 100755 index 000000000..e9b5ce8f9 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/statefulset.yaml @@ -0,0 +1,435 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- $fullname := include "kafka.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $interBrokerPort := .Values.service.internalPort }} +{{- $clientPort := .Values.service.port }} +{{- $interBrokerProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.interBrokerProtocol ) -}} +{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "kafka.fullname" . }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: Parallel + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + serviceName: {{ template "kafka.fullname" . }}-headless + updateStrategy: + type: {{ .Values.updateStrategy | quote }} + {{- if (eq "OnDelete" .Values.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.rollingUpdatePartition }} + {{- end }} + template: + metadata: + labels: {{- include "kafka.labels" . | nindent 8 }} + app.kubernetes.io/component: kafka + {{- if .Values.podLabels }} + {{- include "kafka.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "kafka.createConfigmap" .) (include "kafka.createJaasSecret" .) .Values.externalAccess.enabled (include "kafka.metrics.jmx.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "kafka.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createJaasSecret" .) }} + checksum/secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.metrics.jmx.createConfigmap" .) }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "kafka.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: +{{- include "kafka.imagePullSecrets" . | indent 6 }} + {{- if .Values.affinity }} + affinity: {{- include "kafka.tplValue" ( dict "value" .Values.affinity "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "kafka.tplValue" ( dict "value" .Values.nodeSelector "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "kafka.tplValue" ( dict "value" .Values.tolerations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext }} + securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.create }} + serviceAccountName: {{ template "kafka.serviceAccountName" . }} + {{- end }} + {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "kafka.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/kafka + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/kafka" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: auto-discovery + image: {{ include "kafka.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: {{- include "kafka.tplValue" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- if .Values.args }} + args: {{- include "kafka.tplValue" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + {{- if .Values.zookeeper.enabled }} + value: {{ include "kafka.zookeeper.fullname" . 
| quote }} + {{- else }} + value: {{ join "," .Values.externalZookeeper.servers | quote }} + {{- end }} + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: {{ .Values.interBrokerListenerName | quote }} + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + {{- if .Values.listenerSecurityProtocolMap }} + value: {{ .Values.listenerSecurityProtocolMap | quote }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $clientProtocol }}" + {{- else }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" + {{- end }} + {{- if or ($clientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.jaas.zookeeperUser }} + - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS + value: {{ include "kafka.auth.saslMechanisms" ( dict "type" .Values.auth.saslMechanisms ) }} + - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL + value: {{ upper .Values.auth.saslInterBrokerMechanism | quote }} + {{- end }} + - name: KAFKA_CFG_LISTENERS + {{- if .Values.listeners }} + value: {{ .Values.listeners }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092,EXTERNAL://:9094" + {{- else }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092" + {{- end }} + {{- if .Values.externalAccess.enabled }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + {{- else }} + - name: KAFKA_CFG_ADVERTISED_LISTENERS + {{- if .Values.advertisedListeners }} + value: {{ .Values.advertisedListeners }} + {{- else }} + value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }}" + {{- end }} + {{- end }} + - name: ALLOW_PLAINTEXT_LISTENER + value: {{ ternary "yes" "no" (or .Values.auth.enabled .Values.allowPlaintextListener) | quote }} + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_OPTS + value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" + {{- if (include "kafka.client.saslAuthentication" .) }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.auth.jaas.clientUsers | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + {{- if .Values.auth.jaas.zookeeperUser }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: "SASL" + - name: KAFKA_ZOOKEEPER_USER + value: {{ .Values.auth.jaas.zookeeperUser | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: zookeeper-password + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.auth.jaas.interBrokerUser | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: inter-broker-password + {{- end }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) 
}} + - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM + value: {{ .Values.auth.tlsEndpointIdentificationAlgorithm | quote }} + {{- if .Values.auth.jksPassword }} + - name: KAFKA_CERTIFICATE_PASSWORD + value: {{ .Values.auth.jksPassword | quote }} + {{- end }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: JMX_PORT + value: "5555" + {{- end }} + - name: KAFKA_CFG_DELETE_TOPIC_ENABLE + value: {{ .Values.deleteTopicEnable | quote }} + - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE + value: {{ .Values.autoCreateTopicsEnable | quote }} + - name: KAFKA_HEAP_OPTS + value: {{ .Values.heapOpts | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES + value: {{ .Values.logFlushIntervalMessages | quote }} + - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS + value: {{ .Values.logFlushIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_BYTES + value: {{ .Values.logRetentionBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS + value: {{ .Values.logRetentionCheckIntervalMs | quote }} + - name: KAFKA_CFG_LOG_RETENTION_HOURS + value: {{ .Values.logRetentionHours | quote }} + - name: KAFKA_CFG_MESSAGE_MAX_BYTES + value: {{ .Values.maxMessageBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_SEGMENT_BYTES + value: {{ .Values.logSegmentBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_LOG_DIRS + value: {{ .Values.logsDirs | quote }} + - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR + value: {{ .Values.defaultReplicationFactor | quote }} + - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR + value: {{ .Values.offsetsTopicReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR + value: {{ .Values.transactionStateLogReplicationFactor | quote }} + - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR + value: {{ .Values.transactionStateLogMinIsr | quote }} + - name: KAFKA_CFG_NUM_IO_THREADS + value: {{ .Values.numIoThreads | quote }} + - name: KAFKA_CFG_NUM_NETWORK_THREADS + value: {{ .Values.numNetworkThreads | quote }} + - name: KAFKA_CFG_NUM_PARTITIONS + value: {{ .Values.numPartitions | quote }} + - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR + value: {{ .Values.numRecoveryThreadsPerDataDir | quote }} + - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES + value: {{ .Values.socketReceiveBufferBytes | quote }} + - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES + value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }} + - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES + value: {{ .Values.socketSendBufferBytes | quote }} + - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS + value: {{ .Values.zookeeperConnectionTimeoutMs | quote }} + {{- if .Values.extraEnvVars }} + {{ include "kafka.tplValue" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + ports: + - name: kafka-client + containerPort: 9092 + - name: kafka-internal + containerPort: {{ $interBrokerPort }} + {{- if .Values.externalAccess.enabled }} + - name: kafka-external + containerPort: 9094 + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customLivenessProbe 
"context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customreadinessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + mountPath: /bitnami/kafka/config/server.properties + subPath: server.properties + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + mountPath: /bitnami/kafka/config/log4j.properties + subPath: log4j.properties + {{- end }} + - name: scripts + mountPath: /scripts/setup.sh + subPath: setup.sh + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + mountPath: /shared + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: kafka-certificates + mountPath: /certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-exporter + image: {{ template "kafka.metrics.jmx.image" . }} + imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }} + command: + - java + - -XX:+UnlockExperimentalVMOptions + - -XX:+UseCGroupMemoryLimitForHeap + - -XX:MaxRAMFraction=1 + - -XshowSettings:vm + - -jar + - jmx_prometheus_httpserver.jar + - "5556" + - /etc/jmx-kafka/jmx-kafka-prometheus.yml + ports: + - name: metrics + containerPort: 5556 + {{- if .Values.metrics.jmx.resources }} + resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: jmx-config + mountPath: /etc/jmx-kafka + {{- end }} + {{- if .Values.sidecars }} + {{- include "kafka.tplValue" (dict "value" .Values.sidecars "context" $) | nindent 8 }} + {{- end }} + volumes: + {{- if or .Values.config .Values.existingConfigmap }} + - name: kafka-config + configMap: + name: {{ include "kafka.configmapName" . }} + {{- end }} + {{- if or .Values.log4j .Values.existingLog4jConfigMap }} + - name: log4j-config + configMap: + name: {{ include "kafka.log4j.configMapName" . }} + {{ end }} + - name: scripts + configMap: + name: {{ include "kafka.fullname" . }}-scripts + defaultMode: 0755 + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: shared + emptyDir: {} + {{- end }} + {{- if .Values.metrics.jmx.enabled }} + - name: jmx-config + configMap: + name: {{ include "kafka.metrics.jmx.configmapName" . }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + - name: kafka-certificates + secret: + secretName: {{ include "kafka.jksSecretName" . 
}} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} +{{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} +{{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "kafka.tplValue" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "kafka.storageClass" . | nindent 8 }} +{{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-external-access.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-external-access.yaml new file mode 100755 index 000000000..eefe0046d --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-external-access.yaml @@ -0,0 +1,52 @@ +{{- if .Values.externalAccess.enabled }} +{{- $fullName := include "kafka.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" $ }}-{{ $i }}-external + labels: {{- include "kafka.labels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if $root.Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations }} + annotations: + {{- if $root.Values.externalAccess.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-kafka + port: {{ $root.Values.externalAccess.service.port }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: kafka-external + selector: {{- include "kafka.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-headless.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-headless.yaml new file mode 100755 index 000000000..e7c2e5e6e --- /dev/null +++ 
b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc-headless.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-headless + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: {{ .Values.service.port }} + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: {{ .Values.service.internalPort }} + protocol: TCP + targetPort: kafka-internal + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/ee/scripts/helm/helm/databases/charts/kafka/templates/svc.yaml b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc.yaml new file mode 100755 index 000000000..189cb9ffd --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/templates/svc.yaml @@ -0,0 +1,49 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-client + port: {{ .Values.service.port }} + protocol: TCP + targetPort: kafka-client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if and .Values.externalAccess.enabled (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }} + - name: tcp-external + port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: kafka-external + {{- if (not (empty .Values.service.nodePorts.external)) }} + nodePort: {{ .Values.service.nodePorts.external }} + {{- end }} + {{- end }} + selector: {{- include "kafka.matchLabels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/ee/scripts/helm/helm/databases/charts/kafka/values-production.yaml b/ee/scripts/helm/helm/databases/charts/kafka/values-production.yaml new file mode 100755 index 000000000..af6f43dba --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/values-production.yaml @@ -0,0 +1,931 @@ +## Global Docker image parameters +## Please note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.6.0-debian-10-r30 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## String to partially override kafka.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override kafka.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kafka Configuration +## Specify content for server.properties +## The server.properties is auto-generated based on other parameters when this parameter is not specified +## +## Example: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +# config: + +## ConfigMap with Kafka Configuration +## NOTE: This will override config +## +# existingConfigmap: + +## Kafka Log4J Configuration +## An optional log4j.properties file to overwrite the default of the Kafka brokers. +## See an example log4j.properties at: +## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +# log4j: + +## Kafka Log4j ConfigMap +## The name of an existing ConfigMap containing a log4j.properties file. +## NOTE: this will override log4j. +## +# existingLog4jConfigMap:
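
A sketch of how these two logging knobs are typically supplied; the release and ConfigMap names here are illustrative, not part of the chart:

    # Inline: pass a log4j.properties file as the log4j value
    helm upgrade kafka ./kafka --set-file log4j=./log4j.properties
    # Or reference a pre-created ConfigMap (takes precedence over log4j)
    kubectl create configmap kafka-log4j --from-file=log4j.properties
    helm upgrade kafka ./kafka --set existingLog4jConfigMap=kafka-log4j
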
+## +# existingLog4jConfigMap: + +## Kafka's Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m + +## Switch to enable topic deletion or not. +## +deleteTopicEnable: false + +## Switch to enable auto creation of topics. +## Enabling auto creation of topics not recommended for production or similar environments. +## +autoCreateTopicsEnable: false + +## The number of messages to accept before forcing a flush of data to disk. +## +logFlushIntervalMessages: 10000 + +## The maximum amount of time a message can sit in a log before we force a flush. +## +logFlushIntervalMs: 1000 + +## A size-based retention policy for logs. +## +logRetentionBytes: _1073741824 + +## The interval at which log segments are checked to see if they can be deleted. +## +logRetentionCheckIntervalMs: 300000 + +## The minimum age of a log file to be eligible for deletion due to age. +## +logRetentionHours: 168 + +## The maximum size of a log segment file. When this size is reached a new log segment will be created. +## +logSegmentBytes: _1073741824 + +## A comma separated list of directories under which to store log files. +## +logsDirs: /bitnami/kafka/data + +## The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 + +## Default replication factors for automatically created topics +## +defaultReplicationFactor: 3 + +## The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 3 + +## The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 3 + +## Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 3 + +## The number of threads doing disk I/O. +## +numIoThreads: 8 + +## The number of threads handling network requests. +## +numNetworkThreads: 3 + +## The default number of log partitions per topic. +## +numPartitions: 1 + +## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +## +numRecoveryThreadsPerDataDir: 1 + +## The receive buffer (SO_RCVBUF) used by the socket server. +## +socketReceiveBufferBytes: 102400 + +## The maximum size of a request that the socket server will accept (protection against OOM). +## +socketRequestMaxBytes: _104857600 + +## The send buffer (SO_SNDBUF) used by the socket server. +## +socketSendBufferBytes: 102400 + +## Timeout in ms for connecting to zookeeper. +## +zookeeperConnectionTimeoutMs: 6000 + +## Command and args for running the container. 
Use array form +## +command: + - /scripts/setup.sh +args: + +## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## Example: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Examples: +# extraVolumes: +# - name: kafka-jaas +# secret: +# secretName: kafka-jaas +# extraVolumeMounts: +# - name: kafka-jaas +# mountPath: /bitnami/kafka/config/kafka_jaas.conf +# subPath: kafka_jaas.conf +extraVolumes: [] +extraVolumeMounts: [] + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Authentication parameteres +## https://github.com/bitnami/bitnami-docker-kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## + clientProtocol: sasl + interBrokerProtocol: sasl + + ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls + ## + saslMechanisms: plain,scram-sha-256,scram-sha-512 + ## SASL mechanism for inter broker communication + ## + saslInterBrokerMechanism: plain + + ## Name of the existing secret containing the truststore and + ## one keystore per Kafka broker you have in the Kafka cluster. + ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. + ## Create this secret following the steps below: + ## 1) Generate your trustore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. + ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... + ## Alternatively, you can put your JKS files under the files/jks directory + ## + # jksSecret: + + ## Password to access the JKS files when they are password-protected. + ## + # jksPassword: + + ## The endpoint identification algorithm used by clients to validate server host name. 
+ ## Disable server host name verification by setting it to an empty string + ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + tlsEndpointIdentificationAlgorithm: https + + ## JAAS configuration for SASL authentication + ## MANDATORY when method is 'sasl', or 'sasl_tls' + ## + jaas: + ## Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + + ## Kafka client passwords + ## + ## clientPasswords: + ## - password1 + ## - password2 + ## + clientPasswords: [] + + ## Kafka inter broker communication user + ## + interBrokerUser: admin + + ## Kafka inter broker communication password + ## + interBrokerPassword: "" + + ## Kafka Zookeeper user + ## + zookeeperUser: zookeeperUser + + ## Kafka Zookeeper password + ## + zookeeperPassword: zookeeperPassword + + ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-password=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + # existingSecret: + +## The address(es) the socket server listens on. +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] + +## The address(es) (hostname:port) the brokers will advertise to producers and consumers. +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] + +## The listener->protocol mapping +## When it's nil, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +# listenerSecurityProtocolMap: + +## Allow to use the PLAINTEXT listener. +## +allowPlaintextListener: false + +## Name of listener used for communication between brokers. +## +interBrokerListenerName: INTERNAL + +## Number of Kafka brokers to deploy +## +replicaCount: 3 + +## StrategyType, can be set to RollingUpdate or OnDelete by default. +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. 
Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## Kafka containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 1Gi + requests: {} + # cpu: 250m + # memory: 256Mi + +## Kafka containers' liveness and readiness probes. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 10 + timeoutSeconds: 5 + # failureThreshold: 3 + # periodSeconds: 10 + # successThreshold: 1 +readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + # periodSeconds: 10 + # successThreshold: 1 + +## Pod Disruption Budget configuration +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + # minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: 1 + +## Add sidecars to the pod. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## Kafka port for client connections + ## + port: 9092 + ## Kafka port for inter-broker connections + ## + internalPort: 9093 + ## Kafka port for external connections + ## + externalPort: 9094 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + external: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. 
Evaluated as a template + ## + annotations: {} + +## External Access to Kafka brokers configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.17.12-debian-10-r3 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## A new service per broker will be created + ## + service: + ## Service type. Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 9094 + ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## Persistence paramaters +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ ## + # storageClass: "-" + ## PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## PVC size + ## + size: 8Gi + ## PVC annotations + ## + annotations: {} + +## Init Container paramaters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + enabled: false + ## Bitnami Minideb image + ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## Kafka pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fluentd.fullname template + ## + # name: + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## binding Kafka ServiceAccount to a role + ## that allows Kafka pods querying the K8s API + ## + create: false + +## Prometheus Exporters / Metrics +## +metrics: + ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter + ## + kafka: + enabled: true + + ## Bitnami Kafka exporter image + ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.2.0-debian-10-r220 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Extra flags to be passed to Kafka exporter + ## Example: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" + ## + extraFlags: {} + + ## Name of the existing secret containing the optional certificate and key files + ## for Kafka Exporter client authentication + ## + # certificatesSecret: + + ## Prometheus Kafka Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## Kafka Exporter Service type + ## + type: ClusterIP + ## Kafka Exporter Prometheus port + ## + port: 9308 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the Kafka Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## + jmx: + enabled: true + + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.14.0-debian-10-r15 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Prometheus JMX Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## JMX Exporter Service type + ## + type: ClusterIP + ## JMX Exporter Prometheus port + ## + port: 5556 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the JMX Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/path: "/" + + ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted + ## values will be exposed via JMX Exporter. They must also be exposed via Rules. 
To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + + ## Prometheus JMX exporter configuration + ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + + ## ConfigMap with Prometheus JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + # existingConfigmap: + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + +## +## Zookeeper chart configuration +## +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + enabled: true + auth: + ## Enable Zookeeper auth + ## + enabled: true + ## User that will use Zookeeper clients to auth + ## + clientUser: zookeeperUser + ## Password that will use Zookeeper clients to auth + ## + clientPassword: zookeeperPassword + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: zookeeperUser + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: zookeeperPassword + metrics: + enabled: true + +## This value is only used when zookeeper.enabled is set to false +## +externalZookeeper: + ## Server or list of external zookeeper servers to use. 
+ ## + servers: [] diff --git a/ee/scripts/helm/helm/databases/charts/kafka/values.yaml b/ee/scripts/helm/helm/databases/charts/kafka/values.yaml new file mode 100755 index 000000000..154d71bd5 --- /dev/null +++ b/ee/scripts/helm/helm/databases/charts/kafka/values.yaml @@ -0,0 +1,934 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.6.0-debian-10-r30 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## String to partially override kafka.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override kafka.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kafka Configuration +## Specify content for server.properties +## The server.properties is auto-generated based on other parameters when this paremeter is not specified +## +## Example: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +# config: + +## ConfigMap with Kafka Configuration +## NOTE: This will override config +## +# existingConfigmap: + +## Kafka Log4J Configuration +## An optional log4j.properties file to overwrite the default of the Kafka brokers. +## See an example log4j.properties at: +## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +# log4j: + +## Kafka Log4j ConfigMap +## The name of an existing ConfigMap containing a log4j.properties file. +## NOTE: this will override log4j. +## +# existingLog4jConfigMap: + +## Kafka's Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m + +## Switch to enable topic deletion or not. 
+## +deleteTopicEnable: false + +## Switch to enable auto creation of topics. +## Enabling auto creation of topics not recommended for production or similar environments. +## +autoCreateTopicsEnable: true + +## The number of messages to accept before forcing a flush of data to disk. +## +logFlushIntervalMessages: 10000 + +## The maximum amount of time a message can sit in a log before we force a flush. +## +logFlushIntervalMs: 1000 + +## A size-based retention policy for logs. +## +logRetentionBytes: _1073741824 + +## The interval at which log segments are checked to see if they can be deleted. +## +logRetentionCheckIntervalMs: 300000 + +## The minimum age of a log file to be eligible for deletion due to age. +## +logRetentionHours: 168 + +## The maximum size of a log segment file. When this size is reached a new log segment will be created. +## +logSegmentBytes: _1073741824 + +## A comma separated list of directories under which to store log files. +## +logsDirs: /bitnami/kafka/data + +## The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 + +## Default replication factors for automatically created topics +## +defaultReplicationFactor: 1 + +## The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 1 + +## The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 1 + +## Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 1 + +## The number of threads doing disk I/O. +## +numIoThreads: 8 + +## The number of threads handling network requests. +## +numNetworkThreads: 3 + +## The default number of log partitions per topic. +## +numPartitions: 1 + +## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +## +numRecoveryThreadsPerDataDir: 1 + +## The receive buffer (SO_RCVBUF) used by the socket server. +## +socketReceiveBufferBytes: 102400 + +## The maximum size of a request that the socket server will accept (protection against OOM). +## +socketRequestMaxBytes: _104857600 + +## The send buffer (SO_SNDBUF) used by the socket server. +## +socketSendBufferBytes: 102400 + +## Timeout in ms for connecting to zookeeper. +## +zookeeperConnectionTimeoutMs: 6000 + +## Command and args for running the container. 
Use array form +## +command: + - /scripts/setup.sh +args: + +## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## Example: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Examples: +# extraVolumes: +# - name: kafka-jaas +# secret: +# secretName: kafka-jaas +# extraVolumeMounts: +# - name: kafka-jaas +# mountPath: /bitnami/kafka/config/kafka_jaas.conf +# subPath: kafka_jaas.conf +extraVolumes: [] +extraVolumeMounts: [] + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Authentication parameteres +## https://github.com/bitnami/bitnami-docker-kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## + clientProtocol: plaintext + interBrokerProtocol: plaintext + + ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls + ## + saslMechanisms: plain,scram-sha-256,scram-sha-512 + ## SASL mechanism for inter broker communication + ## + saslInterBrokerMechanism: plain + + ## Name of the existing secret containing the truststore and + ## one keystore per Kafka broker you have in the Kafka cluster. + ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. + ## Create this secret following the steps below: + ## 1) Generate your trustore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. + ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... + ## Alternatively, you can put your JKS files under the files/jks directory + ## + # jksSecret: + + ## Password to access the JKS files when they are password-protected. + ## + # jksPassword: + + ## The endpoint identification algorithm used by clients to validate server host name. + ## Disable server host name verification by setting it to an empty string + ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + tlsEndpointIdentificationAlgorithm: https + + ## JAAS configuration for SASL authentication + ## MANDATORY when method is 'sasl', or 'sasl_tls' + ## + jaas: + ## Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + + ## Kafka client passwords. This is mandatory if more than one user is specified in clientUsers. 
+ ## + ## clientPasswords: + ## - password1 + ## - password2" + ## + clientPasswords: [] + + ## Kafka inter broker communication user + ## + interBrokerUser: admin + + ## Kafka inter broker communication password + ## + interBrokerPassword: "" + + ## Kafka Zookeeper user + ## + # zookeeperUser: + + ## Kafka Zookeeper password + ## + # zookeeperPassword: + + ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + # existingSecret: + +## The address(es) the socket server listens on. +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] + +## The address(es) (hostname:port) the brokers will advertise to producers and consumers. +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] + +## The listener->protocol mapping +## When it's nil, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +# listenerSecurityProtocolMap: + +## Allow to use the PLAINTEXT listener. +## +allowPlaintextListener: true + +## Name of listener used for communication between brokers. +## +interBrokerListenerName: INTERNAL + +## Number of Kafka brokers to deploy +## +replicaCount: 2 + +## StrategyType, can be set to RollingUpdate or OnDelete by default. +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## Kafka containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 1Gi + requests: {} + # cpu: 250m + # memory: 256Mi + +## Kafka containers' liveness and readiness probes. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + # failureThreshold: 3 + # periodSeconds: 10 + # successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + # periodSeconds: 10 + # successThreshold: 1 + +## Custom liveness/readiness probes that will override the default ones +## +customLivenessProbe: {} +customReadinessProbe: {} + +## Pod Disruption Budget configuration +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + # minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: 1 + +## Add sidecars to the pod. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## Kafka port for client connections + ## + port: 9092 + ## Kafka port for inter-broker connections + ## + internalPort: 9093 + ## Kafka port for external connections + ## + externalPort: 9094 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + external: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. 
Evaluated as a template + ## + annotations: {} + +## External Access to Kafka brokers configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.17.12-debian-10-r3 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## A new service per broker will be created + ## + service: + ## Service type. Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 9094 + ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## Persistence paramaters +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
+ ## + # storageClass: "-" + ## PV Access Mode + ## + accessModes: + - ReadWriteOnce + ## PVC size + ## + size: 8Gi + ## PVC annotations + ## + annotations: {} + +## Init Container paramaters +## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component +## values from the securityContext section of the component +## +volumePermissions: + enabled: false + ## Bitnami Minideb image + ## ref: https://hub.docker.com/r/bitnami/minideb/tags/ + ## + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + +## Kafka pods ServiceAccount +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: true + ## The name of the ServiceAccount to use. 
+ ## If not set and create is true, a name is generated using the fluentd.fullname template + ## + # name: + +## Role Based Access +## ref: https://kubernetes.io/docs/admin/authorization/rbac/ +## +rbac: + ## Specifies whether RBAC rules should be created + ## binding Kafka ServiceAccount to a role + ## that allows Kafka pods querying the K8s API + ## + create: false + +## Prometheus Exporters / Metrics +## +metrics: + ## Prometheus Kafka Exporter: exposes complimentary metrics to JMX Exporter + ## + kafka: + enabled: false + + ## Bitnami Kafka exporter image + ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/kafka-exporter + tag: 1.2.0-debian-10-r220 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Extra flags to be passed to Kafka exporter + ## Example: + ## extraFlags: + ## tls.insecure-skip-tls-verify: "" + ## web.telemetry-path: "/metrics" + ## + extraFlags: {} + + ## Name of the existing secret containing the optional certificate and key files + ## for Kafka Exporter client authentication + ## + # certificatesSecret: + + ## Prometheus Kafka Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## Kafka Exporter Service type + ## + type: ClusterIP + ## Kafka Exporter Prometheus port + ## + port: 9308 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the Kafka Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## + jmx: + enabled: false + + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.14.0-debian-10-r15 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Prometheus JMX Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## JMX Exporter Service type + ## + type: ClusterIP + ## JMX Exporter Prometheus port + ## + port: 5556 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the JMX Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/path: "/" + + ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted + ## values will be exposed via JMX Exporter. They must also be exposed via Rules. 
To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + + ## Prometheus JMX exporter configuration + ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + + ## ConfigMap with Prometheus JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + # existingConfigmap: + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + +## +## Zookeeper chart configuration +## +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + enabled: true + auth: + ## Enable Zookeeper auth + ## + enabled: false + ## User that will use Zookeeper clients to auth + ## + # clientUser: + ## Password that will use Zookeeper clients to auth + ## + # clientPassword: + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + # serverUsers: + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + # serverPasswords: + +## This value is only used when zookeeper.enabled is set to false +## +externalZookeeper: + ## Server or list of external zookeeper servers to use. + ## + servers: [] diff --git a/frontend/app/assets/index.html b/frontend/app/assets/index.html index 6213da4c9..a9d4b0f62 100644 --- a/frontend/app/assets/index.html +++ b/frontend/app/assets/index.html @@ -12,6 +12,6 @@ -
+    [index.html hunk garbled in extraction: the removed/added loader markup lost its HTML tags; only the "Loading..." text survives]
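The player refactor below swaps the old MessageReader for a new MFileReader and moves the shared Timed type into './messages/timed', so the ListWalker managers are typed over plain Message values that carry an in-session timestamp. As a minimal sketch of that shape (the variant names and lastBefore helper are illustrative assumptions, not the actual OpenReplay source):

```typescript
// Sketch only: the real Timed lives in './messages/timed'.
interface Timed {
  readonly time: number; // ms since session start
}

// Hypothetical message variants, for illustration of the union shape.
type Message =
  | ({ tp: "set_page_location"; url: string } & Timed)
  | ({ tp: "mouse_click"; x: number; y: number } & Timed);

// A ListWalker-style consumer can then seek the last message at or
// before a given playback time:
function lastBefore<T extends Timed>(list: readonly T[], t: number): T | null {
  let result: T | null = null;
  for (const m of list) {
    if (m.time > t) break; // list is assumed time-ordered
    result = m;
  }
  return result;
}
```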
diff --git a/frontend/app/player/MessageDistributor/MessageDistributor.ts b/frontend/app/player/MessageDistributor/MessageDistributor.ts index 35ada8ce3..69e4b4836 100644 --- a/frontend/app/player/MessageDistributor/MessageDistributor.ts +++ b/frontend/app/player/MessageDistributor/MessageDistributor.ts @@ -25,12 +25,11 @@ import WindowNodeCounter from './managers/WindowNodeCounter'; import ActivityManager from './managers/ActivityManager'; import AssistManager from './managers/AssistManager'; -import MessageReader from './MessageReader'; +import MFileReader from './messages/MFileReader'; import { INITIAL_STATE as SUPER_INITIAL_STATE, State as SuperState } from './StatedScreen/StatedScreen'; import { INITIAL_STATE as ASSIST_INITIAL_STATE, State as AssistState } from './managers/AssistManager'; -import type { TimedMessage } from './Timed'; import type { PerformanceChartPoint } from './managers/PerformanceTrackManager'; import type { SkipInterval } from './managers/ActivityManager'; @@ -82,32 +81,24 @@ import type { SetViewportScroll, } from './messages'; -interface Timed { //TODO: to common space - time: number; -} - -type ReduxDecoded = Timed & { - action: {}, - state: {}, - duration: number, -} +import type { Timed } from './messages/timed'; export default class MessageDistributor extends StatedScreen { // TODO: consistent with the other data-lists private readonly locationEventManager: ListWalker/**/ = new ListWalker(); - private readonly locationManager: ListWalker = new ListWalker(); - private readonly loadedLocationManager: ListWalker = new ListWalker(); - private readonly connectionInfoManger: ListWalker = new ListWalker(); + private readonly locationManager: ListWalker = new ListWalker(); + private readonly loadedLocationManager: ListWalker = new ListWalker(); + private readonly connectionInfoManger: ListWalker = new ListWalker(); private readonly performanceTrackManager: PerformanceTrackManager = new PerformanceTrackManager(); private readonly windowNodeCounter: WindowNodeCounter = new WindowNodeCounter(); private readonly clickManager: ListWalker = new ListWalker(); - private readonly resizeManager: ListWalker = new ListWalker([]); + private readonly resizeManager: ListWalker = new ListWalker([]); private readonly pagesManager: PagesManager; private readonly mouseManager: MouseManager; private readonly assistManager: AssistManager; - private readonly scrollManager: ListWalker = new ListWalker(); + private readonly scrollManager: ListWalker = new ListWalker(); private readonly decoder = new Decoder(); private readonly lists = initLists(); @@ -184,7 +175,7 @@ export default class MessageDistributor extends StatedScreen { window.fetch(fileUrl) .then(r => r.arrayBuffer()) .then(b => { - const r = new MessageReader(new Uint8Array(b), this.sessionStart); + const r = new MFileReader(new Uint8Array(b), this.sessionStart); const msgs: Array = []; while (r.hasNext()) { @@ -334,7 +325,7 @@ export default class MessageDistributor extends StatedScreen { } /* Binded */ - distributeMessage = (msg: TimedMessage, index: number): void => { + distributeMessage = (msg: Message, index: number): void => { if ([ "mouse_move", "mouse_click", diff --git a/frontend/app/player/MessageDistributor/MessageReader.ts b/frontend/app/player/MessageDistributor/MessageReader.ts deleted file mode 100644 index dea8759c9..000000000 --- a/frontend/app/player/MessageDistributor/MessageReader.ts +++ /dev/null @@ -1,80 +0,0 @@ -import type { TimedMessage, Indexed } from './Timed'; - -import logger from 'App/logger'; 
-import readMessage, { Message } from './messages'; -import PrimitiveReader from './PrimitiveReader'; - -// function needSkipMessage(data: Uint8Array, p: number, pLast: number): boolean { -// for (let i = 7; i >= 0; i--) { -// if (data[ p + i ] !== data[ pLast + i ]) { -// return data[ p + i ] - data[ pLast + i ] < 0 -// } -// } -// return true -// } - -export default class MessageReader extends PrimitiveReader { - private pLastMessageID: number = 0; - private currentTime: number = 0; - public error: boolean = false; - constructor(data: Uint8Array, private readonly startTime: number) { - super(data); - } - - private needSkipMessage(): boolean { - if (this.p === 0) return false; - for (let i = 7; i >= 0; i--) { - if (this.buf[ this.p + i ] !== this.buf[ this.pLastMessageID + i ]) { - return this.buf[ this.p + i ] - this.buf[ this.pLastMessageID + i ] < 0; - } - } - return true; - } - - private readMessage(): Message | null { - this.skip(8); - try { - let msg - msg = readMessage(this); - return msg; - } catch (e) { - this.error = true; - logger.error("Read message error:", e); - return null; - } - } - - hasNext():boolean { - return !this.error && this.buf.length > this.p; - } - - next(): [ TimedMessage, number] | null { - if (!this.hasNext()) { - return null; - } - - while (this.needSkipMessage()) { - this.readMessage(); - } - this.pLastMessageID = this.p; - - const msg = this.readMessage(); - if (!msg) { - return null; - } - - if (msg.tp === "timestamp") { - // if (this.startTime == null) { - // this.startTime = msg.timestamp - // } - this.currentTime = msg.timestamp - this.startTime; - } else { - const tMsg = Object.assign(msg, { - time: this.currentTime, - _index: this.pLastMessageID, - }) - return [tMsg, this.pLastMessageID]; - } - return null; - } -} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/PrimitiveReader.ts b/frontend/app/player/MessageDistributor/PrimitiveReader.ts deleted file mode 100644 index b49955074..000000000 --- a/frontend/app/player/MessageDistributor/PrimitiveReader.ts +++ /dev/null @@ -1,40 +0,0 @@ -export default class PrimitiveReader { - protected p = 0 - constructor(protected readonly buf: Uint8Array) {} - - hasNext() { - return this.p < this.buf.length - } - - readUint() { - var r = 0, s = 1, b; - do { - b = this.buf[this.p++]; - r += (b & 0x7F) * s; - s *= 128; - } while (b >= 0x80) - return r; - } - - readInt() { - let u = this.readUint(); - if (u % 2) { - u = (u + 1) / -2; - } else { - u = u / 2; - } - return u; - } - - readString() { - var l = this.readUint(); - return new TextDecoder().decode(this.buf.subarray(this.p, this.p+=l)); - } - - readBoolean() { - return !!this.buf[this.p++]; - } - skip(n: number) { - this.p += n; - } -} diff --git a/frontend/app/player/MessageDistributor/StatedScreen/Screen/screen.css b/frontend/app/player/MessageDistributor/StatedScreen/Screen/screen.css index 6f5da4549..b715986d2 100644 --- a/frontend/app/player/MessageDistributor/StatedScreen/Screen/screen.css +++ b/frontend/app/player/MessageDistributor/StatedScreen/Screen/screen.css @@ -9,7 +9,7 @@ .iframe { position: absolute; border: none; - background: whilte; + background: white; } .overlay { position: absolute; diff --git a/frontend/app/player/MessageDistributor/Timed.ts b/frontend/app/player/MessageDistributor/Timed.ts deleted file mode 100644 index e0a1d6a82..000000000 --- a/frontend/app/player/MessageDistributor/Timed.ts +++ /dev/null @@ -1,5 +0,0 @@ -import type { Message } from './messages'; - -export interface Timed { readonly time: 
number }; -export interface Indexed { readonly _index: number }; // TODO: remove dash (evwrywhere) -export type TimedMessage = Timed & Message; diff --git a/frontend/app/player/MessageDistributor/managers/AssistManager.ts b/frontend/app/player/MessageDistributor/managers/AssistManager.ts index 03b2b9512..2b0ac4e63 100644 --- a/frontend/app/player/MessageDistributor/managers/AssistManager.ts +++ b/frontend/app/player/MessageDistributor/managers/AssistManager.ts @@ -1,14 +1,14 @@ import type Peer from 'peerjs'; import type { DataConnection, MediaConnection } from 'peerjs'; import type MessageDistributor from '../MessageDistributor'; -import type { TimedMessage } from '../Timed'; import type { Message } from '../messages' -import { ID_TP_MAP } from '../messages'; import store from 'App/store'; import type { LocalStream } from './LocalStream'; import { update, getState } from '../../store'; import { iceServerConfigFromString } from 'App/utils' +import MStreamReader from '../messages/MStreamReader';; +import JSONRawMessageReader from '../messages/JSONRawMessageReader' export enum CallingState { Reconnecting, @@ -59,68 +59,15 @@ export const INITIAL_STATE: State = { const MAX_RECONNECTION_COUNT = 4; -function resolveURL(baseURL: string, relURL: string): string { - if (relURL.startsWith('#') || relURL === "") { - return relURL; - } - return new URL(relURL, baseURL).toString(); -} - - -var match = /bar/.exec("foobar"); -const re1 = /url\(("[^"]*"|'[^']*'|[^)]*)\)/g -const re2 = /@import "(.*?)"/g -function cssUrlsIndex(css: string): Array<[number, number]> { - const idxs: Array<[number, number]> = []; - const i1 = css.matchAll(re1); - // @ts-ignore - for (let m of i1) { - // @ts-ignore - const s: number = m.index + m[0].indexOf(m[1]); - const e: number = s + m[1].length; - idxs.push([s, e]); - } - const i2 = css.matchAll(re2); - // @ts-ignore - for (let m of i2) { - // @ts-ignore - const s = m.index + m[0].indexOf(m[1]); - const e = s + m[1].length; - idxs.push([s, e]) - } - return idxs; -} -function unquote(str: string): [string, string] { - str = str.trim(); - if (str.length <= 2) { - return [str, ""] - } - if (str[0] == '"' && str[str.length-1] == '"') { - return [ str.substring(1, str.length-1), "\""]; - } - if (str[0] == '\'' && str[str.length-1] == '\'') { - return [ str.substring(1, str.length-1), "'" ]; - } - return [str, ""] -} -function rewriteCSSLinks(css: string, rewriter: (rawurl: string) => string): string { - for (let idx of cssUrlsIndex(css)) { - const f = idx[0] - const t = idx[1] - const [ rawurl, q ] = unquote(css.substring(f, t)); - css = css.substring(0,f) + q + rewriter(rawurl) + q + css.substring(t); - } - return css -} - -function resolveCSS(baseURL: string, css: string): string { - return rewriteCSSLinks(css, rawurl => resolveURL(baseURL, rawurl)); -} - export default class AssistManager { constructor(private session, private md: MessageDistributor, private config) {} private setStatus(status: ConnectionStatus) { + if (getState().peerConnectionStatus === ConnectionStatus.Disconnected && + status !== ConnectionStatus.Connected) { + return + } + if (status === ConnectionStatus.Connecting) { this.md.setMessagesLoading(true); } else { @@ -150,6 +97,7 @@ export default class AssistManager { // @ts-ignore const urlObject = new URL(window.ENV.API_EDP) import('peerjs').then(({ default: Peer }) => { + if (this.closed) {return} const _config = { host: urlObject.hostname, path: '/assist', @@ -171,12 +119,11 @@ export default class AssistManager { console.warn("AssistManager PeerJS 
peer error: ", e.type, e) } if (['peer-unavailable', 'network', 'webrtc'].includes(e.type)) { - if (this.peer && this.connectionAttempts++ < MAX_RECONNECTION_COUNT) { - this.setStatus(ConnectionStatus.Connecting); + if (this.peer) { + this.setStatus(this.connectionAttempts++ < MAX_RECONNECTION_COUNT + ? ConnectionStatus.Connecting + : ConnectionStatus.Disconnected); this.connectToPeer(); - } else { - this.setStatus(ConnectionStatus.Disconnected); - this.dataCheckIntervalID && clearInterval(this.dataCheckIntervalID); } } else { console.error(`PeerJS error (on peer). Type ${e.type}`, e); @@ -191,12 +138,11 @@ export default class AssistManager { }); } - private dataCheckIntervalID: ReturnType | undefined; private connectToPeer() { if (!this.peer) { return; } this.setStatus(ConnectionStatus.Connecting); const id = this.peerID; - const conn = this.peer.connect(id, { serialization: 'json', reliable: true}); + const conn = this.peer.connect(id, { serialization: "json", reliable: true}); conn.on('open', () => { window.addEventListener("beforeunload", ()=>conn.open &&conn.send("unload")); @@ -207,75 +153,42 @@ export default class AssistManager { this._call() } - let i = 0; let firstMessage = true; this.setStatus(ConnectionStatus.WaitingMessages) + const jmr = new JSONRawMessageReader() + const reader = new MStreamReader(jmr) + conn.on('data', (data) => { - if (!Array.isArray(data)) { return this.handleCommand(data); } this.disconnectTimeout && clearTimeout(this.disconnectTimeout); + + + if (Array.isArray(data)) { + jmr.append(data) // as RawMessage[] + } else if (data instanceof ArrayBuffer) { + //rawMessageReader.append(new Uint8Array(data)) + } else { return this.handleCommand(data); } + if (firstMessage) { firstMessage = false; this.setStatus(ConnectionStatus.Connected) } - let time = 0; - let ts0 = 0; - (data as Array).forEach(msg => { - - // TODO: more appropriate way to do it. - if (msg._id === 60) { - // @ts-ignore - if (msg.name === 'src' || msg.name === 'href') { - // @ts-ignore - msg.value = resolveURL(msg.baseURL, msg.value); - // @ts-ignore - } else if (msg.name === 'style') { - // @ts-ignore - msg.value = resolveCSS(msg.baseURL, msg.value); - } - msg._id = 12; - } else if (msg._id === 61) { // "SetCSSDataURLBased" - // @ts-ignore - msg.data = resolveCSS(msg.baseURL, msg.data); - msg._id = 15; - } else if (msg._id === 67) { // "insert_rule" - // @ts-ignore - msg.rule = resolveCSS(msg.baseURL, msg.rule); - msg._id = 37; - } - - - msg.tp = ID_TP_MAP[msg._id]; // _id goes from tracker - - if (msg.tp === "timestamp") { - ts0 = ts0 || msg.timestamp - time = msg.timestamp - ts0; - return; - } - const tMsg: TimedMessage = Object.assign(msg, { - time, - _index: i, - }); - this.md.distributeMessage(tMsg, i++); - }); + for (let msg = reader.readNext();msg !== null;msg = reader.readNext()) { + //@ts-ignore + this.md.distributeMessage(msg, msg._index); + } }); }); const onDataClose = () => { this.onCallDisconnect() - //console.log('closed peer conn. Reconnecting...') this.connectToPeer(); } - // this.dataCheckIntervalID = setInterval(() => { - // if (!this.dataConnection && getState().peerConnectionStatus === ConnectionStatus.Connected) { - // onDataClose(); - // } - // }, 3000); - conn.on('close', onDataClose);// Does it work ? + conn.on('close', onDataClose);// What case does it work ? 
conn.on("error", (e) => { this.setStatus(ConnectionStatus.Error); }) @@ -285,11 +198,9 @@ export default class AssistManager { private get dataConnection(): DataConnection | undefined { return this.peer?.connections[this.peerID]?.find(c => c.type === 'data' && c.open); } - private get callConnection(): MediaConnection | undefined { return this.peer?.connections[this.peerID]?.find(c => c.type === 'media' && c.open); } - private send(data: any) { this.dataConnection?.send(data); } @@ -327,18 +238,20 @@ export default class AssistManager { private disconnectTimeout: ReturnType | undefined; + private closeDataConnectionTimeout: ReturnType | undefined; private handleCommand(command: string) { console.log("Data command", command) switch (command) { case "unload": //this.onTrackerCallEnd(); - this.onCallDisconnect() - this.dataConnection?.close(); + this.closeDataConnectionTimeout = setTimeout(() => { + this.onCallDisconnect() + this.dataConnection?.close(); + }, 1500); this.disconnectTimeout = setTimeout(() => { this.onTrackerCallEnd(); this.setStatus(ConnectionStatus.Disconnected); }, 15000); // TODO: more convenient way - //this.dataConnection?.close(); return; case "call_end": this.onTrackerCallEnd(); @@ -350,29 +263,17 @@ export default class AssistManager { } } - // private mmtid?:ReturnType private onMouseMove = (e: MouseEvent): void => { - // this.mmtid && clearTimeout(this.mmtid) - // this.mmtid = setTimeout(() => { const data = this.md.getInternalCoordinates(e); this.send({ x: Math.round(data.x), y: Math.round(data.y) }); - // }, 5) } - // private wtid?: ReturnType - // private scrollDelta: [number, number] = [0,0] private onWheel = (e: WheelEvent): void => { e.preventDefault() - //throttling makes movements less smooth - // this.wtid && clearTimeout(this.wtid) - // this.scrollDelta[0] += e.deltaX - // this.scrollDelta[1] += e.deltaY - // this.wtid = setTimeout(() => { - this.send({ type: "scroll", delta: [ e.deltaX, e.deltaY ]})//this.scrollDelta }); - this.onMouseMove(e) - // this.scrollDelta = [0,0] - // }, 20) + //throttling makes movements less smooth, so it is omitted + //this.onMouseMove(e) + this.send({ type: "scroll", delta: [ e.deltaX, e.deltaY ]}) } private onMouseClick = (e: MouseEvent): void => { @@ -460,7 +361,7 @@ export default class AssistManager { }); this.md.overlay.addEventListener("mousemove", this.onMouseMove) - // this.md.overlay.addEventListener("click", this.onMouseClick) + this.md.overlay.addEventListener("click", this.onMouseClick) }); //call.peerConnection.addEventListener("track", e => console.log('newtrack',e.track)) @@ -474,13 +375,15 @@ export default class AssistManager { window.addEventListener("beforeunload", this.initiateCallEnd) } + closed = false clear() { + this.closed =true this.initiateCallEnd(); - this.dataCheckIntervalID && clearInterval(this.dataCheckIntervalID); if (this.peer) { - //console.log("destroying peer...") + console.log("destroying peer...") const peer = this.peer; // otherwise it calls reconnection on data chan close this.peer = null; + peer.disconnect(); peer.destroy(); } } diff --git a/frontend/app/player/MessageDistributor/managers/AssistManager_old.ts b/frontend/app/player/MessageDistributor/managers/AssistManager_old.ts new file mode 100644 index 000000000..b901dc076 --- /dev/null +++ b/frontend/app/player/MessageDistributor/managers/AssistManager_old.ts @@ -0,0 +1,486 @@ +// import type Peer from 'peerjs'; +// import type { DataConnection, MediaConnection } from 'peerjs'; +// import type MessageDistributor from 
'../MessageDistributor'; +// import type { Message } from '../messages' +// import store from 'App/store'; +// import type { LocalStream } from './LocalStream'; +// import { update, getState } from '../../store'; +// import { iceServerConfigFromString } from 'App/utils' + + +// export enum CallingState { +// Reconnecting, +// Requesting, +// True, +// False, +// }; + +// export enum ConnectionStatus { +// Connecting, +// WaitingMessages, +// Connected, +// Inactive, +// Disconnected, +// Error, +// }; + + +// export function getStatusText(status: ConnectionStatus): string { +// switch(status) { +// case ConnectionStatus.Connecting: +// return "Connecting..."; +// case ConnectionStatus.Connected: +// return ""; +// case ConnectionStatus.Inactive: +// return "Client tab is inactive"; +// case ConnectionStatus.Disconnected: +// return "Disconnected"; +// case ConnectionStatus.Error: +// return "Something went wrong. Try to reload the page."; +// case ConnectionStatus.WaitingMessages: +// return "Connected. Waiting for the data... (The tab might be inactive)" +// } +// } + +// export interface State { +// calling: CallingState, +// peerConnectionStatus: ConnectionStatus, +// remoteControl: boolean, +// } + +// export const INITIAL_STATE: State = { +// calling: CallingState.False, +// peerConnectionStatus: ConnectionStatus.Connecting, +// remoteControl: false, +// } + +// const MAX_RECONNECTION_COUNT = 4; + + +// function resolveURL(baseURL: string, relURL: string): string { +// if (relURL.startsWith('#') || relURL === "") { +// return relURL; +// } +// return new URL(relURL, baseURL).toString(); +// } + + +// var match = /bar/.exec("foobar"); +// const re1 = /url\(("[^"]*"|'[^']*'|[^)]*)\)/g +// const re2 = /@import "(.*?)"/g +// function cssUrlsIndex(css: string): Array<[number, number]> { +// const idxs: Array<[number, number]> = []; +// const i1 = css.matchAll(re1); +// // @ts-ignore +// for (let m of i1) { +// // @ts-ignore +// const s: number = m.index + m[0].indexOf(m[1]); +// const e: number = s + m[1].length; +// idxs.push([s, e]); +// } +// const i2 = css.matchAll(re2); +// // @ts-ignore +// for (let m of i2) { +// // @ts-ignore +// const s = m.index + m[0].indexOf(m[1]); +// const e = s + m[1].length; +// idxs.push([s, e]) +// } +// return idxs; +// } +// function unquote(str: string): [string, string] { +// str = str.trim(); +// if (str.length <= 2) { +// return [str, ""] +// } +// if (str[0] == '"' && str[str.length-1] == '"') { +// return [ str.substring(1, str.length-1), "\""]; +// } +// if (str[0] == '\'' && str[str.length-1] == '\'') { +// return [ str.substring(1, str.length-1), "'" ]; +// } +// return [str, ""] +// } +// function rewriteCSSLinks(css: string, rewriter: (rawurl: string) => string): string { +// for (let idx of cssUrlsIndex(css)) { +// const f = idx[0] +// const t = idx[1] +// const [ rawurl, q ] = unquote(css.substring(f, t)); +// css = css.substring(0,f) + q + rewriter(rawurl) + q + css.substring(t); +// } +// return css +// } + +// function resolveCSS(baseURL: string, css: string): string { +// return rewriteCSSLinks(css, rawurl => resolveURL(baseURL, rawurl)); +// } + +// export default class AssistManager { +// constructor(private session, private md: MessageDistributor, private config) {} + +// private setStatus(status: ConnectionStatus) { +// if (status === ConnectionStatus.Connecting) { +// this.md.setMessagesLoading(true); +// } else { +// this.md.setMessagesLoading(false); +// } +// if (status === ConnectionStatus.Connected) { +// 
this.md.display(true); +// } else { +// this.md.display(false); +// } +// update({ peerConnectionStatus: status }); +// } + +// private get peerID(): string { +// return `${this.session.projectKey}-${this.session.sessionId}` +// } + +// private peer: Peer | null = null; +// connectionAttempts: number = 0; +// private peeropened: boolean = false; +// connect() { +// if (this.peer != null) { +// console.error("AssistManager: trying to connect more than once"); +// return; +// } +// this.setStatus(ConnectionStatus.Connecting) +// import('peerjs').then(({ default: Peer }) => { +// const _config = { +// // @ts-ignore +// host: new URL(window.ENV.API_EDP).host, +// path: '/assist', +// port: location.protocol === 'https:' ? 443 : 80, +// } + +// if (this.config) { +// _config['config'] = { +// iceServers: this.config, +// sdpSemantics: 'unified-plan', +// iceTransportPolicy: 'relay', +// }; +// } + +// const peer = new Peer(_config); +// this.peer = peer; +// peer.on('error', e => { +// if (e.type !== 'peer-unavailable') { +// console.warn("AssistManager PeerJS peer error: ", e.type, e) +// } +// if (['peer-unavailable', 'network', 'webrtc'].includes(e.type)) { +// if (this.peer && this.connectionAttempts++ < MAX_RECONNECTION_COUNT) { +// this.setStatus(ConnectionStatus.Connecting); +// this.connectToPeer(); +// } else { +// this.setStatus(ConnectionStatus.Disconnected); +// this.dataCheckIntervalID && clearInterval(this.dataCheckIntervalID); +// } +// } else { +// console.error(`PeerJS error (on peer). Type ${e.type}`, e); +// this.setStatus(ConnectionStatus.Error) +// } +// }) +// peer.on("open", () => { +// if (this.peeropened) { return; } +// this.peeropened = true; +// this.connectToPeer(); +// }); +// }); +// } + +// private dataCheckIntervalID: ReturnType | undefined; +// private connectToPeer() { +// if (!this.peer) { return; } +// this.setStatus(ConnectionStatus.Connecting); +// const id = this.peerID; +// const conn = this.peer.connect(id, { serialization: 'json', reliable: true}); +// conn.on('open', () => { +// window.addEventListener("beforeunload", ()=>conn.open &&conn.send("unload")); + +// //console.log("peer connected") + + +// if (getState().calling === CallingState.Reconnecting) { +// this._call() +// } + +// let i = 0; +// let firstMessage = true; + +// this.setStatus(ConnectionStatus.WaitingMessages) + +// conn.on('data', (data) => { +// if (!Array.isArray(data)) { return this.handleCommand(data); } +// this.disconnectTimeout && clearTimeout(this.disconnectTimeout); +// if (firstMessage) { +// firstMessage = false; +// this.setStatus(ConnectionStatus.Connected) +// } + +// let time = 0; +// let ts0 = 0; +// (data as Array).forEach(msg => { + +// // TODO: more appropriate way to do it. 
+// if (msg._id === 60) { +// // @ts-ignore +// if (msg.name === 'src' || msg.name === 'href') { +// // @ts-ignore +// msg.value = resolveURL(msg.baseURL, msg.value); +// // @ts-ignore +// } else if (msg.name === 'style') { +// // @ts-ignore +// msg.value = resolveCSS(msg.baseURL, msg.value); +// } +// msg._id = 12; +// } else if (msg._id === 61) { // "SetCSSDataURLBased" +// // @ts-ignore +// msg.data = resolveCSS(msg.baseURL, msg.data); +// msg._id = 15; +// } else if (msg._id === 67) { // "insert_rule" +// // @ts-ignore +// msg.rule = resolveCSS(msg.baseURL, msg.rule); +// msg._id = 37; +// } + + +// msg.tp = ID_TP_MAP[msg._id]; // _id goes from tracker + +// if (msg.tp === "timestamp") { +// ts0 = ts0 || msg.timestamp +// time = msg.timestamp - ts0; +// return; +// } +// const tMsg: TimedMessage = Object.assign(msg, { +// time, +// _index: i, +// }); +// this.md.distributeMessage(tMsg, i++); +// }); +// }); +// }); + + +// const onDataClose = () => { +// this.onCallDisconnect() +// //console.log('closed peer conn. Reconnecting...') +// this.connectToPeer(); +// } + +// // this.dataCheckIntervalID = setInterval(() => { +// // if (!this.dataConnection && getState().peerConnectionStatus === ConnectionStatus.Connected) { +// // onDataClose(); +// // } +// // }, 3000); +// conn.on('close', onDataClose);// Does it work ? +// conn.on("error", (e) => { +// this.setStatus(ConnectionStatus.Error); +// }) +// } + + +// private get dataConnection(): DataConnection | undefined { +// return this.peer?.connections[this.peerID]?.find(c => c.type === 'data' && c.open); +// } + +// private get callConnection(): MediaConnection | undefined { +// return this.peer?.connections[this.peerID]?.find(c => c.type === 'media' && c.open); +// } + +// private send(data: any) { +// this.dataConnection?.send(data); +// } + + +// private forceCallEnd() { +// this.callConnection?.close(); +// } +// private notifyCallEnd() { +// const dataConn = this.dataConnection; +// if (dataConn) { +// dataConn.send("call_end"); +// } +// } +// private initiateCallEnd = () => { +// this.forceCallEnd(); +// this.notifyCallEnd(); +// this.localCallData && this.localCallData.onCallEnd(); +// } + +// private onTrackerCallEnd = () => { +// console.log('onTrackerCallEnd') +// this.forceCallEnd(); +// if (getState().calling === CallingState.Requesting) { +// this.localCallData && this.localCallData.onReject(); +// } +// this.localCallData && this.localCallData.onCallEnd(); +// } + +// private onCallDisconnect = () => { +// if (getState().calling === CallingState.True) { +// update({ calling: CallingState.Reconnecting }); +// } +// } + + +// private disconnectTimeout: ReturnType | undefined; +// private handleCommand(command: string) { +// console.log("Data command", command) +// switch (command) { +// case "unload": +// //this.onTrackerCallEnd(); +// this.onCallDisconnect() +// this.dataConnection?.close(); +// this.disconnectTimeout = setTimeout(() => { +// this.onTrackerCallEnd(); +// this.setStatus(ConnectionStatus.Disconnected); +// }, 15000); // TODO: more convenient way +// //this.dataConnection?.close(); +// return; +// case "call_end": +// this.onTrackerCallEnd(); +// return; +// case "call_error": +// this.onTrackerCallEnd(); +// this.setStatus(ConnectionStatus.Error); +// return; +// } +// } + +// // private mmtid?:ReturnType +// private onMouseMove = (e: MouseEvent): void => { +// // this.mmtid && clearTimeout(this.mmtid) +// // this.mmtid = setTimeout(() => { +// const data = this.md.getInternalCoordinates(e); +// this.send({ 
x: Math.round(data.x), y: Math.round(data.y) }); +// // }, 5) +// } + + +// // private wtid?: ReturnType +// // private scrollDelta: [number, number] = [0,0] +// private onWheel = (e: WheelEvent): void => { +// e.preventDefault() +// //throttling makes movements less smooth +// // this.wtid && clearTimeout(this.wtid) +// // this.scrollDelta[0] += e.deltaX +// // this.scrollDelta[1] += e.deltaY +// // this.wtid = setTimeout(() => { +// this.send({ type: "scroll", delta: [ e.deltaX, e.deltaY ]})//this.scrollDelta }); +// this.onMouseMove(e) +// // this.scrollDelta = [0,0] +// // }, 20) +// } + +// private onMouseClick = (e: MouseEvent): void => { +// const conn = this.dataConnection; +// if (!conn) { return; } +// const data = this.md.getInternalCoordinates(e); +// // const el = this.md.getElementFromPoint(e); // requires requestiong node_id from domManager +// const el = this.md.getElementFromInternalPoint(data) +// if (el instanceof HTMLElement) { +// el.focus() +// el.oninput = e => e.preventDefault(); +// el.onkeydown = e => e.preventDefault(); +// } +// conn.send({ type: "click", x: Math.round(data.x), y: Math.round(data.y) }); +// } + +// private toggleRemoteControl = (flag?: boolean) => { +// const state = getState().remoteControl; +// const newState = typeof flag === 'boolean' ? flag : !state; +// if (state === newState) { return } +// if (newState) { +// this.md.overlay.addEventListener("click", this.onMouseClick); +// this.md.overlay.addEventListener("wheel", this.onWheel) +// update({ remoteControl: true }) +// } else { +// this.md.overlay.removeEventListener("click", this.onMouseClick); +// this.md.overlay.removeEventListener("wheel", this.onWheel); +// update({ remoteControl: false }) +// } +// } + +// private localCallData: { +// localStream: LocalStream, +// onStream: (s: MediaStream)=>void, +// onCallEnd: () => void, +// onReject: () => void, +// onError?: ()=> void +// } | null = null + +// call(localStream: LocalStream, onStream: (s: MediaStream)=>void, onCallEnd: () => void, onReject: () => void, onError?: ()=> void): { end: Function, toggleRemoteControl: Function } { +// this.localCallData = { +// localStream, +// onStream, +// onCallEnd: () => { +// onCallEnd(); +// this.toggleRemoteControl(false); +// this.md.overlay.removeEventListener("mousemove", this.onMouseMove); +// this.md.overlay.removeEventListener("click", this.onMouseClick); +// update({ calling: CallingState.False }); +// this.localCallData = null; +// }, +// onReject, +// onError, +// } +// this._call() +// return { +// end: this.initiateCallEnd, +// toggleRemoteControl: this.toggleRemoteControl, +// } +// } + +// private _call() { +// if (!this.peer || !this.localCallData || ![CallingState.False, CallingState.Reconnecting].includes(getState().calling)) { return null; } + +// update({ calling: CallingState.Requesting }); + +// //console.log('calling...', this.localCallData.localStream) + +// const call = this.peer.call(this.peerID, this.localCallData.localStream.stream); +// this.localCallData.localStream.onVideoTrack(vTrack => { +// const sender = call.peerConnection.getSenders().find(s => s.track?.kind === "video") +// if (!sender) { +// //logger.warn("No video sender found") +// return +// } +// //logger.log("sender found:", sender) +// sender.replaceTrack(vTrack) +// }) + +// call.on('stream', stream => { +// update({ calling: CallingState.True }); +// this.localCallData && this.localCallData.onStream(stream); +// this.send({ +// name: store.getState().getIn([ 'user', 'account', 'name']), +// }); + +// 
this.md.overlay.addEventListener("mousemove", this.onMouseMove) +// // this.md.overlay.addEventListener("click", this.onMouseClick) +// }); +// //call.peerConnection.addEventListener("track", e => console.log('newtrack',e.track)) + +// call.on("close", this.localCallData.onCallEnd); +// call.on("error", (e) => { +// console.error("PeerJS error (on call):", e) +// this.initiateCallEnd(); +// this.localCallData && this.localCallData.onError && this.localCallData.onError(); +// }); + +// window.addEventListener("beforeunload", this.initiateCallEnd) +// } + +// clear() { +// this.initiateCallEnd(); +// this.dataCheckIntervalID && clearInterval(this.dataCheckIntervalID); +// if (this.peer) { +// //console.log("destroying peer...") +// const peer = this.peer; // otherwise it calls reconnection on data chan close +// this.peer = null; +// peer.destroy(); +// } +// } +// } + + diff --git a/frontend/app/player/MessageDistributor/managers/DOMManager.ts b/frontend/app/player/MessageDistributor/managers/DOMManager.ts index f226c1b4e..7c40a4668 100644 --- a/frontend/app/player/MessageDistributor/managers/DOMManager.ts +++ b/frontend/app/player/MessageDistributor/managers/DOMManager.ts @@ -1,24 +1,22 @@ import type StatedScreen from '../StatedScreen'; import type { Message, SetNodeScroll, CreateElementNode } from '../messages'; -import type { TimedMessage } from '../Timed'; import logger from 'App/logger'; import StylesManager, { rewriteNodeStyleSheet } from './StylesManager'; import ListWalker from './ListWalker'; -import type { Timed }from '../Timed'; const IGNORED_ATTRS = [ "autocomplete", "name" ]; const ATTR_NAME_REGEXP = /([^\t\n\f \/>"'=]+)/; // regexp costs ~ -export default class DOMManager extends ListWalker { +export default class DOMManager extends ListWalker { private isMobile: boolean; private screen: StatedScreen; private nl: Array = []; private isLink: Array = []; // Optimisations private bodyId: number = -1; private postponedBodyMessage: CreateElementNode | null = null; - private nodeScrollManagers: Array> = []; + private nodeScrollManagers: Array> = []; private stylesManager: StylesManager; @@ -36,7 +34,7 @@ export default class DOMManager extends ListWalker { return this.startTime; } - add(m: TimedMessage): void { + add(m: Message): void { switch (m.tp) { case "set_node_scroll": if (!this.nodeScrollManagers[ m.id ]) { @@ -104,8 +102,9 @@ export default class DOMManager extends ListWalker { if ((el instanceof HTMLStyleElement) && // TODO: correct ordering OR filter in tracker el.sheet && el.sheet.cssRules && - el.sheet.cssRules.length > 0) { - logger.log("Trying to insert child to style tag with virtual rules: ", this.nl[ parentID ], this.nl[ id ]); + el.sheet.cssRules.length > 0 && + el.innerText.trim().length === 0) { + logger.log("Trying to insert child to a style tag with virtual rules: ", this.nl[ parentID ], this.nl[ id ]); return; } @@ -183,6 +182,9 @@ export default class DOMManager extends ListWalker { } this.stylesManager.setStyleHandlers(node, value); } + if (node.namespaceURI === 'http://www.w3.org/2000/svg' && value.startsWith("url(")) { + value = "url(#" + (value.split("#")[1] ||")") + } try { node.setAttribute(name, value); } catch(e) { diff --git a/frontend/app/player/MessageDistributor/managers/ListWalker.ts b/frontend/app/player/MessageDistributor/managers/ListWalker.ts index 6283ff3ab..dcfe5cd96 100644 --- a/frontend/app/player/MessageDistributor/managers/ListWalker.ts +++ b/frontend/app/player/MessageDistributor/managers/ListWalker.ts @@ -1,4 +1,4 @@ -import type { 
Timed } from '../Timed'; +import type { Timed } from '../messages/timed'; export default class ListWalker { // Optimisation: #prop compiles to method that costs mor than strict property call. diff --git a/frontend/app/player/MessageDistributor/managers/MobXStateManager.ts b/frontend/app/player/MessageDistributor/managers/MobXStateManager.ts new file mode 100644 index 000000000..7a181fcf0 --- /dev/null +++ b/frontend/app/player/MessageDistributor/managers/MobXStateManager.ts @@ -0,0 +1,14 @@ +// import type { MobX } from '../messages'; +// import type { Timed } from '../Timed'; + +// import ListWalker from './ListWalker'; + +// type MobXTimed = MobX & Timed; + +// export default class MobXStateManager extends ListWalker { +// moveToLast(t: number) { +// super.moveApply(t, ) +// } + + +// } \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/managers/MouseManager.ts b/frontend/app/player/MessageDistributor/managers/MouseManager.ts index cd26c5b7f..ba174ae89 100644 --- a/frontend/app/player/MessageDistributor/managers/MouseManager.ts +++ b/frontend/app/player/MessageDistributor/managers/MouseManager.ts @@ -1,15 +1,12 @@ import type StatedScreen from '../StatedScreen'; import type { MouseMove } from '../messages'; -import type { Timed } from '../Timed'; import ListWalker from './ListWalker'; -type MouseMoveTimed = MouseMove & Timed; - const HOVER_CLASS = "-openreplay-hover"; const HOVER_CLASS_DEPR = "-asayer-hover"; -export default class MouseManager extends ListWalker { +export default class MouseManager extends ListWalker { private hoverElements: Array = []; constructor(private screen: StatedScreen) {super();} @@ -39,6 +36,7 @@ export default class MouseManager extends ListWalker { if (!!lastMouseMove){ // @ts-ignore TODO this.screen.cursor.move(lastMouseMove); + //window.getComputedStyle(this.screen.getCursorTarget()).cursor === 'pointer' // might nfluence performance though this.updateHover(); } } diff --git a/frontend/app/player/MessageDistributor/managers/PerformanceTrackManager.ts b/frontend/app/player/MessageDistributor/managers/PerformanceTrackManager.ts index 1b11813b2..4c756616e 100644 --- a/frontend/app/player/MessageDistributor/managers/PerformanceTrackManager.ts +++ b/frontend/app/player/MessageDistributor/managers/PerformanceTrackManager.ts @@ -1,11 +1,7 @@ import type { PerformanceTrack, SetPageVisibility } from '../messages'; -import type { Timed } from '../Timed'; import ListWalker from './ListWalker'; -type TimedPerformanceTrack = Timed & PerformanceTrack; -type TimedSetPageVisibility = Timed & SetPageVisibility; - export type PerformanceChartPoint = { time: number, usedHeap: number, @@ -15,7 +11,7 @@ export type PerformanceChartPoint = { nodesCount: number, } -export default class PerformanceTrackManager extends ListWalker { +export default class PerformanceTrackManager extends ListWalker { private chart: Array = []; private isHidden: boolean = false; private timeCorrection: number = 0; @@ -26,7 +22,7 @@ export default class PerformanceTrackManager extends ListWalker { +export default class StylesManager extends ListWalker { private linkLoadingCount: number = 0; private linkLoadPromises: Array> = []; private skipCSSLinks: Array = []; // should be common for all pages diff --git a/frontend/app/player/MessageDistributor/messages.ts b/frontend/app/player/MessageDistributor/messages.ts deleted file mode 100644 index bb391b9f4..000000000 --- a/frontend/app/player/MessageDistributor/messages.ts +++ /dev/null @@ -1,715 +0,0 @@ -// Auto-generated, do not 
edit - -import PrimitiveReader from './PrimitiveReader'; - -export const ID_TP_MAP = { - - 0: "timestamp", - 2: "session_disconnect", - 4: "set_page_location", - 5: "set_viewport_size", - 6: "set_viewport_scroll", - 7: "create_document", - 8: "create_element_node", - 9: "create_text_node", - 10: "move_node", - 11: "remove_node", - 12: "set_node_attribute", - 13: "remove_node_attribute", - 14: "set_node_data", - 15: "set_css_data", - 16: "set_node_scroll", - 18: "set_input_value", - 19: "set_input_checked", - 20: "mouse_move", - 22: "console_log", - 37: "css_insert_rule", - 38: "css_delete_rule", - 39: "fetch", - 40: "profiler", - 41: "o_table", - 44: "redux", - 45: "vuex", - 46: "mob_x", - 47: "ng_rx", - 48: "graph_ql", - 49: "performance_track", - 54: "connection_information", - 55: "set_page_visibility", - 59: "long_task", - 69: "mouse_click", - 70: "create_i_frame_document", - 90: "ios_session_start", - 93: "ios_custom_event", - 96: "ios_screen_changes", - 100: "ios_click_event", - 102: "ios_performance_event", - 103: "ios_log", - 105: "ios_network_call", -} as const; - - -export interface Timestamp { - tp: "timestamp", - timestamp: number, -} - -export interface SessionDisconnect { - tp: "session_disconnect", - timestamp: number, -} - -export interface SetPageLocation { - tp: "set_page_location", - url: string, - referrer: string, - navigationStart: number, -} - -export interface SetViewportSize { - tp: "set_viewport_size", - width: number, - height: number, -} - -export interface SetViewportScroll { - tp: "set_viewport_scroll", - x: number, - y: number, -} - -export interface CreateDocument { - tp: "create_document", - -} - -export interface CreateElementNode { - tp: "create_element_node", - id: number, - parentID: number, - index: number, - tag: string, - svg: boolean, -} - -export interface CreateTextNode { - tp: "create_text_node", - id: number, - parentID: number, - index: number, -} - -export interface MoveNode { - tp: "move_node", - id: number, - parentID: number, - index: number, -} - -export interface RemoveNode { - tp: "remove_node", - id: number, -} - -export interface SetNodeAttribute { - tp: "set_node_attribute", - id: number, - name: string, - value: string, -} - -export interface RemoveNodeAttribute { - tp: "remove_node_attribute", - id: number, - name: string, -} - -export interface SetNodeData { - tp: "set_node_data", - id: number, - data: string, -} - -export interface SetCssData { - tp: "set_css_data", - id: number, - data: string, -} - -export interface SetNodeScroll { - tp: "set_node_scroll", - id: number, - x: number, - y: number, -} - -export interface SetInputValue { - tp: "set_input_value", - id: number, - value: string, - mask: number, -} - -export interface SetInputChecked { - tp: "set_input_checked", - id: number, - checked: boolean, -} - -export interface MouseMove { - tp: "mouse_move", - x: number, - y: number, -} - -export interface ConsoleLog { - tp: "console_log", - level: string, - value: string, -} - -export interface CssInsertRule { - tp: "css_insert_rule", - id: number, - rule: string, - index: number, -} - -export interface CssDeleteRule { - tp: "css_delete_rule", - id: number, - index: number, -} - -export interface Fetch { - tp: "fetch", - method: string, - url: string, - request: string, - response: string, - status: number, - timestamp: number, - duration: number, -} - -export interface Profiler { - tp: "profiler", - name: string, - duration: number, - args: string, - result: string, -} - -export interface OTable { - tp: "o_table", - key: 
string, - value: string, -} - -export interface Redux { - tp: "redux", - action: string, - state: string, - duration: number, -} - -export interface Vuex { - tp: "vuex", - mutation: string, - state: string, -} - -export interface MobX { - tp: "mob_x", - type: string, - payload: string, -} - -export interface NgRx { - tp: "ng_rx", - action: string, - state: string, - duration: number, -} - -export interface GraphQl { - tp: "graph_ql", - operationKind: string, - operationName: string, - variables: string, - response: string, -} - -export interface PerformanceTrack { - tp: "performance_track", - frames: number, - ticks: number, - totalJSHeapSize: number, - usedJSHeapSize: number, -} - -export interface ConnectionInformation { - tp: "connection_information", - downlink: number, - type: string, -} - -export interface SetPageVisibility { - tp: "set_page_visibility", - hidden: boolean, -} - -export interface LongTask { - tp: "long_task", - timestamp: number, - duration: number, - context: number, - containerType: number, - containerSrc: string, - containerId: string, - containerName: string, -} - -export interface MouseClick { - tp: "mouse_click", - id: number, - hesitationTime: number, - label: string, - selector: string, -} - -export interface CreateIFrameDocument { - tp: "create_i_frame_document", - frameID: number, - id: number, -} - -export interface IosSessionStart { - tp: "ios_session_start", - timestamp: number, - projectID: number, - trackerVersion: string, - revID: string, - userUUID: string, - userOS: string, - userOSVersion: string, - userDevice: string, - userDeviceType: string, - userCountry: string, -} - -export interface IosCustomEvent { - tp: "ios_custom_event", - timestamp: number, - length: number, - name: string, - payload: string, -} - -export interface IosScreenChanges { - tp: "ios_screen_changes", - timestamp: number, - length: number, - x: number, - y: number, - width: number, - height: number, -} - -export interface IosClickEvent { - tp: "ios_click_event", - timestamp: number, - length: number, - label: string, - x: number, - y: number, -} - -export interface IosPerformanceEvent { - tp: "ios_performance_event", - timestamp: number, - length: number, - name: string, - value: number, -} - -export interface IosLog { - tp: "ios_log", - timestamp: number, - length: number, - severity: string, - content: string, -} - -export interface IosNetworkCall { - tp: "ios_network_call", - timestamp: number, - length: number, - duration: number, - headers: string, - body: string, - url: string, - success: boolean, - method: string, - status: number, -} - - -export type Message = Timestamp | SessionDisconnect | SetPageLocation | SetViewportSize | SetViewportScroll | CreateDocument | CreateElementNode | CreateTextNode | MoveNode | RemoveNode | SetNodeAttribute | RemoveNodeAttribute | SetNodeData | SetCssData | SetNodeScroll | SetInputValue | SetInputChecked | MouseMove | ConsoleLog | CssInsertRule | CssDeleteRule | Fetch | Profiler | OTable | Redux | Vuex | MobX | NgRx | GraphQl | PerformanceTrack | ConnectionInformation | SetPageVisibility | LongTask | MouseClick | CreateIFrameDocument | IosSessionStart | IosCustomEvent | IosScreenChanges | IosClickEvent | IosPerformanceEvent | IosLog | IosNetworkCall; - -export default function (r: PrimitiveReader): Message | null { - const tp = r.readUint() - switch (tp) { - - case 0: - return { - tp: ID_TP_MAP[0], - timestamp: r.readUint(), - }; - - case 2: - return { - tp: ID_TP_MAP[2], - timestamp: r.readUint(), - }; - - case 4: - return { - tp: 
ID_TP_MAP[4], - url: r.readString(), - referrer: r.readString(), - navigationStart: r.readUint(), - }; - - case 5: - return { - tp: ID_TP_MAP[5], - width: r.readUint(), - height: r.readUint(), - }; - - case 6: - return { - tp: ID_TP_MAP[6], - x: r.readInt(), - y: r.readInt(), - }; - - case 7: - return { - tp: ID_TP_MAP[7], - - }; - - case 8: - return { - tp: ID_TP_MAP[8], - id: r.readUint(), - parentID: r.readUint(), - index: r.readUint(), - tag: r.readString(), - svg: r.readBoolean(), - }; - - case 9: - return { - tp: ID_TP_MAP[9], - id: r.readUint(), - parentID: r.readUint(), - index: r.readUint(), - }; - - case 10: - return { - tp: ID_TP_MAP[10], - id: r.readUint(), - parentID: r.readUint(), - index: r.readUint(), - }; - - case 11: - return { - tp: ID_TP_MAP[11], - id: r.readUint(), - }; - - case 12: - return { - tp: ID_TP_MAP[12], - id: r.readUint(), - name: r.readString(), - value: r.readString(), - }; - - case 13: - return { - tp: ID_TP_MAP[13], - id: r.readUint(), - name: r.readString(), - }; - - case 14: - return { - tp: ID_TP_MAP[14], - id: r.readUint(), - data: r.readString(), - }; - - case 15: - return { - tp: ID_TP_MAP[15], - id: r.readUint(), - data: r.readString(), - }; - - case 16: - return { - tp: ID_TP_MAP[16], - id: r.readUint(), - x: r.readInt(), - y: r.readInt(), - }; - - case 18: - return { - tp: ID_TP_MAP[18], - id: r.readUint(), - value: r.readString(), - mask: r.readInt(), - }; - - case 19: - return { - tp: ID_TP_MAP[19], - id: r.readUint(), - checked: r.readBoolean(), - }; - - case 20: - return { - tp: ID_TP_MAP[20], - x: r.readUint(), - y: r.readUint(), - }; - - case 22: - return { - tp: ID_TP_MAP[22], - level: r.readString(), - value: r.readString(), - }; - - case 37: - return { - tp: ID_TP_MAP[37], - id: r.readUint(), - rule: r.readString(), - index: r.readUint(), - }; - - case 38: - return { - tp: ID_TP_MAP[38], - id: r.readUint(), - index: r.readUint(), - }; - - case 39: - return { - tp: ID_TP_MAP[39], - method: r.readString(), - url: r.readString(), - request: r.readString(), - response: r.readString(), - status: r.readUint(), - timestamp: r.readUint(), - duration: r.readUint(), - }; - - case 40: - return { - tp: ID_TP_MAP[40], - name: r.readString(), - duration: r.readUint(), - args: r.readString(), - result: r.readString(), - }; - - case 41: - return { - tp: ID_TP_MAP[41], - key: r.readString(), - value: r.readString(), - }; - - case 44: - return { - tp: ID_TP_MAP[44], - action: r.readString(), - state: r.readString(), - duration: r.readUint(), - }; - - case 45: - return { - tp: ID_TP_MAP[45], - mutation: r.readString(), - state: r.readString(), - }; - - case 46: - return { - tp: ID_TP_MAP[46], - type: r.readString(), - payload: r.readString(), - }; - - case 47: - return { - tp: ID_TP_MAP[47], - action: r.readString(), - state: r.readString(), - duration: r.readUint(), - }; - - case 48: - return { - tp: ID_TP_MAP[48], - operationKind: r.readString(), - operationName: r.readString(), - variables: r.readString(), - response: r.readString(), - }; - - case 49: - return { - tp: ID_TP_MAP[49], - frames: r.readInt(), - ticks: r.readInt(), - totalJSHeapSize: r.readUint(), - usedJSHeapSize: r.readUint(), - }; - - case 54: - return { - tp: ID_TP_MAP[54], - downlink: r.readUint(), - type: r.readString(), - }; - - case 55: - return { - tp: ID_TP_MAP[55], - hidden: r.readBoolean(), - }; - - case 59: - return { - tp: ID_TP_MAP[59], - timestamp: r.readUint(), - duration: r.readUint(), - context: r.readUint(), - containerType: r.readUint(), - containerSrc: r.readString(), 
- containerId: r.readString(), - containerName: r.readString(), - }; - - case 69: - return { - tp: ID_TP_MAP[69], - id: r.readUint(), - hesitationTime: r.readUint(), - label: r.readString(), - selector: r.readString(), - }; - - case 70: - return { - tp: ID_TP_MAP[70], - frameID: r.readUint(), - id: r.readUint(), - }; - - case 90: - return { - tp: ID_TP_MAP[90], - timestamp: r.readUint(), - projectID: r.readUint(), - trackerVersion: r.readString(), - revID: r.readString(), - userUUID: r.readString(), - userOS: r.readString(), - userOSVersion: r.readString(), - userDevice: r.readString(), - userDeviceType: r.readString(), - userCountry: r.readString(), - }; - - case 93: - return { - tp: ID_TP_MAP[93], - timestamp: r.readUint(), - length: r.readUint(), - name: r.readString(), - payload: r.readString(), - }; - - case 96: - return { - tp: ID_TP_MAP[96], - timestamp: r.readUint(), - length: r.readUint(), - x: r.readUint(), - y: r.readUint(), - width: r.readUint(), - height: r.readUint(), - }; - - case 100: - return { - tp: ID_TP_MAP[100], - timestamp: r.readUint(), - length: r.readUint(), - label: r.readString(), - x: r.readUint(), - y: r.readUint(), - }; - - case 102: - return { - tp: ID_TP_MAP[102], - timestamp: r.readUint(), - length: r.readUint(), - name: r.readString(), - value: r.readUint(), - }; - - case 103: - return { - tp: ID_TP_MAP[103], - timestamp: r.readUint(), - length: r.readUint(), - severity: r.readString(), - content: r.readString(), - }; - - case 105: - return { - tp: ID_TP_MAP[105], - timestamp: r.readUint(), - length: r.readUint(), - duration: r.readUint(), - headers: r.readString(), - body: r.readString(), - url: r.readString(), - success: r.readBoolean(), - method: r.readString(), - status: r.readUint(), - }; - - default: - throw new Error(`Unrecognizable message type: ${ tp }`) - return null; - } -} diff --git a/frontend/app/player/MessageDistributor/messages/JSONRawMessageReader.ts b/frontend/app/player/MessageDistributor/messages/JSONRawMessageReader.ts new file mode 100644 index 000000000..8143ae17c --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/JSONRawMessageReader.ts @@ -0,0 +1,18 @@ +import type { RawMessage } from './raw' + +import { TP_MAP } from './raw' + +export default class JSONRawMessageReader { + constructor(private messages: any[] = []){} + append(messages: any[]) { + this.messages = this.messages.concat(messages) + } + readMessage(): RawMessage | null { + const msg = this.messages.shift() + if (!msg) { return null } + msg.tp = TP_MAP[msg._id] + delete msg._id + return msg as RawMessage + } + +} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/messages/MFileReader.ts b/frontend/app/player/MessageDistributor/messages/MFileReader.ts new file mode 100644 index 000000000..0204259e5 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/MFileReader.ts @@ -0,0 +1,68 @@ +import type { Message } from './message'; +import type { RawMessage } from './raw'; +import logger from 'App/logger'; +import RawMessageReader from './RawMessageReader'; + +// TODO: composition instead of inheritance +// needSkipMessage() and next() methods here use buf and p protected properties, +// which should be probably somehow incapsulated +export default class MFileReader extends RawMessageReader { + private pLastMessageID: number = 0; + private currentTime: number = 0; + public error: boolean = false; + constructor(data: Uint8Array, private readonly startTime: number) { + super(data); + } + + private needSkipMessage(): 
boolean { + if (this.p === 0) return false; + for (let i = 7; i >= 0; i--) { + if (this.buf[ this.p + i ] !== this.buf[ this.pLastMessageID + i ]) { + return this.buf[ this.p + i ] - this.buf[ this.pLastMessageID + i ] < 0; + } + } + return true; + } + + private readRawMessage(): RawMessage | null { + this.skip(8); + try { + return super.readMessage(); + } catch (e) { + this.error = true; + logger.error("Read message error:", e); + return null; + } + } + + hasNext():boolean { + return !this.error && this.hasNextByte(); + } + + next(): [ Message, number] | null { + if (!this.hasNext()) { + return null; + } + + while (this.needSkipMessage()) { + this.readRawMessage(); + } + this.pLastMessageID = this.p; + + const rMsg = this.readRawMessage(); + if (!rMsg) { + return null; + } + + if (rMsg.tp === "timestamp") { + this.currentTime = rMsg.timestamp - this.startTime; + } else { + const msg = Object.assign(rMsg, { + time: this.currentTime, + _index: this.pLastMessageID, + }) + return [msg, this.pLastMessageID]; + } + return null; + } +} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/messages/MStreamReader.ts b/frontend/app/player/MessageDistributor/messages/MStreamReader.ts new file mode 100644 index 000000000..1cc30dcec --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/MStreamReader.ts @@ -0,0 +1,69 @@ +import type { Message } from './message' +import type { + RawMessage, + RawSetNodeAttributeURLBased, + RawSetNodeAttribute, + RawSetCssDataURLBased, + RawSetCssData, + RawCssInsertRuleURLBased, + RawCssInsertRule, +} from './raw' +import RawMessageReader from './RawMessageReader' +import type { RawMessageReaderI } from './RawMessageReader' +import { resolveURL, resolveCSS } from './urlResolve' + + +const resolveMsg = { + "set_node_attribute_url_based": (msg: RawSetNodeAttributeURLBased): RawSetNodeAttribute => + ({ + ...msg, + value: msg.name === 'src' || msg.name === 'href' + ? resolveURL(msg.baseURL, msg.value) + : (msg.name === 'style' + ? resolveCSS(msg.baseURL, msg.value) + : msg.value + ), + tp: "set_node_attribute", + }), + "set_css_data_url_based": (msg: RawSetCssDataURLBased): RawSetCssData => + ({ + ...msg, + data: resolveCSS(msg.baseURL, msg.data), + tp: "set_css_data", + }), + "css_insert_rule_url_based": (msg: RawCssInsertRuleURLBased): RawCssInsertRule => + ({ + ...msg, + rule: resolveCSS(msg.baseURL, msg.rule), + tp: "css_insert_rule", + }) +} + +export default class MStreamReader { + constructor(private readonly r: RawMessageReaderI = new RawMessageReader()){} + + // append(buf: Uint8Array) { + // this.r.append(buf) + // } + + private t0: number = 0 + private t: number = 0 + private idx: number = 0 + readNext(): Message | null { + let msg = this.r.readMessage() + if (msg === null) { return null } + if (msg.tp === "timestamp" || msg.tp === "batch_meta") { + this.t0 = this.t0 || msg.timestamp + this.t = msg.timestamp - this.t0 + return this.readNext() + } + + // why typescript doesn't work here? 
+ msg = (resolveMsg[msg.tp] || ((m:RawMessage)=>m))(msg) + + return Object.assign(msg, { + time: this.t, + _index: this.idx++, + }) + } +} \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/messages/PrimitiveReader.ts b/frontend/app/player/MessageDistributor/messages/PrimitiveReader.ts new file mode 100644 index 000000000..bc62bf653 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/PrimitiveReader.ts @@ -0,0 +1,55 @@ +export default class PrimitiveReader { + protected p: number = 0 + constructor(protected buf: Uint8Array = new Uint8Array(0)) {} + + append(buf: Uint8Array) { + const newBuf = new Uint8Array(this.buf.length + buf.length) + newBuf.set(this.buf) + newBuf.set(buf, this.buf.length) + this.buf = newBuf + } + + hasNextByte(): boolean { + return this.p < this.buf.length + } + + readUint(): number | null { + let p = this.p, r = 0, s = 1, b + do { + if (p >= this.buf.length) { + return null + } + b = this.buf[ p++ ] + r += (b & 0x7F) * s + s *= 128; + } while (b >= 0x80) + this.p = p + return r; + } + + readInt(): number | null { + let u = this.readUint(); + if (u === null) { return u } + if (u % 2) { + u = (u + 1) / -2; + } else { + u = u / 2; + } + return u; + } + + readString(): string | null { + var l = this.readUint(); + if (l === null || this.p + l > this.buf.length) { return null } + return new TextDecoder().decode(this.buf.subarray(this.p, this.p+=l)); + } + + readBoolean(): boolean | null { + if (this.p >= this.buf.length) { return null } + return !!this.buf[this.p++]; + } + + skip(n: number) { + this.p += n; + } +} diff --git a/frontend/app/player/MessageDistributor/messages/RawMessageReader.ts b/frontend/app/player/MessageDistributor/messages/RawMessageReader.ts new file mode 100644 index 000000000..867d80755 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/RawMessageReader.ts @@ -0,0 +1,758 @@ +// Auto-generated, do not edit + +import PrimitiveReader from './PrimitiveReader' +import type { RawMessage } from './raw' + +export interface RawMessageReaderI { + readMessage(): RawMessage | null +} + +export default class RawMessageReader extends PrimitiveReader { + readMessage(): RawMessage | null { + const p = this.p + const resetPointer = () => { + this.p = p + return null + } + + const tp = this.readUint() + if (tp === null) { return resetPointer() } + + switch (tp) { + + case 80: { + const pageNo = this.readUint(); if (pageNo === null) { return resetPointer() } + const firstIndex = this.readUint(); if (firstIndex === null) { return resetPointer() } + const timestamp = this.readInt(); if (timestamp === null) { return resetPointer() } + return { + tp: "batch_meta", + pageNo, + firstIndex, + timestamp, + }; + } + + case 0: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + return { + tp: "timestamp", + timestamp, + }; + } + + case 2: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + return { + tp: "session_disconnect", + timestamp, + }; + } + + case 4: { + const url = this.readString(); if (url === null) { return resetPointer() } + const referrer = this.readString(); if (referrer === null) { return resetPointer() } + const navigationStart = this.readUint(); if (navigationStart === null) { return resetPointer() } + return { + tp: "set_page_location", + url, + referrer, + navigationStart, + }; + } + + case 5: { + const width = this.readUint(); if (width === null) { return resetPointer() } + const height = this.readUint(); if (height 
=== null) { return resetPointer() } + return { + tp: "set_viewport_size", + width, + height, + }; + } + + case 6: { + const x = this.readInt(); if (x === null) { return resetPointer() } + const y = this.readInt(); if (y === null) { return resetPointer() } + return { + tp: "set_viewport_scroll", + x, + y, + }; + } + + case 7: { + + return { + tp: "create_document", + + }; + } + + case 8: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const parentID = this.readUint(); if (parentID === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + const tag = this.readString(); if (tag === null) { return resetPointer() } + const svg = this.readBoolean(); if (svg === null) { return resetPointer() } + return { + tp: "create_element_node", + id, + parentID, + index, + tag, + svg, + }; + } + + case 9: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const parentID = this.readUint(); if (parentID === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + return { + tp: "create_text_node", + id, + parentID, + index, + }; + } + + case 10: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const parentID = this.readUint(); if (parentID === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + return { + tp: "move_node", + id, + parentID, + index, + }; + } + + case 11: { + const id = this.readUint(); if (id === null) { return resetPointer() } + return { + tp: "remove_node", + id, + }; + } + + case 12: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "set_node_attribute", + id, + name, + value, + }; + } + + case 13: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + return { + tp: "remove_node_attribute", + id, + name, + }; + } + + case 14: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const data = this.readString(); if (data === null) { return resetPointer() } + return { + tp: "set_node_data", + id, + data, + }; + } + + case 15: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const data = this.readString(); if (data === null) { return resetPointer() } + return { + tp: "set_css_data", + id, + data, + }; + } + + case 16: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const x = this.readInt(); if (x === null) { return resetPointer() } + const y = this.readInt(); if (y === null) { return resetPointer() } + return { + tp: "set_node_scroll", + id, + x, + y, + }; + } + + case 17: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const label = this.readString(); if (label === null) { return resetPointer() } + return { + tp: "set_input_target", + id, + label, + }; + } + + case 18: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + const mask = this.readInt(); if (mask === null) { return resetPointer() } + return { + tp: "set_input_value", + id, + value, + mask, + }; + } + + case 19: { + const id = this.readUint(); if (id === null) { return 
resetPointer() } + const checked = this.readBoolean(); if (checked === null) { return resetPointer() } + return { + tp: "set_input_checked", + id, + checked, + }; + } + + case 20: { + const x = this.readUint(); if (x === null) { return resetPointer() } + const y = this.readUint(); if (y === null) { return resetPointer() } + return { + tp: "mouse_move", + x, + y, + }; + } + + case 22: { + const level = this.readString(); if (level === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "console_log", + level, + value, + }; + } + + case 23: { + const requestStart = this.readUint(); if (requestStart === null) { return resetPointer() } + const responseStart = this.readUint(); if (responseStart === null) { return resetPointer() } + const responseEnd = this.readUint(); if (responseEnd === null) { return resetPointer() } + const domContentLoadedEventStart = this.readUint(); if (domContentLoadedEventStart === null) { return resetPointer() } + const domContentLoadedEventEnd = this.readUint(); if (domContentLoadedEventEnd === null) { return resetPointer() } + const loadEventStart = this.readUint(); if (loadEventStart === null) { return resetPointer() } + const loadEventEnd = this.readUint(); if (loadEventEnd === null) { return resetPointer() } + const firstPaint = this.readUint(); if (firstPaint === null) { return resetPointer() } + const firstContentfulPaint = this.readUint(); if (firstContentfulPaint === null) { return resetPointer() } + return { + tp: "page_load_timing", + requestStart, + responseStart, + responseEnd, + domContentLoadedEventStart, + domContentLoadedEventEnd, + loadEventStart, + loadEventEnd, + firstPaint, + firstContentfulPaint, + }; + } + + case 24: { + const speedIndex = this.readUint(); if (speedIndex === null) { return resetPointer() } + const visuallyComplete = this.readUint(); if (visuallyComplete === null) { return resetPointer() } + const timeToInteractive = this.readUint(); if (timeToInteractive === null) { return resetPointer() } + return { + tp: "page_render_timing", + speedIndex, + visuallyComplete, + timeToInteractive, + }; + } + + case 25: { + const name = this.readString(); if (name === null) { return resetPointer() } + const message = this.readString(); if (message === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "js_exception", + name, + message, + payload, + }; + } + + case 27: { + const name = this.readString(); if (name === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "raw_custom_event", + name, + payload, + }; + } + + case 28: { + const id = this.readString(); if (id === null) { return resetPointer() } + return { + tp: "user_id", + id, + }; + } + + case 29: { + const id = this.readString(); if (id === null) { return resetPointer() } + return { + tp: "user_anonymous_id", + id, + }; + } + + case 30: { + const key = this.readString(); if (key === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "metadata", + key, + value, + }; + } + + case 37: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const rule = this.readString(); if (rule === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + return { + tp: "css_insert_rule", + id, + rule, + index, + 
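+        // presumably replayed by calling CSSStyleSheet.insertRule(rule, index) on the reconstructed sheet; an inference from the field names, not stated in this diff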
}; + } + + case 38: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + return { + tp: "css_delete_rule", + id, + index, + }; + } + + case 39: { + const method = this.readString(); if (method === null) { return resetPointer() } + const url = this.readString(); if (url === null) { return resetPointer() } + const request = this.readString(); if (request === null) { return resetPointer() } + const response = this.readString(); if (response === null) { return resetPointer() } + const status = this.readUint(); if (status === null) { return resetPointer() } + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + return { + tp: "fetch", + method, + url, + request, + response, + status, + timestamp, + duration, + }; + } + + case 40: { + const name = this.readString(); if (name === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + const args = this.readString(); if (args === null) { return resetPointer() } + const result = this.readString(); if (result === null) { return resetPointer() } + return { + tp: "profiler", + name, + duration, + args, + result, + }; + } + + case 41: { + const key = this.readString(); if (key === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "o_table", + key, + value, + }; + } + + case 42: { + const type = this.readString(); if (type === null) { return resetPointer() } + return { + tp: "state_action", + type, + }; + } + + case 44: { + const action = this.readString(); if (action === null) { return resetPointer() } + const state = this.readString(); if (state === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + return { + tp: "redux", + action, + state, + duration, + }; + } + + case 45: { + const mutation = this.readString(); if (mutation === null) { return resetPointer() } + const state = this.readString(); if (state === null) { return resetPointer() } + return { + tp: "vuex", + mutation, + state, + }; + } + + case 46: { + const type = this.readString(); if (type === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "mob_x", + type, + payload, + }; + } + + case 47: { + const action = this.readString(); if (action === null) { return resetPointer() } + const state = this.readString(); if (state === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + return { + tp: "ng_rx", + action, + state, + duration, + }; + } + + case 48: { + const operationKind = this.readString(); if (operationKind === null) { return resetPointer() } + const operationName = this.readString(); if (operationName === null) { return resetPointer() } + const variables = this.readString(); if (variables === null) { return resetPointer() } + const response = this.readString(); if (response === null) { return resetPointer() } + return { + tp: "graph_ql", + operationKind, + operationName, + variables, + response, + }; + } + + case 49: { + const frames = this.readInt(); if (frames === null) { return resetPointer() } + const ticks = this.readInt(); if (ticks === null) { return resetPointer() } + const totalJSHeapSize = 
this.readUint(); if (totalJSHeapSize === null) { return resetPointer() } + const usedJSHeapSize = this.readUint(); if (usedJSHeapSize === null) { return resetPointer() } + return { + tp: "performance_track", + frames, + ticks, + totalJSHeapSize, + usedJSHeapSize, + }; + } + + case 53: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + const ttfb = this.readUint(); if (ttfb === null) { return resetPointer() } + const headerSize = this.readUint(); if (headerSize === null) { return resetPointer() } + const encodedBodySize = this.readUint(); if (encodedBodySize === null) { return resetPointer() } + const decodedBodySize = this.readUint(); if (decodedBodySize === null) { return resetPointer() } + const url = this.readString(); if (url === null) { return resetPointer() } + const initiator = this.readString(); if (initiator === null) { return resetPointer() } + return { + tp: "resource_timing", + timestamp, + duration, + ttfb, + headerSize, + encodedBodySize, + decodedBodySize, + url, + initiator, + }; + } + + case 54: { + const downlink = this.readUint(); if (downlink === null) { return resetPointer() } + const type = this.readString(); if (type === null) { return resetPointer() } + return { + tp: "connection_information", + downlink, + type, + }; + } + + case 55: { + const hidden = this.readBoolean(); if (hidden === null) { return resetPointer() } + return { + tp: "set_page_visibility", + hidden, + }; + } + + case 59: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + const context = this.readUint(); if (context === null) { return resetPointer() } + const containerType = this.readUint(); if (containerType === null) { return resetPointer() } + const containerSrc = this.readString(); if (containerSrc === null) { return resetPointer() } + const containerId = this.readString(); if (containerId === null) { return resetPointer() } + const containerName = this.readString(); if (containerName === null) { return resetPointer() } + return { + tp: "long_task", + timestamp, + duration, + context, + containerType, + containerSrc, + containerId, + containerName, + }; + } + + case 60: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + const baseURL = this.readString(); if (baseURL === null) { return resetPointer() } + return { + tp: "set_node_attribute_url_based", + id, + name, + value, + baseURL, + }; + } + + case 61: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const data = this.readString(); if (data === null) { return resetPointer() } + const baseURL = this.readString(); if (baseURL === null) { return resetPointer() } + return { + tp: "set_css_data_url_based", + id, + data, + baseURL, + }; + } + + case 63: { + const type = this.readString(); if (type === null) { return resetPointer() } + const value = this.readString(); if (value === null) { return resetPointer() } + return { + tp: "technical_info", + type, + value, + }; + } + + case 64: { + const name = this.readString(); if (name === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "custom_issue", + name, + 
payload, + }; + } + + case 65: { + + return { + tp: "page_close", + + }; + } + + case 67: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const rule = this.readString(); if (rule === null) { return resetPointer() } + const index = this.readUint(); if (index === null) { return resetPointer() } + const baseURL = this.readString(); if (baseURL === null) { return resetPointer() } + return { + tp: "css_insert_rule_url_based", + id, + rule, + index, + baseURL, + }; + } + + case 69: { + const id = this.readUint(); if (id === null) { return resetPointer() } + const hesitationTime = this.readUint(); if (hesitationTime === null) { return resetPointer() } + const label = this.readString(); if (label === null) { return resetPointer() } + const selector = this.readString(); if (selector === null) { return resetPointer() } + return { + tp: "mouse_click", + id, + hesitationTime, + label, + selector, + }; + } + + case 70: { + const frameID = this.readUint(); if (frameID === null) { return resetPointer() } + const id = this.readUint(); if (id === null) { return resetPointer() } + return { + tp: "create_i_frame_document", + frameID, + id, + }; + } + + case 90: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const projectID = this.readUint(); if (projectID === null) { return resetPointer() } + const trackerVersion = this.readString(); if (trackerVersion === null) { return resetPointer() } + const revID = this.readString(); if (revID === null) { return resetPointer() } + const userUUID = this.readString(); if (userUUID === null) { return resetPointer() } + const userOS = this.readString(); if (userOS === null) { return resetPointer() } + const userOSVersion = this.readString(); if (userOSVersion === null) { return resetPointer() } + const userDevice = this.readString(); if (userDevice === null) { return resetPointer() } + const userDeviceType = this.readString(); if (userDeviceType === null) { return resetPointer() } + const userCountry = this.readString(); if (userCountry === null) { return resetPointer() } + return { + tp: "ios_session_start", + timestamp, + projectID, + trackerVersion, + revID, + userUUID, + userOS, + userOSVersion, + userDevice, + userDeviceType, + userCountry, + }; + } + + case 93: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + const payload = this.readString(); if (payload === null) { return resetPointer() } + return { + tp: "ios_custom_event", + timestamp, + length, + name, + payload, + }; + } + + case 96: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const x = this.readUint(); if (x === null) { return resetPointer() } + const y = this.readUint(); if (y === null) { return resetPointer() } + const width = this.readUint(); if (width === null) { return resetPointer() } + const height = this.readUint(); if (height === null) { return resetPointer() } + return { + tp: "ios_screen_changes", + timestamp, + length, + x, + y, + width, + height, + }; + } + + case 100: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const label = this.readString(); if (label === null) { return 
resetPointer() } + const x = this.readUint(); if (x === null) { return resetPointer() } + const y = this.readUint(); if (y === null) { return resetPointer() } + return { + tp: "ios_click_event", + timestamp, + length, + label, + x, + y, + }; + } + + case 102: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const name = this.readString(); if (name === null) { return resetPointer() } + const value = this.readUint(); if (value === null) { return resetPointer() } + return { + tp: "ios_performance_event", + timestamp, + length, + name, + value, + }; + } + + case 103: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const severity = this.readString(); if (severity === null) { return resetPointer() } + const content = this.readString(); if (content === null) { return resetPointer() } + return { + tp: "ios_log", + timestamp, + length, + severity, + content, + }; + } + + case 105: { + const timestamp = this.readUint(); if (timestamp === null) { return resetPointer() } + const length = this.readUint(); if (length === null) { return resetPointer() } + const duration = this.readUint(); if (duration === null) { return resetPointer() } + const headers = this.readString(); if (headers === null) { return resetPointer() } + const body = this.readString(); if (body === null) { return resetPointer() } + const url = this.readString(); if (url === null) { return resetPointer() } + const success = this.readBoolean(); if (success === null) { return resetPointer() } + const method = this.readString(); if (method === null) { return resetPointer() } + const status = this.readUint(); if (status === null) { return resetPointer() } + return { + tp: "ios_network_call", + timestamp, + length, + duration, + headers, + body, + url, + success, + method, + status, + }; + } + + default: + throw new Error(`Unrecognizable message type: ${ tp }`) + return null; + } + } +} diff --git a/frontend/app/player/MessageDistributor/messages/index.ts b/frontend/app/player/MessageDistributor/messages/index.ts new file mode 100644 index 000000000..2619b58cd --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/index.ts @@ -0,0 +1 @@ +export * from './message' \ No newline at end of file diff --git a/frontend/app/player/MessageDistributor/messages/message.ts b/frontend/app/player/MessageDistributor/messages/message.ts new file mode 100644 index 000000000..1c21bbfd2 --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/message.ts @@ -0,0 +1,184 @@ +// Auto-generated, do not edit + +import type { Timed } from './timed' +import type { RawMessage } from './raw' +import type { RawBatchMeta, + RawTimestamp, + RawSessionDisconnect, + RawSetPageLocation, + RawSetViewportSize, + RawSetViewportScroll, + RawCreateDocument, + RawCreateElementNode, + RawCreateTextNode, + RawMoveNode, + RawRemoveNode, + RawSetNodeAttribute, + RawRemoveNodeAttribute, + RawSetNodeData, + RawSetCssData, + RawSetNodeScroll, + RawSetInputTarget, + RawSetInputValue, + RawSetInputChecked, + RawMouseMove, + RawConsoleLog, + RawPageLoadTiming, + RawPageRenderTiming, + RawJsException, + RawRawCustomEvent, + RawUserID, + RawUserAnonymousID, + RawMetadata, + RawCssInsertRule, + RawCssDeleteRule, + RawFetch, + RawProfiler, + RawOTable, + RawStateAction, + RawRedux, + RawVuex, + RawMobX, + RawNgRx, + RawGraphQl, + 
RawPerformanceTrack, + RawResourceTiming, + RawConnectionInformation, + RawSetPageVisibility, + RawLongTask, + RawSetNodeAttributeURLBased, + RawSetCssDataURLBased, + RawTechnicalInfo, + RawCustomIssue, + RawPageClose, + RawCssInsertRuleURLBased, + RawMouseClick, + RawCreateIFrameDocument, + RawIosSessionStart, + RawIosCustomEvent, + RawIosScreenChanges, + RawIosClickEvent, + RawIosPerformanceEvent, + RawIosLog, + RawIosNetworkCall, } from './raw' + +export type Message = RawMessage & Timed + + +export type BatchMeta = RawBatchMeta & Timed + +export type Timestamp = RawTimestamp & Timed + +export type SessionDisconnect = RawSessionDisconnect & Timed + +export type SetPageLocation = RawSetPageLocation & Timed + +export type SetViewportSize = RawSetViewportSize & Timed + +export type SetViewportScroll = RawSetViewportScroll & Timed + +export type CreateDocument = RawCreateDocument & Timed + +export type CreateElementNode = RawCreateElementNode & Timed + +export type CreateTextNode = RawCreateTextNode & Timed + +export type MoveNode = RawMoveNode & Timed + +export type RemoveNode = RawRemoveNode & Timed + +export type SetNodeAttribute = RawSetNodeAttribute & Timed + +export type RemoveNodeAttribute = RawRemoveNodeAttribute & Timed + +export type SetNodeData = RawSetNodeData & Timed + +export type SetCssData = RawSetCssData & Timed + +export type SetNodeScroll = RawSetNodeScroll & Timed + +export type SetInputTarget = RawSetInputTarget & Timed + +export type SetInputValue = RawSetInputValue & Timed + +export type SetInputChecked = RawSetInputChecked & Timed + +export type MouseMove = RawMouseMove & Timed + +export type ConsoleLog = RawConsoleLog & Timed + +export type PageLoadTiming = RawPageLoadTiming & Timed + +export type PageRenderTiming = RawPageRenderTiming & Timed + +export type JsException = RawJsException & Timed + +export type RawCustomEvent = RawRawCustomEvent & Timed + +export type UserID = RawUserID & Timed + +export type UserAnonymousID = RawUserAnonymousID & Timed + +export type Metadata = RawMetadata & Timed + +export type CssInsertRule = RawCssInsertRule & Timed + +export type CssDeleteRule = RawCssDeleteRule & Timed + +export type Fetch = RawFetch & Timed + +export type Profiler = RawProfiler & Timed + +export type OTable = RawOTable & Timed + +export type StateAction = RawStateAction & Timed + +export type Redux = RawRedux & Timed + +export type Vuex = RawVuex & Timed + +export type MobX = RawMobX & Timed + +export type NgRx = RawNgRx & Timed + +export type GraphQl = RawGraphQl & Timed + +export type PerformanceTrack = RawPerformanceTrack & Timed + +export type ResourceTiming = RawResourceTiming & Timed + +export type ConnectionInformation = RawConnectionInformation & Timed + +export type SetPageVisibility = RawSetPageVisibility & Timed + +export type LongTask = RawLongTask & Timed + +export type SetNodeAttributeURLBased = RawSetNodeAttributeURLBased & Timed + +export type SetCssDataURLBased = RawSetCssDataURLBased & Timed + +export type TechnicalInfo = RawTechnicalInfo & Timed + +export type CustomIssue = RawCustomIssue & Timed + +export type PageClose = RawPageClose & Timed + +export type CssInsertRuleURLBased = RawCssInsertRuleURLBased & Timed + +export type MouseClick = RawMouseClick & Timed + +export type CreateIFrameDocument = RawCreateIFrameDocument & Timed + +export type IosSessionStart = RawIosSessionStart & Timed + +export type IosCustomEvent = RawIosCustomEvent & Timed + +export type IosScreenChanges = RawIosScreenChanges & Timed + +export type IosClickEvent = 
RawIosClickEvent & Timed + +export type IosPerformanceEvent = RawIosPerformanceEvent & Timed + +export type IosLog = RawIosLog & Timed + +export type IosNetworkCall = RawIosNetworkCall & Timed diff --git a/frontend/app/player/MessageDistributor/messages/raw.ts b/frontend/app/player/MessageDistributor/messages/raw.ts new file mode 100644 index 000000000..e86181b2d --- /dev/null +++ b/frontend/app/player/MessageDistributor/messages/raw.ts @@ -0,0 +1,491 @@ +// Auto-generated, do not edit + +export const TP_MAP = { + 80: "batch_meta", + 0: "timestamp", + 2: "session_disconnect", + 4: "set_page_location", + 5: "set_viewport_size", + 6: "set_viewport_scroll", + 7: "create_document", + 8: "create_element_node", + 9: "create_text_node", + 10: "move_node", + 11: "remove_node", + 12: "set_node_attribute", + 13: "remove_node_attribute", + 14: "set_node_data", + 15: "set_css_data", + 16: "set_node_scroll", + 17: "set_input_target", + 18: "set_input_value", + 19: "set_input_checked", + 20: "mouse_move", + 22: "console_log", + 23: "page_load_timing", + 24: "page_render_timing", + 25: "js_exception", + 27: "raw_custom_event", + 28: "user_id", + 29: "user_anonymous_id", + 30: "metadata", + 37: "css_insert_rule", + 38: "css_delete_rule", + 39: "fetch", + 40: "profiler", + 41: "o_table", + 42: "state_action", + 44: "redux", + 45: "vuex", + 46: "mob_x", + 47: "ng_rx", + 48: "graph_ql", + 49: "performance_track", + 53: "resource_timing", + 54: "connection_information", + 55: "set_page_visibility", + 59: "long_task", + 60: "set_node_attribute_url_based", + 61: "set_css_data_url_based", + 63: "technical_info", + 64: "custom_issue", + 65: "page_close", + 67: "css_insert_rule_url_based", + 69: "mouse_click", + 70: "create_i_frame_document", + 90: "ios_session_start", + 93: "ios_custom_event", + 96: "ios_screen_changes", + 100: "ios_click_event", + 102: "ios_performance_event", + 103: "ios_log", + 105: "ios_network_call", +} + + +export interface RawBatchMeta { + tp: "batch_meta", + pageNo: number, + firstIndex: number, + timestamp: number, +} + +export interface RawTimestamp { + tp: "timestamp", + timestamp: number, +} + +export interface RawSessionDisconnect { + tp: "session_disconnect", + timestamp: number, +} + +export interface RawSetPageLocation { + tp: "set_page_location", + url: string, + referrer: string, + navigationStart: number, +} + +export interface RawSetViewportSize { + tp: "set_viewport_size", + width: number, + height: number, +} + +export interface RawSetViewportScroll { + tp: "set_viewport_scroll", + x: number, + y: number, +} + +export interface RawCreateDocument { + tp: "create_document", + +} + +export interface RawCreateElementNode { + tp: "create_element_node", + id: number, + parentID: number, + index: number, + tag: string, + svg: boolean, +} + +export interface RawCreateTextNode { + tp: "create_text_node", + id: number, + parentID: number, + index: number, +} + +export interface RawMoveNode { + tp: "move_node", + id: number, + parentID: number, + index: number, +} + +export interface RawRemoveNode { + tp: "remove_node", + id: number, +} + +export interface RawSetNodeAttribute { + tp: "set_node_attribute", + id: number, + name: string, + value: string, +} + +export interface RawRemoveNodeAttribute { + tp: "remove_node_attribute", + id: number, + name: string, +} + +export interface RawSetNodeData { + tp: "set_node_data", + id: number, + data: string, +} + +export interface RawSetCssData { + tp: "set_css_data", + id: number, + data: string, +} + +export interface RawSetNodeScroll { + tp: 
"set_node_scroll", + id: number, + x: number, + y: number, +} + +export interface RawSetInputTarget { + tp: "set_input_target", + id: number, + label: string, +} + +export interface RawSetInputValue { + tp: "set_input_value", + id: number, + value: string, + mask: number, +} + +export interface RawSetInputChecked { + tp: "set_input_checked", + id: number, + checked: boolean, +} + +export interface RawMouseMove { + tp: "mouse_move", + x: number, + y: number, +} + +export interface RawConsoleLog { + tp: "console_log", + level: string, + value: string, +} + +export interface RawPageLoadTiming { + tp: "page_load_timing", + requestStart: number, + responseStart: number, + responseEnd: number, + domContentLoadedEventStart: number, + domContentLoadedEventEnd: number, + loadEventStart: number, + loadEventEnd: number, + firstPaint: number, + firstContentfulPaint: number, +} + +export interface RawPageRenderTiming { + tp: "page_render_timing", + speedIndex: number, + visuallyComplete: number, + timeToInteractive: number, +} + +export interface RawJsException { + tp: "js_exception", + name: string, + message: string, + payload: string, +} + +export interface RawRawCustomEvent { + tp: "raw_custom_event", + name: string, + payload: string, +} + +export interface RawUserID { + tp: "user_id", + id: string, +} + +export interface RawUserAnonymousID { + tp: "user_anonymous_id", + id: string, +} + +export interface RawMetadata { + tp: "metadata", + key: string, + value: string, +} + +export interface RawCssInsertRule { + tp: "css_insert_rule", + id: number, + rule: string, + index: number, +} + +export interface RawCssDeleteRule { + tp: "css_delete_rule", + id: number, + index: number, +} + +export interface RawFetch { + tp: "fetch", + method: string, + url: string, + request: string, + response: string, + status: number, + timestamp: number, + duration: number, +} + +export interface RawProfiler { + tp: "profiler", + name: string, + duration: number, + args: string, + result: string, +} + +export interface RawOTable { + tp: "o_table", + key: string, + value: string, +} + +export interface RawStateAction { + tp: "state_action", + type: string, +} + +export interface RawRedux { + tp: "redux", + action: string, + state: string, + duration: number, +} + +export interface RawVuex { + tp: "vuex", + mutation: string, + state: string, +} + +export interface RawMobX { + tp: "mob_x", + type: string, + payload: string, +} + +export interface RawNgRx { + tp: "ng_rx", + action: string, + state: string, + duration: number, +} + +export interface RawGraphQl { + tp: "graph_ql", + operationKind: string, + operationName: string, + variables: string, + response: string, +} + +export interface RawPerformanceTrack { + tp: "performance_track", + frames: number, + ticks: number, + totalJSHeapSize: number, + usedJSHeapSize: number, +} + +export interface RawResourceTiming { + tp: "resource_timing", + timestamp: number, + duration: number, + ttfb: number, + headerSize: number, + encodedBodySize: number, + decodedBodySize: number, + url: string, + initiator: string, +} + +export interface RawConnectionInformation { + tp: "connection_information", + downlink: number, + type: string, +} + +export interface RawSetPageVisibility { + tp: "set_page_visibility", + hidden: boolean, +} + +export interface RawLongTask { + tp: "long_task", + timestamp: number, + duration: number, + context: number, + containerType: number, + containerSrc: string, + containerId: string, + containerName: string, +} + +export interface 
RawSetNodeAttributeURLBased { + tp: "set_node_attribute_url_based", + id: number, + name: string, + value: string, + baseURL: string, +} + +export interface RawSetCssDataURLBased { + tp: "set_css_data_url_based", + id: number, + data: string, + baseURL: string, +} + +export interface RawTechnicalInfo { + tp: "technical_info", + type: string, + value: string, +} + +export interface RawCustomIssue { + tp: "custom_issue", + name: string, + payload: string, +} + +export interface RawPageClose { + tp: "page_close", + +} + +export interface RawCssInsertRuleURLBased { + tp: "css_insert_rule_url_based", + id: number, + rule: string, + index: number, + baseURL: string, +} + +export interface RawMouseClick { + tp: "mouse_click", + id: number, + hesitationTime: number, + label: string, + selector: string, +} + +export interface RawCreateIFrameDocument { + tp: "create_i_frame_document", + frameID: number, + id: number, +} + +export interface RawIosSessionStart { + tp: "ios_session_start", + timestamp: number, + projectID: number, + trackerVersion: string, + revID: string, + userUUID: string, + userOS: string, + userOSVersion: string, + userDevice: string, + userDeviceType: string, + userCountry: string, +} + +export interface RawIosCustomEvent { + tp: "ios_custom_event", + timestamp: number, + length: number, + name: string, + payload: string, +} + +export interface RawIosScreenChanges { + tp: "ios_screen_changes", + timestamp: number, + length: number, + x: number, + y: number, + width: number, + height: number, +} + +export interface RawIosClickEvent { + tp: "ios_click_event", + timestamp: number, + length: number, + label: string, + x: number, + y: number, +} + +export interface RawIosPerformanceEvent { + tp: "ios_performance_event", + timestamp: number, + length: number, + name: string, + value: number, +} + +export interface RawIosLog { + tp: "ios_log", + timestamp: number, + length: number, + severity: string, + content: string, +} + +export interface RawIosNetworkCall { + tp: "ios_network_call", + timestamp: number, + length: number, + duration: number, + headers: string, + body: string, + url: string, + success: boolean, + method: string, + status: number, +} + + +export type RawMessage = RawBatchMeta | RawTimestamp | RawSessionDisconnect | RawSetPageLocation | RawSetViewportSize | RawSetViewportScroll | RawCreateDocument | RawCreateElementNode | RawCreateTextNode | RawMoveNode | RawRemoveNode | RawSetNodeAttribute | RawRemoveNodeAttribute | RawSetNodeData | RawSetCssData | RawSetNodeScroll | RawSetInputTarget | RawSetInputValue | RawSetInputChecked | RawMouseMove | RawConsoleLog | RawPageLoadTiming | RawPageRenderTiming | RawJsException | RawRawCustomEvent | RawUserID | RawUserAnonymousID | RawMetadata | RawCssInsertRule | RawCssDeleteRule | RawFetch | RawProfiler | RawOTable | RawStateAction | RawRedux | RawVuex | RawMobX | RawNgRx | RawGraphQl | RawPerformanceTrack | RawResourceTiming | RawConnectionInformation | RawSetPageVisibility | RawLongTask | RawSetNodeAttributeURLBased | RawSetCssDataURLBased | RawTechnicalInfo | RawCustomIssue | RawPageClose | RawCssInsertRuleURLBased | RawMouseClick | RawCreateIFrameDocument | RawIosSessionStart | RawIosCustomEvent | RawIosScreenChanges | RawIosClickEvent | RawIosPerformanceEvent | RawIosLog | RawIosNetworkCall; diff --git a/frontend/app/player/MessageDistributor/messages/timed.ts b/frontend/app/player/MessageDistributor/messages/timed.ts new file mode 100644 index 000000000..2dd4cc707 --- /dev/null +++ 
b/frontend/app/player/MessageDistributor/messages/timed.ts
@@ -0,0 +1 @@
+export interface Timed { readonly time: number };
diff --git a/frontend/app/player/MessageDistributor/messages/urlResolve.ts b/frontend/app/player/MessageDistributor/messages/urlResolve.ts
new file mode 100644
index 000000000..b80ff4f9a
--- /dev/null
+++ b/frontend/app/player/MessageDistributor/messages/urlResolve.ts
@@ -0,0 +1,56 @@
+export function resolveURL(baseURL: string, relURL: string): string {
+  if (relURL.startsWith('#') || relURL === "") {
+    return relURL;
+  }
+  return new URL(relURL, baseURL).toString();
+}
+
+
+const re1 = /url\(("[^"]*"|'[^']*'|[^)]*)\)/g // url(...) with an optionally quoted argument
+const re2 = /@import "(.*?)"/g // double-quoted @import targets
+function cssUrlsIndex(css: string): Array<[number, number]> { // [start, end) offsets of every url()/@import target in the stylesheet text
+  const idxs: Array<[number, number]> = [];
+  const i1 = css.matchAll(re1);
+  // @ts-ignore
+  for (let m of i1) {
+    // @ts-ignore
+    const s: number = m.index + m[0].indexOf(m[1]);
+    const e: number = s + m[1].length;
+    idxs.push([s, e]);
+  }
+  const i2 = css.matchAll(re2);
+  // @ts-ignore
+  for (let m of i2) {
+    // @ts-ignore
+    const s = m.index + m[0].indexOf(m[1]);
+    const e = s + m[1].length;
+    idxs.push([s, e])
+  }
+  return idxs;
+}
+function unquote(str: string): [string, string] { // returns the bare string plus the quote character it was wrapped in, if any
+  str = str.trim();
+  if (str.length <= 2) {
+    return [str, ""]
+  }
+  if (str[0] == '"' && str[str.length-1] == '"') {
+    return [ str.substring(1, str.length-1), "\""];
+  }
+  if (str[0] == '\'' && str[str.length-1] == '\'') {
+    return [ str.substring(1, str.length-1), "'" ];
+  }
+  return [str, ""]
+}
+function rewriteCSSLinks(css: string, rewriter: (rawurl: string) => string): string { // rewrites each URL in place, preserving the original quoting
+  for (let idx of cssUrlsIndex(css)) {
+    const f = idx[0]
+    const t = idx[1]
+    const [ rawurl, q ] = unquote(css.substring(f, t));
+    css = css.substring(0,f) + q + rewriter(rawurl) + q + css.substring(t);
+  }
+  return css
+}
+
+export function resolveCSS(baseURL: string, css: string): string {
+  return rewriteCSSLinks(css, rawurl => resolveURL(baseURL, rawurl));
+}
\ No newline at end of file
diff --git a/frontend/app/player/ios/Parser.ts b/frontend/app/player/ios/Parser.ts
index f202e9306..15b750df6 100644
--- a/frontend/app/player/ios/Parser.ts
+++ b/frontend/app/player/ios/Parser.ts
@@ -1,12 +1,11 @@
-import readMessage from '../MessageDistributor/messages';
-import PrimitiveReader from '../MessageDistributor/PrimitiveReader';
+import RawMessageReader from '../MessageDistributor/messages/RawMessageReader';
 
 export default class Parser {
-  private reader: PrimitiveReader
+  private reader: RawMessageReader
   private error: boolean = false
 
   constructor(byteArray) {
-    this.reader = new PrimitiveReader(byteArray)
+    this.reader = new RawMessageReader(byteArray)
   }
 
   parseEach(cb) {
@@ -19,12 +18,12 @@ export default class Parser {
   }
 
   hasNext() {
-    return !this.error && this.reader.hasNext();
+    return !this.error && this.reader.hasNextByte();
   }
 
   next() {
     try {
-      return readMessage(this.reader)
+      return this.reader.readMessage()
     } catch(e) {
       console.warn(e)
       this.error = true
diff --git a/frontend/env.js b/frontend/env.js
index f7c49c874..10513c3b1 100644
--- a/frontend/env.js
+++ b/frontend/env.js
@@ -13,7 +13,7 @@ const oss = {
   ORIGIN: () => 'window.location.origin',
   API_EDP: () => 'window.location.origin + "/api"',
   ASSETS_HOST: () => 'window.location.origin + "/assets"',
-  VERSION: '1.3.6',
+  VERSION: '1.4.0',
   SOURCEMAP: true,
   MINIO_ENDPOINT: process.env.MINIO_ENDPOINT,
   MINIO_PORT: process.env.MINIO_PORT,
diff --git a/scripts/helm/app/alerts.yaml
b/scripts/helm/app/alerts.yaml
index f992a7cee..59e6bc18b 100644
--- a/scripts/helm/app/alerts.yaml
+++ b/scripts/helm/app/alerts.yaml
@@ -22,7 +22,25 @@ resources:
     memory: 1Mi
 env:
-  ALERT_NOTIFICATION_STRING: http://chalice-openreplay.app.svc.cluster.local:8000/alerts/notifications
-  CLICKHOUSE_STRING: tcp://clickhouse.db.svc.cluster.local:9000/default
-  POSTGRES_STRING: postgres://postgres:asayerPostgres@postgresql.db.svc.cluster.local:5432
+  pg_host: postgresql.db.svc.cluster.local
+  pg_port: 5432
+  pg_dbname: postgres
+  pg_user: postgres
+  pg_password: asayerPostgres
+  EMAIL_HOST: ''
+  EMAIL_PORT: '587'
+  EMAIL_USER: ''
+  EMAIL_PASSWORD: ''
+  EMAIL_USE_TLS: 'true'
+  EMAIL_USE_SSL: 'false'
+  EMAIL_SSL_KEY: ''
+  EMAIL_SSL_CERT: ''
+  EMAIL_FROM: OpenReplay
+  SITE_URL: ''
+  S3_HOST: 'http://minio.db.svc.cluster.local:9000'
+  S3_KEY: minios3AccessKeyS3cr3t
+  S3_SECRET: m1n10s3CretK3yPassw0rd
+  AWS_DEFAULT_REGION: us-east-1
   LICENSE_KEY: ""
+  PYTHONUNBUFFERED: '0'
+  version_number: '1.4.0'
diff --git a/scripts/helm/db/bucket_policy.sh b/scripts/helm/db/bucket_policy.sh
index 67e6adb77..65ea068e5 100644
--- a/scripts/helm/db/bucket_policy.sh
+++ b/scripts/helm/db/bucket_policy.sh
@@ -1,5 +1,5 @@
 #!/bin/bash
-buckets=("mobs" "sessions-assets" "static" "sourcemaps")
+buckets=("mobs" "sessions-assets" "static" "sourcemaps" "sessions-mobile-assets")
 
 mc alias set minio http://localhost:9000 $1 $2
diff --git a/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql b/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql
new file mode 100644
index 000000000..2f021d1ee
--- /dev/null
+++ b/scripts/helm/db/init_dbs/postgresql/1.4.0/1.4.0.sql
@@ -0,0 +1,89 @@
+BEGIN;
+CREATE OR REPLACE FUNCTION openreplay_version()
+    RETURNS text AS
+$$
+SELECT 'v1.4.0'
+$$ LANGUAGE sql IMMUTABLE;
+
+CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id);
+
+CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_idx ON events.pages (session_id, timestamp);
+
+CREATE INDEX IF NOT EXISTS errors_timestamp_idx ON events.errors (timestamp);
+CREATE INDEX IF NOT EXISTS projects_project_key_idx ON public.projects (project_key);
+
+ALTER TABLE sessions
+    ADD COLUMN IF NOT EXISTS utm_source text NULL DEFAULT NULL,
+    ADD COLUMN IF NOT EXISTS utm_medium text NULL DEFAULT NULL,
+    ADD COLUMN IF NOT EXISTS utm_campaign text NULL DEFAULT NULL;
+
+CREATE INDEX IF NOT EXISTS sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops);
+CREATE INDEX IF NOT EXISTS sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops);
+CREATE INDEX IF NOT EXISTS sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops);
+CREATE INDEX IF NOT EXISTS requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE; -- partial index: only failed requests are indexed
+
+
+DROP INDEX IF EXISTS sessions_project_id_user_browser_idx1;
+DROP INDEX IF EXISTS sessions_project_id_user_country_idx1;
+ALTER INDEX IF EXISTS platform_idx RENAME TO sessions_platform_idx;
+ALTER INDEX IF EXISTS events.resources_duration_idx RENAME TO resources_duration_durationgt0_idx;
+DROP INDEX IF EXISTS projects_project_key_idx1;
+CREATE INDEX IF NOT EXISTS errors_parent_error_id_idx ON errors (parent_error_id);
+
+CREATE INDEX IF NOT EXISTS performance_session_id_idx ON events.performance (session_id);
+CREATE INDEX IF NOT EXISTS performance_timestamp_idx ON events.performance (timestamp);
+CREATE INDEX IF NOT EXISTS
performance_session_id_timestamp_idx ON events.performance (session_id, timestamp); +CREATE INDEX IF NOT EXISTS performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0; +CREATE INDEX IF NOT EXISTS performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0; + +CREATE TABLE IF NOT EXISTS metrics +( + metric_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer REFERENCES users (user_id) ON DELETE SET NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp +); +CREATE INDEX IF NOT EXISTS metrics_user_id_is_public_idx ON public.metrics (user_id, is_public); +CREATE TABLE IF NOT EXISTS metric_series +( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp +); +CREATE INDEX IF NOT EXISTS metric_series_metric_id_idx ON public.metric_series (metric_id); + +CREATE INDEX IF NOT EXISTS funnels_project_id_idx ON public.funnels (project_id); + + +CREATE TABLE IF NOT EXISTS searches +( + search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False +); + +CREATE INDEX IF NOT EXISTS searches_user_id_is_public_idx ON public.searches (user_id, is_public); +CREATE INDEX IF NOT EXISTS searches_project_id_idx ON public.searches (project_id); +CREATE INDEX IF NOT EXISTS alerts_project_id_idx ON alerts (project_id); + +ALTER TABLE alerts + ADD COLUMN IF NOT EXISTS series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE; + +CREATE INDEX IF NOT EXISTS alerts_series_id_idx ON alerts (series_id); +UPDATE alerts +SET options=jsonb_set(options, '{change}', '"change"') +WHERE detection_method = 'change' + AND options -> 'change' ISNULL; +COMMIT; \ No newline at end of file diff --git a/scripts/helm/db/init_dbs/postgresql/init_schema.sql b/scripts/helm/db/init_dbs/postgresql/init_schema.sql index 80b2a9135..4607c2759 100644 --- a/scripts/helm/db/init_dbs/postgresql/init_schema.sql +++ b/scripts/helm/db/init_dbs/postgresql/init_schema.sql @@ -3,6 +3,12 @@ BEGIN; CREATE SCHEMA IF NOT EXISTS events_common; CREATE SCHEMA IF NOT EXISTS events; +CREATE OR REPLACE FUNCTION openreplay_version() + RETURNS text AS +$$ +SELECT 'v1.4.0' +$$ LANGUAGE sql IMMUTABLE; + -- --- accounts.sql --- CREATE OR REPLACE FUNCTION generate_api_key(length integer) RETURNS text AS @@ -108,7 +114,7 @@ $$ CREATE EXTENSION IF NOT EXISTS pgcrypto; -- --- accounts.sql --- - CREATE TABLE IF NOT EXISTS public.tenants + CREATE TABLE tenants ( tenant_id integer NOT NULL DEFAULT 1, user_id text NOT NULL DEFAULT generate_api_key(20), @@ -256,38 +262,13 @@ $$ }'::jsonb -- ?????? 
); + CREATE INDEX projects_project_key_idx ON public.projects (project_key); CREATE TRIGGER on_insert_or_update AFTER INSERT OR UPDATE ON projects FOR EACH ROW EXECUTE PROCEDURE notify_project(); --- --- alerts.sql --- - - CREATE TYPE alert_detection_method AS ENUM ('threshold', 'change'); - - CREATE TABLE alerts - ( - alert_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, - project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, - name text NOT NULL, - description text NULL DEFAULT NULL, - active boolean NOT NULL DEFAULT TRUE, - detection_method alert_detection_method NOT NULL, - query jsonb NOT NULL, - deleted_at timestamp NULL DEFAULT NULL, - created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), - options jsonb NOT NULL DEFAULT '{ - "renotifyInterval": 1440 - }'::jsonb - ); - - - CREATE TRIGGER on_insert_or_update_or_delete - AFTER INSERT OR UPDATE OR DELETE - ON alerts - FOR EACH ROW - EXECUTE PROCEDURE notify_alert(); -- --- webhooks.sql --- @@ -347,7 +328,8 @@ $$ is_public boolean NOT NULL DEFAULT False ); - CREATE INDEX ON public.funnels (user_id, is_public); + CREATE INDEX funnels_user_id_is_public_idx ON public.funnels (user_id, is_public); + CREATE INDEX funnels_project_id_idx ON public.funnels (project_id); -- --- announcements.sql --- @@ -431,7 +413,7 @@ $$ context_string text NOT NULL, context jsonb DEFAULT NULL ); - CREATE INDEX ON issues (issue_id, type); + CREATE INDEX issues_issue_id_type_idx ON issues (issue_id, type); CREATE INDEX issues_context_string_gin_idx ON public.issues USING GIN (context_string gin_trgm_ops); CREATE INDEX issues_project_id_idx ON issues (project_id); @@ -452,7 +434,7 @@ $$ stacktrace jsonb, --to save the stacktrace and not query S3 another time stacktrace_parsed_at timestamp ); - CREATE INDEX ON errors (project_id, source); + CREATE INDEX errors_project_id_source_idx ON errors (project_id, source); CREATE INDEX errors_message_gin_idx ON public.errors USING GIN (message gin_trgm_ops); CREATE INDEX errors_name_gin_idx ON public.errors USING GIN (name gin_trgm_ops); CREATE INDEX errors_project_id_idx ON public.errors (project_id); @@ -461,6 +443,7 @@ $$ CREATE INDEX errors_project_id_error_id_idx ON public.errors (project_id, error_id); CREATE INDEX errors_project_id_error_id_integration_idx ON public.errors (project_id, error_id) WHERE source != 'js_exception'; CREATE INDEX errors_error_id_idx ON errors (error_id); + CREATE INDEX errors_parent_error_id_idx ON errors (parent_error_id); CREATE TABLE user_favorite_errors ( @@ -513,6 +496,9 @@ $$ watchdogs_score bigint NOT NULL DEFAULT 0, issue_score bigint NOT NULL DEFAULT 0, issue_types issue_type[] NOT NULL DEFAULT '{}'::issue_type[], + utm_source text NULL DEFAULT NULL, + utm_medium text NULL DEFAULT NULL, + utm_campaign text NULL DEFAULT NULL, metadata_1 text DEFAULT NULL, metadata_2 text DEFAULT NULL, metadata_3 text DEFAULT NULL, @@ -523,28 +509,25 @@ $$ metadata_8 text DEFAULT NULL, metadata_9 text DEFAULT NULL, metadata_10 text DEFAULT NULL --- , --- rehydration_id integer REFERENCES rehydrations(rehydration_id) ON DELETE SET NULL ); - CREATE INDEX ON sessions (project_id, start_ts); - CREATE INDEX ON sessions (project_id, user_id); - CREATE INDEX ON sessions (project_id, user_anonymous_id); - CREATE INDEX ON sessions (project_id, user_device); - CREATE INDEX ON sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_browser); - CREATE INDEX ON sessions (project_id, metadata_1); - CREATE INDEX ON sessions (project_id, 
metadata_2); - CREATE INDEX ON sessions (project_id, metadata_3); - CREATE INDEX ON sessions (project_id, metadata_4); - CREATE INDEX ON sessions (project_id, metadata_5); - CREATE INDEX ON sessions (project_id, metadata_6); - CREATE INDEX ON sessions (project_id, metadata_7); - CREATE INDEX ON sessions (project_id, metadata_8); - CREATE INDEX ON sessions (project_id, metadata_9); - CREATE INDEX ON sessions (project_id, metadata_10); --- CREATE INDEX ON sessions (rehydration_id); - CREATE INDEX ON sessions (project_id, watchdogs_score DESC); - CREATE INDEX platform_idx ON public.sessions (platform); + CREATE INDEX sessions_project_id_start_ts_idx ON sessions (project_id, start_ts); + CREATE INDEX sessions_project_id_user_id_idx ON sessions (project_id, user_id); + CREATE INDEX sessions_project_id_user_anonymous_id_idx ON sessions (project_id, user_anonymous_id); + CREATE INDEX sessions_project_id_user_device_idx ON sessions (project_id, user_device); + CREATE INDEX sessions_project_id_user_country_idx ON sessions (project_id, user_country); + CREATE INDEX sessions_project_id_user_browser_idx ON sessions (project_id, user_browser); + CREATE INDEX sessions_project_id_metadata_1_idx ON sessions (project_id, metadata_1); + CREATE INDEX sessions_project_id_metadata_2_idx ON sessions (project_id, metadata_2); + CREATE INDEX sessions_project_id_metadata_3_idx ON sessions (project_id, metadata_3); + CREATE INDEX sessions_project_id_metadata_4_idx ON sessions (project_id, metadata_4); + CREATE INDEX sessions_project_id_metadata_5_idx ON sessions (project_id, metadata_5); + CREATE INDEX sessions_project_id_metadata_6_idx ON sessions (project_id, metadata_6); + CREATE INDEX sessions_project_id_metadata_7_idx ON sessions (project_id, metadata_7); + CREATE INDEX sessions_project_id_metadata_8_idx ON sessions (project_id, metadata_8); + CREATE INDEX sessions_project_id_metadata_9_idx ON sessions (project_id, metadata_9); + CREATE INDEX sessions_project_id_metadata_10_idx ON sessions (project_id, metadata_10); + CREATE INDEX sessions_project_id_watchdogs_score_idx ON sessions (project_id, watchdogs_score DESC); + CREATE INDEX sessions_platform_idx ON public.sessions (platform); CREATE INDEX sessions_metadata1_gin_idx ON public.sessions USING GIN (metadata_1 gin_trgm_ops); CREATE INDEX sessions_metadata2_gin_idx ON public.sessions USING GIN (metadata_2 gin_trgm_ops); @@ -562,14 +545,15 @@ $$ CREATE INDEX sessions_user_id_gin_idx ON public.sessions USING GIN (user_id gin_trgm_ops); CREATE INDEX sessions_user_anonymous_id_gin_idx ON public.sessions USING GIN (user_anonymous_id gin_trgm_ops); CREATE INDEX sessions_user_country_gin_idx ON public.sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_country); - CREATE INDEX ON sessions (project_id, user_browser); CREATE INDEX sessions_start_ts_idx ON public.sessions (start_ts) WHERE duration > 0; CREATE INDEX sessions_project_id_idx ON public.sessions (project_id) WHERE duration > 0; CREATE INDEX sessions_session_id_project_id_start_ts_idx ON sessions (session_id, project_id, start_ts) WHERE duration > 0; CREATE INDEX sessions_session_id_project_id_start_ts_durationNN_idx ON sessions (session_id, project_id, start_ts) WHERE duration IS NOT NULL; CREATE INDEX sessions_user_id_useridNN_idx ON sessions (user_id) WHERE user_id IS NOT NULL; CREATE INDEX sessions_uid_projectid_startts_sessionid_uidNN_durGTZ_idx ON sessions (user_id, project_id, start_ts, session_id) WHERE user_id IS NOT NULL AND duration > 0; + CREATE INDEX 
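+    -- note on the GIN indexes in this block: gin_trgm_ops (pg_trgm) indexes trigrams, which lets ILIKE '%...%' substring filters on these text columns use an index scan instead of a sequential scan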
sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops); + CREATE INDEX sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops); + CREATE INDEX sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops); ALTER TABLE public.sessions ADD CONSTRAINT web_browser_constraint CHECK ( @@ -598,7 +582,7 @@ $$ session_id bigint NOT NULL REFERENCES sessions (session_id) ON DELETE CASCADE, PRIMARY KEY (user_id, session_id) ); - + CREATE INDEX user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id); -- --- assignments.sql --- @@ -611,7 +595,7 @@ $$ created_at timestamp default timezone('utc'::text, now()) NOT NULL, provider_data jsonb default '{}'::jsonb NOT NULL ); - CREATE INDEX ON assigned_sessions (session_id); + CREATE INDEX assigned_sessions_session_id_idx ON assigned_sessions (session_id); -- --- events_common.sql --- @@ -629,9 +613,9 @@ $$ level events_common.custom_level NOT NULL DEFAULT 'info', PRIMARY KEY (session_id, timestamp, seq_index) ); - CREATE INDEX ON events_common.customs (name); + CREATE INDEX customs_name_idx ON events_common.customs (name); CREATE INDEX customs_name_gin_idx ON events_common.customs USING GIN (name gin_trgm_ops); - CREATE INDEX ON events_common.customs (timestamp); + CREATE INDEX customs_timestamp_idx ON events_common.customs (timestamp); CREATE TABLE events_common.issues @@ -657,10 +641,10 @@ $$ success boolean NOT NULL, PRIMARY KEY (session_id, timestamp, seq_index) ); - CREATE INDEX ON events_common.requests (url); - CREATE INDEX ON events_common.requests (duration); + CREATE INDEX requests_url_idx ON events_common.requests (url); + CREATE INDEX requests_duration_idx ON events_common.requests (duration); CREATE INDEX requests_url_gin_idx ON events_common.requests USING GIN (url gin_trgm_ops); - CREATE INDEX ON events_common.requests (timestamp); + CREATE INDEX requests_timestamp_idx ON events_common.requests (timestamp); CREATE INDEX requests_url_gin_idx2 ON events_common.requests USING GIN (RIGHT(url, length(url) - (CASE WHEN url LIKE 'http://%' THEN 7 @@ -668,7 +652,7 @@ $$ THEN 8 ELSE 0 END)) gin_trgm_ops); - + CREATE INDEX requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE; -- --- events.sql --- CREATE SCHEMA IF NOT EXISTS events; @@ -695,10 +679,11 @@ $$ ttfb integer DEFAULT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.pages (session_id); + CREATE INDEX pages_session_id_idx ON events.pages (session_id); CREATE INDEX pages_base_path_gin_idx ON events.pages USING GIN (base_path gin_trgm_ops); CREATE INDEX pages_base_referrer_gin_idx ON events.pages USING GIN (base_referrer gin_trgm_ops); - CREATE INDEX ON events.pages (timestamp); + CREATE INDEX pages_timestamp_idx ON events.pages (timestamp); + CREATE INDEX pages_session_id_timestamp_idx ON events.pages (session_id, timestamp); CREATE INDEX pages_base_path_gin_idx2 ON events.pages USING GIN (RIGHT(base_path, length(base_path) - 1) gin_trgm_ops); CREATE INDEX pages_base_path_idx ON events.pages (base_path); CREATE INDEX pages_base_path_idx2 ON events.pages (RIGHT(base_path, length(base_path) - 1)); @@ -711,8 +696,8 @@ $$ THEN 8 ELSE 0 END)) gin_trgm_ops); - CREATE INDEX ON events.pages (response_time); - CREATE INDEX ON events.pages (response_end); + CREATE INDEX pages_response_time_idx ON events.pages (response_time); + CREATE INDEX pages_response_end_idx ON events.pages (response_end); CREATE INDEX 
pages_path_gin_idx ON events.pages USING GIN (path gin_trgm_ops); CREATE INDEX pages_path_idx ON events.pages (path); CREATE INDEX pages_visually_complete_idx ON events.pages (visually_complete) WHERE visually_complete > 0; @@ -746,10 +731,10 @@ $$ selector text DEFAULT '' NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.clicks (session_id); - CREATE INDEX ON events.clicks (label); + CREATE INDEX clicks_session_id_idx ON events.clicks (session_id); + CREATE INDEX clicks_label_idx ON events.clicks (label); CREATE INDEX clicks_label_gin_idx ON events.clicks USING GIN (label gin_trgm_ops); - CREATE INDEX ON events.clicks (timestamp); + CREATE INDEX clicks_timestamp_idx ON events.clicks (timestamp); CREATE INDEX clicks_label_session_id_timestamp_idx ON events.clicks (label, session_id, timestamp); CREATE INDEX clicks_url_idx ON events.clicks (url); CREATE INDEX clicks_url_gin_idx ON events.clicks USING GIN (url gin_trgm_ops); @@ -766,11 +751,11 @@ $$ value text DEFAULT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.inputs (session_id); - CREATE INDEX ON events.inputs (label, value); + CREATE INDEX inputs_session_id_idx ON events.inputs (session_id); + CREATE INDEX inputs_label_value_idx ON events.inputs (label, value); CREATE INDEX inputs_label_gin_idx ON events.inputs USING GIN (label gin_trgm_ops); CREATE INDEX inputs_label_idx ON events.inputs (label); - CREATE INDEX ON events.inputs (timestamp); + CREATE INDEX inputs_timestamp_idx ON events.inputs (timestamp); CREATE INDEX inputs_label_session_id_timestamp_idx ON events.inputs (label, session_id, timestamp); CREATE TABLE events.errors @@ -781,7 +766,8 @@ $$ error_id text NOT NULL REFERENCES errors (error_id) ON DELETE CASCADE, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.errors (session_id); + CREATE INDEX errors_session_id_idx ON events.errors (session_id); + CREATE INDEX errors_timestamp_idx ON events.errors (timestamp); CREATE INDEX errors_session_id_timestamp_error_id_idx ON events.errors (session_id, timestamp, error_id); CREATE INDEX errors_error_id_timestamp_idx ON events.errors (error_id, timestamp); CREATE INDEX errors_timestamp_error_id_session_id_idx ON events.errors (timestamp, error_id, session_id); @@ -796,9 +782,9 @@ $$ name text NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.graphql (name); + CREATE INDEX graphql_name_idx ON events.graphql (name); CREATE INDEX graphql_name_gin_idx ON events.graphql USING GIN (name gin_trgm_ops); - CREATE INDEX ON events.graphql (timestamp); + CREATE INDEX graphql_timestamp_idx ON events.graphql (timestamp); CREATE TABLE events.state_actions ( @@ -808,9 +794,9 @@ $$ name text NOT NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.state_actions (name); + CREATE INDEX state_actions_name_idx ON events.state_actions (name); CREATE INDEX state_actions_name_gin_idx ON events.state_actions USING GIN (name gin_trgm_ops); - CREATE INDEX ON events.state_actions (timestamp); + CREATE INDEX state_actions_timestamp_idx ON events.state_actions (timestamp); CREATE TYPE events.resource_type AS ENUM ('other', 'script', 'stylesheet', 'fetch', 'img', 'media'); CREATE TYPE events.resource_method AS ENUM ('GET' , 'HEAD' , 'POST' , 'PUT' , 'DELETE' , 'CONNECT' , 'OPTIONS' , 'TRACE' , 'PATCH' ); @@ -833,11 +819,13 @@ $$ decoded_body_size integer NULL, PRIMARY KEY (session_id, message_id) ); - CREATE INDEX ON events.resources (session_id); - CREATE INDEX ON events.resources (status); - CREATE INDEX 
ON events.resources (type); - CREATE INDEX ON events.resources (duration) WHERE duration > 0; - CREATE INDEX ON events.resources (url_host); + CREATE INDEX resources_session_id_idx ON events.resources (session_id); + CREATE INDEX resources_status_idx ON events.resources (status); + CREATE INDEX resources_type_idx ON events.resources (type); + CREATE INDEX resources_duration_durationgt0_idx ON events.resources (duration) WHERE duration > 0; + CREATE INDEX resources_url_host_idx ON events.resources (url_host); + CREATE INDEX resources_timestamp_idx ON events.resources (timestamp); + CREATE INDEX resources_success_idx ON events.resources (success); CREATE INDEX resources_url_gin_idx ON events.resources USING GIN (url gin_trgm_ops); CREATE INDEX resources_url_idx ON events.resources (url); @@ -871,6 +859,11 @@ $$ max_used_js_heap_size bigint NOT NULL, PRIMARY KEY (session_id, message_id) ); + CREATE INDEX performance_session_id_idx ON events.performance (session_id); + CREATE INDEX performance_timestamp_idx ON events.performance (timestamp); + CREATE INDEX performance_session_id_timestamp_idx ON events.performance (session_id, timestamp); + CREATE INDEX performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0; + CREATE INDEX performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0; -- --- autocomplete.sql --- @@ -903,10 +896,74 @@ $$ start_at timestamp NOT NULL, errors text NULL ); - CREATE INDEX ON jobs (status); - CREATE INDEX ON jobs (start_at); + CREATE INDEX jobs_status_idx ON jobs (status); + CREATE INDEX jobs_start_at_idx ON jobs (start_at); CREATE INDEX jobs_project_id_idx ON jobs (project_id); + CREATE TABLE metrics + ( + metric_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer REFERENCES users (user_id) ON DELETE SET NULL, + name text NOT NULL, + is_public boolean NOT NULL DEFAULT FALSE, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp + ); + CREATE INDEX metrics_user_id_is_public_idx ON public.metrics (user_id, is_public); + CREATE TABLE metric_series + ( + series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE, + index integer NOT NULL, + name text NULL, + filter jsonb NOT NULL, + created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL, + deleted_at timestamp + ); + CREATE INDEX metric_series_metric_id_idx ON public.metric_series (metric_id); + + + CREATE TABLE searches + ( + search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE, + name text not null, + filter jsonb not null, + created_at timestamp default timezone('utc'::text, now()) not null, + deleted_at timestamp, + is_public boolean NOT NULL DEFAULT False + ); + + CREATE INDEX searches_user_id_is_public_idx ON public.searches (user_id, is_public); + CREATE INDEX searches_project_id_idx ON public.searches (project_id); + + CREATE TYPE alert_detection_method AS ENUM ('threshold', 'change'); + + CREATE TABLE alerts + ( + alert_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY, + project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE, + series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE, + name 
text NOT NULL, + description text NULL DEFAULT NULL, + active boolean NOT NULL DEFAULT TRUE, + detection_method alert_detection_method NOT NULL, + query jsonb NOT NULL, + deleted_at timestamp NULL DEFAULT NULL, + created_at timestamp NOT NULL DEFAULT timezone('utc'::text, now()), + options jsonb NOT NULL DEFAULT '{ + "renotifyInterval": 1440 + }'::jsonb + ); + CREATE INDEX alerts_project_id_idx ON alerts (project_id); + CREATE INDEX alerts_series_id_idx ON alerts (series_id); + CREATE TRIGGER on_insert_or_update_or_delete + AFTER INSERT OR UPDATE OR DELETE + ON alerts + FOR EACH ROW + EXECUTE PROCEDURE notify_alert(); raise notice 'DB created'; END IF; diff --git a/scripts/helm/helmcharts/README.md b/scripts/helm/helmcharts/README.md new file mode 100644 index 000000000..ee63b1567 --- /dev/null +++ b/scripts/helm/helmcharts/README.md @@ -0,0 +1,14 @@ +- Initialize databases + - we have to pass the --wait flag, otherwise the db installation won't complete and it will break the db init. + - collate all dbs required + - How to distinguish between enterprise and community + - Or first only community, then enterprise +- install db migration + - we have to have another helm chart with a low hook value for higher priority +- install app + - customize the values.yaml file + + +## Installation +helm upgrade --install databases ./databases -n db --create-namespace --wait -f ./values.yaml --atomic +helm upgrade --install openreplay ./openreplay -n app --create-namespace --wait -f ./values.yaml --atomic diff --git a/scripts/helm/helmcharts/databases/.helmignore b/scripts/helm/helmcharts/databases/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/databases/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helm/helmcharts/databases/Chart.yaml b/scripts/helm/helmcharts/databases/Chart.yaml new file mode 100644 index 000000000..0aa62594a --- /dev/null +++ b/scripts/helm/helmcharts/databases/Chart.yaml @@ -0,0 +1,42 @@ +apiVersion: v2 +name: databases +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes.
+appVersion: "1.16.0" + +dependencies: + - name: kafka + repository: file://charts/kafka + version: 11.8.6 + condition: kafka.enabled + - name: clickhouse + repository: file://charts/clickhouse + version: 1.16.0 + condition: clickhouse.enabled + - name: postgresql + repository: file://charts/postgresql + version: 9.8.2 + condition: postgresql.enabled + - name: redis + repository: file://charts/redis + version: 12.10.1 + condition: redis.enabled diff --git a/scripts/helm/helmcharts/databases/charts/clickhouse/.helmignore b/scripts/helm/helmcharts/databases/charts/clickhouse/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/clickhouse/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helm/helmcharts/databases/charts/clickhouse/Chart.yaml b/scripts/helm/helmcharts/databases/charts/clickhouse/Chart.yaml new file mode 100644 index 000000000..c7a0eb3d6 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/clickhouse/Chart.yaml @@ -0,0 +1,23 @@ +apiVersion: v2 +name: clickhouse +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +appVersion: 1.16.0 diff --git a/scripts/helm/helmcharts/databases/charts/clickhouse/templates/_helpers.tpl b/scripts/helm/helmcharts/databases/charts/clickhouse/templates/_helpers.tpl new file mode 100644 index 000000000..44cfadff0 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/clickhouse/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "clickhouse.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
+*/}} +{{- define "clickhouse.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "clickhouse.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "clickhouse.labels" -}} +helm.sh/chart: {{ include "clickhouse.chart" . }} +{{ include "clickhouse.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "clickhouse.selectorLabels" -}} +app.kubernetes.io/name: {{ include "clickhouse.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "clickhouse.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "clickhouse.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/clickhouse/templates/service.yaml b/scripts/helm/helmcharts/databases/charts/clickhouse/templates/service.yaml new file mode 100644 index 000000000..4496f556c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/clickhouse/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: clickhouse + labels: + {{- include "clickhouse.labels" . | nindent 4 }} +spec: + type: ClusterIP + ports: + - port: {{ .Values.service.webPort }} + targetPort: web + protocol: TCP + name: web + - port: {{ .Values.service.dataPort }} + targetPort: data + protocol: TCP + name: data + selector: + {{- include "clickhouse.selectorLabels" . | nindent 4 }} diff --git a/scripts/helm/helmcharts/databases/charts/clickhouse/templates/serviceaccount.yaml b/scripts/helm/helmcharts/databases/charts/clickhouse/templates/serviceaccount.yaml new file mode 100644 index 000000000..1f1183598 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/clickhouse/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "clickhouse.serviceAccountName" . }} + labels: + {{- include "clickhouse.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/clickhouse/templates/statefulset.yaml b/scripts/helm/helmcharts/databases/charts/clickhouse/templates/statefulset.yaml new file mode 100644 index 000000000..392976eec --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/clickhouse/templates/statefulset.yaml @@ -0,0 +1,69 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "clickhouse.fullname" . }} + labels: + {{- include "clickhouse.labels" . | nindent 4 }} +spec: + replicas: {{ .Values.replicaCount }} + serviceName: {{ include "clickhouse.fullname" . }} + selector: + matchLabels: + {{- include "clickhouse.selectorLabels" . 
| nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "clickhouse.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "clickhouse.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + env: + {{- range $key, $value := .Values.env }} + - name: "{{ $key }}" + value: "{{ $value }}" + {{- end }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - containerPort: 9000 + name: web + - containerPort: 8123 + name: data + volumeMounts: + - name: ch-volume + mountPath: /var/lib/mydata + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} + volumeClaimTemplates: + - metadata: + name: ch-volume + spec: + accessModes: [ "ReadWriteOnce" ] + resources: + requests: + storage: {{ .Values.storageSize }} diff --git a/scripts/helm/helmcharts/databases/charts/clickhouse/values.yaml b/scripts/helm/helmcharts/databases/charts/clickhouse/values.yaml new file mode 100644 index 000000000..4cba1c1f8 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/clickhouse/values.yaml @@ -0,0 +1,62 @@ +# Default values for clickhouse. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: yandex/clickhouse-server + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "20.9" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +env: {} + +service: + webPort: 9000 + dataPort: 8123 + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +nodeSelector: {} + +tolerations: [] + +affinity: {} +storageSize: 8G diff --git a/scripts/helm/helmcharts/databases/charts/kafka/.helmignore b/scripts/helm/helmcharts/databases/charts/kafka/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. 
+# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helm/helmcharts/databases/charts/kafka/Chart.yaml b/scripts/helm/helmcharts/databases/charts/kafka/Chart.yaml new file mode 100755 index 000000000..165e70d55 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/Chart.yaml @@ -0,0 +1,22 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 2.6.0 +description: Apache Kafka is a distributed streaming platform. +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/kafka +icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-110x117.png +keywords: +- kafka +- zookeeper +- streaming +- producer +- consumer +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: kafka +sources: +- https://github.com/bitnami/bitnami-docker-kafka +- https://kafka.apache.org/ +version: 11.8.6 diff --git a/scripts/helm/helmcharts/databases/charts/kafka/README.md b/scripts/helm/helmcharts/databases/charts/kafka/README.md new file mode 100755 index 000000000..5584bd43d --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/README.md @@ -0,0 +1,737 @@ +# Kafka + +[Kafka](https://www.kafka.org/) is a distributed streaming platform used for building real-time data pipelines and streaming apps. It is horizontally scalable, fault-tolerant, wicked fast, and runs in production in thousands of companies. + +## TL;DR + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +## Introduction + +This chart bootstraps a [Kafka](https://github.com/bitnami/bitnami-docker-kafka) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +helm repo add bitnami https://charts.bitnami.com/bitnami +helm install my-release bitnami/kafka +``` + +These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. 
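+ +Before deleting a release, it can help to double-check what it deployed and which values it was installed with. A quick sketch using standard Helm 3 commands (not specific to this chart): + +```console +helm list +helm status my-release +helm get values my-release +``` + +`helm get values` only prints the overrides you supplied; add the `--all` flag to include the chart defaults as well.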
+ +## Parameters + +The following tables lists the configurable parameters of the Kafka chart and their default values per section/component: + +### Global parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | + +### Common parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `nameOverride` | String to partially override kafka.fullname | `nil` | +| `fullnameOverride` | String to fully override kafka.fullname | `nil` | +| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` | +| `commonLabels` | Labels to add to all deployed objects | `{}` | +| `commonAnnotations` | Annotations to add to all deployed objects | `{}` | +| `extraDeploy` | Array of extra objects to deploy with the release | `nil` (evaluated as a template) | + +### Kafka parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `image.registry` | Kafka image registry | `docker.io` | +| `image.repository` | Kafka image name | `bitnami/kafka` | +| `image.tag` | Kafka image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Set to true if you would like to see extra information on logs | `false` | +| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `nil` | +| `existingConfigmap` | Name of existing ConfigMap with Kafka configuration | `nil` | +| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers. | `nil` | +| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file. | `nil` | +| `heapOpts` | Kafka's Java Heap size | `-Xmx1024m -Xms1024m` | +| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` | +| `autoCreateTopicsEnable` | Switch to enable auto creation of topics. 
Enabling auto creation of topics not recommended for production or similar environments | `false` | +| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `10000` | +| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` | +| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` | +| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` | +| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` | +| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` | +| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` | +| `maxMessageBytes` | The largest record batch size allowed by Kafka | `1000012` | +| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` | +| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` | +| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` | +| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` | +| `numIoThreads` | The number of threads doing disk I/O | `8` | +| `numNetworkThreads` | The number of threads handling network requests | `3` | +| `numPartitions` | The default number of log partitions per topic | `1` | +| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` | +| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` | +| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` | +| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` | +| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to Zookeeper | `6000` | +| `extraEnvVars` | Extra environment variables to add to kafka pods | `[]` | +| `extraVolumes` | Extra volume(s) to add to Kafka statefulset | `[]` | +| `extraVolumeMounts` | Extra volumeMount(s) to add to Kafka containers | `[]` | +| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` | +| `auth.saslMechanisms` | SASL mechanisms when either `auth.interBrokerProtocol` or `auth.clientProtocol` are `sasl`. 
Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` | +| `auth.saslInterBrokerMechanism` | SASL mechanism to use as inter broker protocol, it must be included at `auth.saslMechanisms` | `plain` | +| `auth.jksSecret` | Name of the existing secret containing the truststore and one keystore per Kafka broker you have in the cluster | `nil` | +| `auth.jksPassword` | Password to access the JKS files when they are password-protected | `nil` | +| `auth.tlsEndpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` | +| `auth.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` | +| `auth.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `nil` | +| `auth.jaas.zookeeperUser` | Kafka Zookeeper user for SASL authentication | `nil` | +| `auth.jaas.zookeeperPassword` | Kafka Zookeeper password for SASL authentication | `nil` | +| `auth.jaas.existingSecret` | Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser | `nil` | +| `auth.jaas.clientUsers` | List of Kafka client users to be created, separated by commas. This values will override `auth.jaas.clientUser` | `[]` | +| `auth.jaas.clientPasswords` | List of passwords for `auth.jaas.clientUsers`. It is mandatory to provide the passwords when using `auth.jaas.clientUsers` | `[]` | +| `listeners` | The address(es) the socket server listens on. Auto-calculated it's set to an empty array | `[]` | +| `advertisedListeners` | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated it's set to an empty array | `[]` | +| `listenerSecurityProtocolMap` | The protocol->listener mapping. 
Auto-calculated it's set to nil | `nil` | +| `allowPlaintextListener` | Allow to use the PLAINTEXT listener | `true` | +| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` | + +### Statefulset parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `replicaCount` | Number of Kafka nodes | `1` | +| `updateStrategy` | Update strategy for the stateful set | `RollingUpdate` | +| `rollingUpdatePartition` | Partition update strategy | `nil` | +| `podLabels` | Kafka pod labels | `{}` (evaluated as a template) | +| `podAnnotations` | Kafka Pod annotations | `{}` (evaluated as a template) | +| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) | +| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` | +| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) | +| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) | +| `podSecurityContext` | Kafka pods' Security Context | `{}` | +| `containerSecurityContext` | Kafka containers' Security Context | `{}` | +| `resources.limits` | The resources limits for Kafka containers | `{}` | +| `resources.requests` | The requested resources for Kafka containers | `{}` | +| `livenessProbe` | Liveness probe configuration for Kafka | `Check values.yaml file` | +| `readinessProbe` | Readiness probe configuration for Kafka | `Check values.yaml file` | +| `customLivenessProbe` | Custom Liveness probe configuration for Kafka | `{}` | +| `customReadinessProbe` | Custom Readiness probe configuration for Kafka | `{}` | +| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` | +| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `nil` | +| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `1` | +| `command` | Override kafka container command | `['/scripts/setup.sh']` (evaluated as a template) | +| `args` | Override kafka container arguments | `[]` (evaluated as a template) | +| `sidecars` | Attach additional sidecar containers to the Kafka pod | `{}` | + +### Exposure parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | Kafka port for client connections | `9092` | +| `service.internalPort` | Kafka port for inter-broker connections | `9093` | +| `service.externalPort` | Kafka port for external connections | `9094` | +| `service.nodePorts.client` | Nodeport for client connections | `""` | +| `service.nodePorts.external` | Nodeport for external connections | `""` | +| `service.loadBalancerIP` | loadBalancerIP for Kafka Service | `nil` | +| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `service.annotations` | Service annotations | `{}`(evaluated as a template) | +| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` | +| `externalAccess.autoDiscovery.enabled` | Enable 
using an init container to auto-detect external IPs/ports by querying the K8s API | `false` | +| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry (kubectl) | `docker.io` | +| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image name (kubectl) | `bitnami/kubectl` | +| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (kubectl) | `{TAG_NAME}` | +| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy (kubectl) | `Always` | +| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` | +| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` | +| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort or LoadBalancer | `LoadBalancer` | +| `externalAccess.service.port` | Kafka port used for external access when service type is LoadBalancer | `9094` | +| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for Kafka brokers | `[]` | +| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` | +| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort | `nil` | +| `externalAccess.service.nodePorts` | Array of node ports used to configure Kafka external listener when service type is NodePort | `[]` | +| `externalAccess.service.annotations` | Service annotations for external access | `{}`(evaluated as a template) | + +### Persistence parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `persistence.enabled` | Enable Kafka data persistence using PVC, note that Zookeeper persistence is unaffected | `true` | +| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template | `nil` | +| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `nil` | +| `persistence.accessMode` | PVC Access Mode for Kafka data volume | `ReadWriteOnce` | +| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` | +| `persistence.annotations` | Annotations for the PVC | `{}`(evaluated as a template) | + +### RBAC parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` | +| `serviceAccount.name` | Name of the created serviceAccount | Generated using the `kafka.fullname` template | +| `rbac.create` | Weather to create & use RBAC resources or not | `false` | + +### Volume Permissions parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `volumePermissions.enabled` | Enable init container that changes the 
owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` | +| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` | + +### Metrics parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` | +| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` | +| `metrics.kafka.image.repository` | Kafka exporter image name | `bitnami/kafka-exporter` | +| `metrics.kafka.image.tag` | Kafka exporter image tag | `{TAG_NAME}` | +| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` | +| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` | +| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `nil` | +| `metrics.kafka.resources.limits` | Kafka Exporter container resource limits | `{}` | +| `metrics.kafka.resources.requests` | Kafka Exporter container resource requests | `{}` | +| `metrics.kafka.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Kafka Exporter | `ClusterIP` | +| `metrics.kafka.service.port` | Kafka Exporter Prometheus port | `9308` | +| `metrics.kafka.service.nodePort` | Kubernetes HTTP node port | `""` | +| `metrics.kafka.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | +| `metrics.kafka.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` | +| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` | +| `metrics.jmx.image.repository` | JMX exporter image name | `bitnami/jmx-exporter` | +| `metrics.jmx.image.tag` | JMX exporter image tag | `{TAG_NAME}` | +| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` | +| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` | +| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` | +| `metrics.jmx.service.type` | Kubernetes service type 
(`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter | `ClusterIP` | +| `metrics.jmx.service.port` | JMX Exporter Prometheus port | `5556` | +| `metrics.jmx.service.nodePort` | Kubernetes HTTP node port | `""` | +| `metrics.jmx.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` | +| `metrics.jmx.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` | +| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `nil` | +| `metrics.jmx.whitelistObjectNames` | Allows setting which JMX objects you want to expose to via JMX stats to JMX Exporter | (see `values.yaml`) | +| `metrics.jmx.config` | Configuration file for JMX exporter | (see `values.yaml`) | +| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `nil` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `monitoring` | +| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `nil` | +| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) | +| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `nil` (Prometheus Operator default value) | + +### Zookeeper chart parameters + +| Parameter | Description | Default | +|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `zookeeper.enabled` | Switch to enable or disable the Zookeeper helm chart | `true` | +| `zookeeper.persistence.enabled` | Enable Zookeeper persistence using PVC | `true` | +| `externalZookeeper.servers` | Server or list of external Zookeeper servers to use | `[]` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +helm install my-release \ + --set replicaCount=3 \ + bitnami/kafka +``` + +The above command deploys Kafka with 3 brokers (replicas). + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +helm install my-release -f values.yaml bitnami/kafka +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration and horizontal scaling + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
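+ +For example, a minimal sketch of installing with the production values, assuming Helm 3 and a chart version that still ships `values-production.yaml`: + +```console +helm pull bitnami/kafka --untar +helm install my-release -f ./kafka/values-production.yaml ./kafka +```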
+ +The main differences from the default `values.yaml` include: + +- Number of Kafka nodes: + +```diff +- replicaCount: 1 ++ replicaCount: 3 +``` + +- Disable the PLAINTEXT listener: + +```diff +- allowPlaintextListener: true ++ allowPlaintextListener: false +``` + +- Default replication factors for automatically created topics: + +```diff +- defaultReplicationFactor: 1 ++ defaultReplicationFactor: 3 +``` + +- Disable auto creation of topics: + +```diff +- autoCreateTopicsEnable: true ++ autoCreateTopicsEnable: false +``` + +- The replication factor for the offsets topic: + +```diff +- offsetsTopicReplicationFactor: 1 ++ offsetsTopicReplicationFactor: 3 +``` + +- The replication factor for the transaction topic: + +```diff +- transactionStateLogReplicationFactor: 1 ++ transactionStateLogReplicationFactor: 3 +``` + +- Overridden min.insync.replicas config for the transaction topic: + +```diff +- transactionStateLogMinIsr: 1 ++ transactionStateLogMinIsr: 3 +``` + +- Enable SASL authentication on client and inter-broker communications: + +```diff +- auth.clientProtocol: plaintext ++ auth.clientProtocol: sasl +- auth.interBrokerProtocol: plaintext ++ auth.interBrokerProtocol: sasl +``` + +- Enable Zookeeper authentication: + +```diff ++ auth.jaas.zookeeperUser: zookeeperUser ++ auth.jaas.zookeeperPassword: zookeeperPassword +- zookeeper.auth.enabled: false ++ zookeeper.auth.enabled: true ++ zookeeper.auth.clientUser: zookeeperUser ++ zookeeper.auth.clientPassword: zookeeperPassword ++ zookeeper.auth.serverUsers: zookeeperUser ++ zookeeper.auth.serverPasswords: zookeeperPassword +``` + +- Enable Pod Disruption Budget: + +```diff +- pdb.create: false ++ pdb.create: true +``` + +- Create a separate Kafka metrics exporter: + +```diff +- metrics.kafka.enabled: false ++ metrics.kafka.enabled: true +``` + +- Expose JMX metrics to Prometheus: + +```diff +- metrics.jmx.enabled: false ++ metrics.jmx.enabled: true +``` + +- Enable Zookeeper metrics: + +```diff ++ zookeeper.metrics.enabled: true +``` + +To horizontally scale this chart once it has been deployed, you can upgrade the statefulset using a new value for the `replicaCount` parameter. Please note that, when enabling TLS encryption, you must update your JKS secret to include the keystores for the new replicas. + +### Setting custom parameters + +Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property. + +### Listeners configuration + +This chart allows you to automatically configure Kafka with 3 listeners: + +- One for inter-broker communications. +- A second one for communications with clients within the K8s cluster. +- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-cluster) for more information. + +For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed. + +### Enable security for Kafka and Zookeeper + +You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications.
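+ +A minimal sketch of that combination (the TLS listeners additionally require the JKS secret described below, and SASL users would need to be defined via `auth.jaas.clientUsers`/`auth.jaas.clientPasswords`; `kafka-jks` and `jksPassword` are placeholders): + +```console +helm install my-release bitnami/kafka \ + --set auth.clientProtocol=sasl_tls \ + --set auth.interBrokerProtocol=tls \ + --set auth.jksSecret=kafka-jks \ + --set auth.jksPassword=jksPassword +```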
This table shows the available protocols and the security they provide: + +| Method | Authentication | Encryption via TLS | +|-----------|-------------------------------|--------------------| +| plaintext | None | No | +| tls | None | Yes | +| mtls | Yes (two-way authentication) | Yes | +| sasl | Yes (via SASL) | No | +| sasl_tls | Yes (via SASL) | Yes | + +If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below: + +- `auth.jaas.clientUsers`/`auth.jaas.clientPasswords`: when enabling SASL authentication for communications with clients. +- `auth.jaas.interBrokerUser`/`auth.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications. +- `auth.jaas.zookeeperUser`/`auth.jaas.zookeeperPassword`: in the case that the Zookeeper chart is deployed with SASL authentication enabled. + +In order to configure TLS authentication/encryption, you **must** create a secret containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. Then, you need to pass the secret name with the `auth.jksSecret` parameter when deploying the chart. + +> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.jksPassword` parameter to provide your password. + +For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers, use the command below to create the secret: + +```console +kubectl create secret generic kafka-jks --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks +``` + +> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS file generation. + +As an alternative to manually creating the secret before installing the chart, you can put your JKS files inside the chart folder `files/jks`, and a secret including them will be generated. Please note this alternative requires having the chart downloaded locally, so you will have to clone this repository or fetch the chart before installing it. + +You can deploy the chart with authentication using the following parameters: + +```console +replicaCount=2 +auth.clientProtocol=sasl +auth.interBrokerProtocol=tls +auth.jksSecret=kafka-jks +auth.jksPassword=jksPassword +auth.jaas.clientUsers[0]=brokerUser +auth.jaas.clientPasswords[0]=brokerPassword +auth.jaas.zookeeperUser=zookeeperUser +auth.jaas.zookeeperPassword=zookeeperPassword +zookeeper.auth.enabled=true +zookeeper.auth.serverUsers=zookeeperUser +zookeeper.auth.serverPasswords=zookeeperPassword +zookeeper.auth.clientUser=zookeeperUser +zookeeper.auth.clientPassword=zookeeperPassword +``` + +If you also enable exposing metrics using the Kafka exporter, and you are using `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificate used to sign the brokers' certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.kafka.certificatesSecret` parameter.
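+ +For instance, a hypothetical way to create that secret and wire it up (the secret and file names are illustrative; check `values.yaml` for the exact keys the exporter expects): + +```console +kubectl create secret generic kafka-exporter-certs --from-file=./ca.crt +helm upgrade my-release bitnami/kafka \ + --set metrics.kafka.enabled=true \ + --set metrics.kafka.certificatesSecret=kafka-exporter-certs +```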
As an alternative, you can skip TLS validation using extra flags: + +```console +metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""} +``` + +### Accessing Kafka brokers from outside the cluster + +In order to access Kafka brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per Kafka pod will be created. + +There are two ways of configuring external access: using LoadBalancer services or using NodePort services. + +#### Using LoadBalancer services + +You have two alternatives to use LoadBalancer services: + +- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discovers them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.port=9094 +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the load balancer IPs: + +```console +externalAccess.enabled=true +externalAccess.service.type=LoadBalancer +externalAccess.service.port=9094 +externalAccess.service.loadBalancerIPs[0]='external-ip-1' +externalAccess.service.loadBalancerIPs[1]='external-ip-2' +``` + +Note: You need to know in advance the load balancer IPs so each Kafka broker advertised listener is configured with it. + +#### Using NodePort services + +You have two alternatives to use NodePort services: + +- Option A) Use random node ports using an **initContainer** that discovers them automatically. + +```console +externalAccess.enabled=true +externalAccess.service.type=NodePort +externalAccess.autoDiscovery.enabled=true +serviceAccount.create=true +rbac.create=true +``` + +Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled. + +- Option B) Manually specify the node ports: + +```console +externalAccess.enabled=true +externalAccess.service.type=NodePort +externalAccess.service.nodePorts[0]='node-port-1' +externalAccess.service.nodePorts[1]='node-port-2' +``` + +Note: You need to know in advance the node ports that will be exposed so each Kafka broker advertised listener is configured with it. + +The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` is provided. + +Following the aforementioned steps will also allow connecting to the brokers from outside the cluster using the default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections. + +### Sidecars + +If you have a need for additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec. + +```yaml +sidecars: + - name: your-image-name + image: your-image + imagePullPolicy: Always + ports: + - name: portname + containerPort: 1234 +``` + +### Deploying extra resources + +There are cases where you may want to deploy extra objects, such as Kafka Connect. For covering this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter.
The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB: + +```yaml +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: |- + - apiVersion: apps/v1 + kind: Deployment + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + spec: + replicas: 1 + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 8 }} + app.kubernetes.io/component: connector + template: + metadata: + labels: {{- include "kafka.labels" . | nindent 10 }} + app.kubernetes.io/component: connector + spec: + containers: + - name: connect + image: KAFKA-CONNECT-IMAGE + imagePullPolicy: IfNotPresent + ports: + - name: connector + containerPort: 8083 + volumeMounts: + - name: configuration + mountPath: /opt/bitnami/kafka/config + volumes: + - name: configuration + configMap: + name: {{ include "kafka.fullname" . }}-connect + - apiVersion: v1 + kind: ConfigMap + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + data: + connect-standalone.properties: |- + bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }} + ... + mongodb.properties: |- + connection.uri=mongodb://root:password@mongodb-hostname:27017 + ... + - apiVersion: v1 + kind: Service + metadata: + name: {{ include "kafka.fullname" . }}-connect + labels: {{- include "kafka.labels" . | nindent 6 }} + app.kubernetes.io/component: connector + spec: + ports: + - protocol: TCP + port: 8083 + targetPort: connector + selector: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: connector +``` + +You can create the Kafka Connect image using the Dockerfile below: + +```Dockerfile +FROM bitnami/kafka:latest +# Download MongoDB Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb +RUN mkdir -p /opt/bitnami/kafka/plugins && \ + cd /opt/bitnami/kafka/plugins && \ + curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar +CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties +``` + +## Persistence + +The [Bitnami Kafka](https://github.com/bitnami/bitnami-docker-kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container. + +Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence. + +### Adjust permissions of persistent volume mountpoint + +As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it. + +By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions. +As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination. + +You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
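+ +A sketch of enabling it at install time, using the `volumePermissions.enabled` flag documented in the parameters tables above: + +```console +helm install my-release bitnami/kafka \ + --set volumePermissions.enabled=true +```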
+ +## Upgrading + +### To 11.8.0 + +External access to brokers can now be achieved through the cluster's Kafka service. + +- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external` + +### To 11.7.0 + +The way to configure the users and passwords changed. Now it is allowed to create multiple users during the installation by providing the list of users and passwords. + +- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array). +- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array). + +### To 11.0.0 + +The way to configure listeners and authentication on Kafka was totally refactored, allowing users to configure different authentication protocols on different listeners. Please check the sections [Listeners configuration](#listeners-configuration) and [Enable security for Kafka and Zookeeper](#enable-security-for-kafka-and-zookeeper) for more information. + +Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or disappeared in favor of new ones on this major version: + +- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters. +- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters. +- `auth.certificatesSecret` -> renamed to `auth.jksSecret`. +- `auth.certificatesPassword` -> renamed to `auth.jksPassword`. +- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`. +- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser` +- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword` +- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser` +- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword` +- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret` +- `service.sslPort` -> deprecated in favor of `service.internalPort` +- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort` +- `metrics.kafka.extraFlag` -> new parameter +- `metrics.kafka.certificatesSecret` -> new parameter + +(A console sketch using the new parameter names appears after the 8.0.0 note below.) + +### To 10.0.0 + +If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later. + +### To 9.0.0 + +Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed on this major version: + +```diff +- securityContext.enabled +- securityContext.fsGroup +- securityContext.fsGroup ++ podSecurityContext +- externalAccess.service.loadBalancerIP ++ externalAccess.service.loadBalancerIPs +- externalAccess.service.nodePort ++ externalAccess.service.nodePorts +- metrics.jmx.configMap.enabled +- metrics.jmx.configMap.overrideConfig ++ metrics.jmx.config +- metrics.jmx.configMap.overrideName ++ metrics.jmx.existingConfigmap +``` + +Port names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/). + +### To 8.0.0 + +There is no backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
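+ +As an illustration of the 11.0.0 renames listed above, a hypothetical upgrade using the new parameter names (all values are placeholders): + +```console +helm upgrade my-release bitnami/kafka \ + --set auth.clientProtocol=tls \ + --set auth.jksSecret=kafka-jks \ + --set auth.jksPassword=jksPassword +```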
+ +### To 7.0.0 + +Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments. +Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka: + +```console +helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false +helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true +``` + +### To 2.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` + +### To 1.0.0 + +Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments. +Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is kafka: + +```console +kubectl delete statefulset kafka-kafka --cascade=false +kubectl delete statefulset kafka-zookeeper --cascade=false +``` diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml new file mode 100755 index 000000000..c3b15dc5c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/Chart.yaml @@ -0,0 +1,20 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 3.6.2 +description: A centralized service for maintaining configuration information, naming, + providing distributed synchronization, and providing group services for distributed + applications. +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper +icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-110x117.png +keywords: +- zookeeper +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: zookeeper +sources: +- https://github.com/bitnami/bitnami-docker-zookeeper +- https://zookeeper.apache.org/ +version: 5.21.9 diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/README.md b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/README.md new file mode 100755 index 000000000..0291875ed --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/README.md @@ -0,0 +1,297 @@ +# ZooKeeper + +[ZooKeeper](https://zookeeper.apache.org/) is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. All of these kinds of services are used in some form or other by distributed applications. 
+
+## TL;DR
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/zookeeper
+```
+
+## Introduction
+
+This chart bootstraps a [ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.12+ or Helm 3.0-beta3+
+- PV provisioner support in the underlying infrastructure
+
+## Installing the Chart
+
+To install the chart with the release name `my-release`:
+
+```console
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/zookeeper
+```
+
+These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components associated with the chart and deletes the release.
+
+## Parameters
+
+The following tables list the configurable parameters of the ZooKeeper chart and their default values per section/component:
+
+### Global parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `global.imageRegistry` | Global Docker image registry | `nil` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
+| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
+
+### Common parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `nameOverride` | String to partially override zookeeper.fullname | `nil` |
+| `fullnameOverride` | String to fully override zookeeper.fullname | `nil` |
+| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
+| `commonLabels` | Labels to add to all deployed objects | `{}` |
+| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
+| `schedulerName` | Kubernetes pod scheduler registry | `nil` (use the default-scheduler) |
+
+### Zookeeper chart parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `image.registry` | ZooKeeper image registry | `docker.io` |
+| `image.repository` | ZooKeeper image name | `bitnami/zookeeper` |
+| `image.tag` | ZooKeeper image tag | `{TAG_NAME}` |
+| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `tickTime` | Basic time unit in milliseconds used by ZooKeeper for heartbeats | `2000` |
+| `initLimit` | Time the ZooKeeper servers in quorum have to connect to a leader | `10` |
+| `syncLimit` | How far out of date a server can be from a leader | `5` |
+| `maxClientCnxns` | Number of concurrent connections that a single client may make to a single member | `60` |
+| `maxSessionTimeout` | Maximum session timeout in milliseconds that the server will allow the client to negotiate | `40000` |
+| `autopurge.snapRetainCount` | Number of snapshots to retain for autopurge | `3` |
+| `autopurge.purgeInterval` | The time interval in hours at which the purge task is triggered | `0` |
+| `fourlwCommandsWhitelist` | A list of comma separated Four Letter Words commands to use | `srvr, mntr` |
+| `listenOnAllIPs` | Allow ZooKeeper to listen for connections from its peers on all available IP addresses | `false` |
+| `allowAnonymousLogin` | Accept connections from unauthenticated users | `yes` |
+| `auth.existingSecret` | Use existing secret (ignores previous password) | `nil` |
+| `auth.enabled` | Enable ZooKeeper auth | `false` |
+| `auth.clientUser` | User that ZooKeeper clients will use to auth | `nil` |
+| `auth.clientPassword` | Password that ZooKeeper clients will use to auth | `nil` |
+| `auth.serverUsers` | List of users to be created | `nil` |
+| `auth.serverPasswords` | List of passwords to assign to users when created | `nil` |
+| `heapSize` | Size in MB for the Java Heap options (Xmx and Xms) | `1024` |
+| `logLevel` | Log level of ZooKeeper server | `ERROR` |
+| `jvmFlags` | Default JVMFLAGS for the ZooKeeper process | `nil` |
+| `config` | Configure ZooKeeper with a custom zoo.cfg file | `nil` |
+| `dataLogDir` | Data log directory | `""` |
+
+### Statefulset parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `replicaCount` | Number of ZooKeeper nodes | `1` |
+| `updateStrategy` | Update strategy for the statefulset | `RollingUpdate` |
+| `rollingUpdatePartition` | Partition update strategy | `nil` |
+| `podManagementPolicy` | Pod management policy | `Parallel` |
+| `podLabels` | ZooKeeper pod labels | `{}` (evaluated as a template) |
+| `podAnnotations` | ZooKeeper pod annotations | `{}` (evaluated as a template) |
+| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) |
+| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) |
+| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) |
+| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods | `""` |
+| `securityContext.enabled` | Enable security context (ZooKeeper master pod) | `true` |
+| `securityContext.fsGroup` | Group ID for the container (ZooKeeper master pod) | `1001` |
+| `securityContext.runAsUser` | User ID for the container (ZooKeeper master pod) | `1001` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` |
+| `livenessProbe` | Liveness probe configuration for ZooKeeper | Check `values.yaml` file |
+| `readinessProbe` | Readiness probe configuration for ZooKeeper | Check `values.yaml` file |
+| `extraVolumes` | Extra volumes | `nil` |
+| `extraVolumeMounts` | Mount extra volume(s) | `nil` |
+| `podDisruptionBudget.maxUnavailable` | Max number of pods down simultaneously | `1` |
+
+### Exposure parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.port` | ZooKeeper port | `2181` |
+| `service.followerPort` | ZooKeeper follower port | `2888` |
+| `service.electionPort` | ZooKeeper election port | `3888` |
+| `service.publishNotReadyAddresses` | Whether the ZooKeeper headless service should publish DNS records for not-ready pods | `true` |
+| `serviceAccount.create` | Enable creation of ServiceAccount for zookeeper pod | `false` |
+| `serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | Generated using the `zookeeper.fullname` template |
+| `service.tls.client_enable` | Enable TLS for client connections | `false` |
+| `service.tls.quorum_enable` | Enable TLS for quorum protocol | `false` |
+| `service.tls.disable_base_client_port` | Remove client port from service definitions | `false` |
+| `service.tls.client_port` | Service port for TLS client connections | `3181` |
+| `service.tls.client_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` |
+| `service.tls.client_keystore_password` | KeyStore password. You can use environment variables. | `nil` |
+| `service.tls.client_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` |
+| `service.tls.client_truststore_password` | TrustStore password. You can use environment variables. | `nil` |
+| `service.tls.quorum_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` |
+| `service.tls.quorum_keystore_password` | KeyStore password. You can use environment variables. | `nil` |
+| `service.tls.quorum_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` |
+| `service.tls.quorum_truststore_password` | TrustStore password. You can use environment variables. | `nil` |
+| `service.annotations` | Annotations for the Service | `{}` |
+| `service.headless.annotations` | Annotations for the Headless Service | `{}` |
+| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+
+### Persistence parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `persistence.enabled` | Enable Zookeeper data persistence using PVC | `true` |
+| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` (evaluated as a template) |
+| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `nil` |
+| `persistence.accessMode` | PVC Access Mode for ZooKeeper data volume | `ReadWriteOnce` |
+| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` |
+| `persistence.annotations` | Annotations for the PVC | `{}` (evaluated as a template) |
+| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's Data log directory | `8Gi` |
+| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for Zookeeper's Data log directory | `nil` (evaluated as a template) |
+
+### Volume Permissions parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
+| `volumePermissions.resources` | Init container resource requests/limits | `nil` |
+
+### Metrics parameters
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `metrics.enabled` | Enable Prometheus to access ZooKeeper metrics endpoint | `false` |
+| `metrics.containerPort` | Port where a Jetty server will expose Prometheus metrics | `9141` |
+| `metrics.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Jetty server exposing Prometheus metrics | `ClusterIP` |
+| `metrics.service.port` | Prometheus metrics service port | `9141` |
+| `metrics.service.annotations` | Service annotations for Prometheus to auto-discover the metrics endpoint | `{prometheus.io/scrape: "true", prometheus.io/port: "9141"}` |
+| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` |
+| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource | The Release Namespace |
+| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `nil` (Prometheus Operator default value) |
+| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) |
+| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `nil` |
+| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` |
+| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource | The Release Namespace |
+| `metrics.prometheusRule.selector` | Prometheus instance selector labels | `nil` |
+| `metrics.prometheusRule.rules` | Prometheus Rule definitions (see values.yaml for examples) | `[]` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+    --set auth.clientUser=newUser \
+    bitnami/zookeeper
+```
+
+The above command sets the ZooKeeper user to `newUser`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/zookeeper
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Production configuration
+
+This chart includes a `values-production.yaml` file with parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Number of ZooKeeper nodes:
+
+```diff
+- replicaCount: 1
++ replicaCount: 3
+```
+
+- Enable prometheus metrics:
+
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+### Log level
+
+You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable. By default, it is set to `ERROR` because each readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection.
+
+## Persistence
+
+The [Bitnami ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
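+
+For reference, the automatic ownership change relies on the `securityContext.*` defaults documented in the parameters table above; expressed as a values override, they look roughly like this (a minimal sketch of the documented defaults, not new configuration):
+
+```yaml
+## Pod security context for the ZooKeeper pods (chart defaults).
+## fsGroup makes Kubernetes set group ownership of the mounted
+## data volume so the non-root user (runAsUser) can write to it.
+securityContext:
+  enabled: true
+  runAsUser: 1001
+  fsGroup: 1001
+```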
+
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
+
+### Data Log Directory
+
+You can use a dedicated device for logs (instead of using the data directory) to help avoid competition between logging and snapshots. To do so, set the `dataLogDir` parameter to the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and the log will be written to the data directory (Zookeeper's default behavior).
+
+When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
+
+## Upgrading
+
+### To 5.21.0
+
+A couple of parameters related to Zookeeper metrics were renamed or disappeared in favor of new ones:
+
+- `metrics.port` is renamed to `metrics.containerPort`.
+- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`.
+
+### To 3.0.0
+
+This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade
+of the application, each node will need to have at least one snapshot file created in the data directory. If not, the
+new version of the application won't be able to start the service. Please refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056)
+in order to find ways to work around this issue in case you are facing it.
+
+### To 2.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
+Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is `zookeeper`:
+
+```console
+$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
+
+### To 1.0.0
+
+Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
+Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is zookeeper:
+
+```console
+$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
+```
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt
new file mode 100755
index 000000000..3cc2edbed
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/NOTES.txt
@@ -0,0 +1,57 @@
+{{- if contains .Values.service.type "LoadBalancer" }}
+{{- if not .Values.auth.clientPassword }}
+-------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "serviceType=LoadBalancer" and not specifying "auth.enabled=true"
+    you have most likely exposed the ZooKeeper service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As an alternative, you can also specify a valid password on the
+    "auth.clientPassword" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+ZooKeeper can be accessed via port 2181 on the following DNS name from within your cluster:
+
+    {{ template "zookeeper.fullname" .
}}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To connect to your ZooKeeper server run the following commands: + + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "zookeeper.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}") + kubectl exec -it $POD_NAME -- zkCli.sh + +To connect to your ZooKeeper server from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "zookeeper.fullname" . }}) + zkCli.sh $NODE_IP:$NODE_PORT + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "zookeeper.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + zkCli.sh $SERVICE_IP:2181 + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "zookeeper.fullname" . }} 2181:2181 & + zkCli.sh 127.0.0.1:2181 + +{{- end }} + +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} + +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ + +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl new file mode 100755 index 000000000..f82502d69 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/_helpers.tpl @@ -0,0 +1,212 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "zookeeper.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "zookeeper.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. 
+*/}} +{{- define "zookeeper.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* + Create the name of the service account to use + */}} +{{- define "zookeeper.serviceAccountName" -}} +{{- if .Values.serviceAccount.create -}} + {{ default (include "zookeeper.fullname" .) .Values.serviceAccount.name }} +{{- else -}} + {{ default "default" .Values.serviceAccount.name }} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Zookeeper image name +*/}} +{{- define "zookeeper.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "zookeeper.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "zookeeper.labels" -}} +app.kubernetes.io/name: {{ include "zookeeper.name" . }} +helm.sh/chart: {{ include "zookeeper.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "zookeeper.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "zookeeper.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "zookeeper.matchLabels" -}} +app.kubernetes.io/name: {{ include "zookeeper.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Return ZooKeeper Client Password +*/}} +{{- define "zookeeper.clientPassword" -}} +{{- if .Values.auth.clientPassword -}} + {{- .Values.auth.clientPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return ZooKeeper Servers Passwords +*/}} +{{- define "zookeeper.serverPasswords" -}} +{{- if .Values.auth.serverPasswords -}} + {{- .Values.auth.serverPasswords -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the init container volume-permissions image) +*/}} +{{- define "zookeeper.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "zookeeper.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml new file mode 100755 index 000000000..1a4061565 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/configmap.yaml @@ -0,0 +1,17 @@ +{{- if .Values.config }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + zoo.cfg: |- +{{ .Values.config | indent 4 }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml new file mode 100755 index 000000000..3e26ed6c8 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/metrics-svc.yaml @@ -0,0 +1,29 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }}-metrics + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.service.annotations }} + {{ include "zookeeper.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + ports: + - name: tcp-metrics + port: {{ .Values.metrics.service.port }} + targetPort: metrics + selector: {{- include "zookeeper.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml new file mode 100755 index 000000000..f7e30b4bc --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/networkpolicy.yaml @@ -0,0 +1,43 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections to zookeeper + - ports: + - port: {{ .Values.service.port }} + from: + {{- if not .Values.networkPolicy.allowExternal }} + - podSelector: + matchLabels: + {{ include "zookeeper.fullname" . }}-client: "true" + - podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 14 }} + {{- else }} + - podSelector: + matchLabels: {} + {{- end }} + # Internal ports + - ports: &intranodes_ports + - port: {{ .Values.service.followerPort }} + - port: {{ .Values.service.electionPort }} + from: + - podSelector: + matchLabels: {{- include "zookeeper.matchLabels" . 
| nindent 14 }} + egress: + - ports: *intranodes_ports + # Allow outbound connections from zookeeper nodes + +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..818950c66 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/poddisruptionbudget.yaml @@ -0,0 +1,21 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if gt $replicaCount 1 }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + {{- toYaml .Values.podDisruptionBudget | nindent 2 }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml new file mode 100755 index 000000000..9cda3985c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/prometheusrules.yaml @@ -0,0 +1,27 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ include "zookeeper.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- range $key, $value := .Values.metrics.prometheusRule.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + groups: + - name: {{ include "zookeeper.fullname" . }} + rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 6 }} +{{- end }} + diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml new file mode 100755 index 000000000..b3d727fec --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/secrets.yaml @@ -0,0 +1,18 @@ +{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . 
| nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + client-password: {{ include "zookeeper.clientPassword" . | b64enc | quote }} + server-password: {{ include "zookeeper.serverPasswords" . | b64enc | quote }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml new file mode 100755 index 000000000..3f7ef39fd --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "zookeeper.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml new file mode 100755 index 000000000..5782dad59 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/servicemonitor.yaml @@ -0,0 +1,38 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "zookeeper.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace }} + {{- end }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "zookeeper.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: zookeeper + endpoints: + - port: tcp-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml new file mode 100755 index 000000000..fa1e5231f --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/statefulset.yaml @@ -0,0 +1,334 @@ +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + role: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "zookeeper.fullname" . }}-headless + replicas: {{ .Values.replicaCount }} + podManagementPolicy: {{ .Values.podManagementPolicy }} + updateStrategy: + type: {{ .Values.updateStrategy }} + {{- if (eq "Recreate" .Values.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.rollingUpdatePartition }} + {{- end }} + selector: + matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: zookeeper + template: + metadata: + name: {{ template "zookeeper.fullname" . }} + labels: {{- include "zookeeper.labels" . | nindent 8 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.podLabels }} + {{- include "zookeeper.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.podAnnotations }} + annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + {{- include "zookeeper.imagePullSecrets" . | nindent 6 }} + serviceAccountName: {{ template "zookeeper.serviceAccountName" . }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if .Values.affinity }} + affinity: {{- include "zookeeper.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "zookeeper.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "zookeeper.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "zookeeper.volumePermissions.image" . 
}}
+          imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }}
+          command:
+            - chown
+          args:
+            - -R
+            - {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }}
+            - /bitnami/zookeeper
+            {{- if .Values.dataLogDir }}
+            - {{ .Values.dataLogDir }}
+            {{- end }}
+          securityContext:
+            runAsUser: 0
+          {{- if .Values.volumePermissions.resources }}
+          resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: /bitnami/zookeeper
+            {{- if .Values.dataLogDir }}
+            - name: data-log
+              mountPath: {{ .Values.dataLogDir }}
+            {{- end }}
+      {{- end }}
+      containers:
+        - name: zookeeper
+          image: {{ template "zookeeper.image" . }}
+          imagePullPolicy: {{ .Values.image.pullPolicy | quote }}
+          {{- if .Values.securityContext.enabled }}
+          securityContext:
+            runAsUser: {{ .Values.securityContext.runAsUser }}
+          {{- end }}
+          command:
+            - bash
+            - -ec
+            - |
+                # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname
+                HOSTNAME=`hostname -s`
+                if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+                  ORD=${BASH_REMATCH[2]}
+                  export ZOO_SERVER_ID=$((ORD+1))
+                else
+                  echo "Failed to get index from hostname $HOSTNAME"
+                  exit 1
+                fi
+                exec /entrypoint.sh /run.sh
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          env:
+            - name: ZOO_DATA_LOG_DIR
+              value: {{ .Values.dataLogDir | quote }}
+            - name: ZOO_PORT_NUMBER
+              value: {{ .Values.service.port | quote }}
+            - name: ZOO_TICK_TIME
+              value: {{ .Values.tickTime | quote }}
+            - name: ZOO_INIT_LIMIT
+              value: {{ .Values.initLimit | quote }}
+            - name: ZOO_SYNC_LIMIT
+              value: {{ .Values.syncLimit | quote }}
+            - name: ZOO_MAX_CLIENT_CNXNS
+              value: {{ .Values.maxClientCnxns | quote }}
+            - name: ZOO_4LW_COMMANDS_WHITELIST
+              value: {{ .Values.fourlwCommandsWhitelist | quote }}
+            - name: ZOO_LISTEN_ALLIPS_ENABLED
+              value: {{ ternary "yes" "no" .Values.listenOnAllIPs | quote }}
+            - name: ZOO_AUTOPURGE_INTERVAL
+              value: {{ .Values.autopurge.purgeInterval | quote }}
+            - name: ZOO_AUTOPURGE_RETAIN_COUNT
+              value: {{ .Values.autopurge.snapRetainCount | quote }}
+            - name: ZOO_MAX_SESSION_TIMEOUT
+              value: {{ .Values.maxSessionTimeout | quote }}
+            - name: ZOO_SERVERS
+              {{- $replicaCount := int .Values.replicaCount }}
+              {{- $followerPort := int .Values.service.followerPort }}
+              {{- $electionPort := int .Values.service.electionPort }}
+              {{- $releaseNamespace := .Release.Namespace }}
+              {{- $zookeeperFullname := include "zookeeper.fullname" . }}
+              {{- $zookeeperHeadlessServiceName := printf "%s-%s" $zookeeperFullname "headless" | trunc 63 }}
+              {{- $clusterDomain := .Values.clusterDomain }}
+              value: {{ range $i, $e := until $replicaCount }}{{ $zookeeperFullname }}-{{ $e }}.{{ $zookeeperHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $followerPort }}:{{ $electionPort }} {{ end }}
+            - name: ZOO_ENABLE_AUTH
+              value: {{ ternary "yes" "no" .Values.auth.enabled | quote }}
+            {{- if .Values.auth.enabled }}
+            - name: ZOO_CLIENT_USER
+              value: {{ .Values.auth.clientUser | quote }}
+            - name: ZOO_CLIENT_PASSWORD
+              valueFrom:
+                secretKeyRef:
+                  name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" .
}}{{ end }} + key: client-password + - name: ZOO_SERVER_USERS + value: {{ .Values.auth.serverUsers | quote }} + - name: ZOO_SERVER_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ if .Values.auth.existingSecret }}{{ .Values.auth.existingSecret }}{{ else }}{{ template "zookeeper.fullname" . }}{{ end }} + key: server-password + {{- end }} + - name: ZOO_HEAP_SIZE + value: {{ .Values.heapSize | quote }} + - name: ZOO_LOG_LEVEL + value: {{ .Values.logLevel | quote }} + - name: ALLOW_ANONYMOUS_LOGIN + value: {{ ternary "yes" "no" .Values.allowAnonymousLogin | quote }} + {{- if .Values.jvmFlags }} + - name: JVMFLAGS + value: {{ .Values.jvmFlags | quote }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: ZOO_ENABLE_PROMETHEUS_METRICS + value: "yes" + - name: ZOO_PROMETHEUS_METRICS_PORT_NUMBER + value: {{ .Values.metrics.containerPort | quote }} + {{- end }} + {{- if .Values.service.tls.client_enable }} + - name: ZOO_TLS_CLIENT_ENABLE + value: {{ .Values.service.tls.client_enable | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_FILE + value: {{ .Values.service.tls.client_keystore_path | quote }} + - name: ZOO_TLS_CLIENT_KEYSTORE_PASSWORD + value: {{ .Values.service.tls.client_keystore_password | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_FILE + value: {{ .Values.service.tls.client_truststore_path | quote }} + - name: ZOO_TLS_CLIENT_TRUSTSTORE_PASSWORD + value: {{ .Values.service.tls.client_truststore_password | quote }} + {{ end }} + {{- if .Values.service.tls.quorum_enable }} + - name: ZOO_TLS_QUORUM_ENABLE + value: {{ .Values.service.tls.quorum_enable | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_FILE + value: {{ .Values.service.tls.quorum_keystore_path | quote }} + - name: ZOO_TLS_QUORUM_KEYSTORE_PASSWORD + value: {{ .Values.service.tls.quorum_keystore_password | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_FILE + value: {{ .Values.service.tls.quorum_truststore_path | quote }} + - name: ZOO_TLS_QUORUM_TRUSTSTORE_PASSWORD + value: {{ .Values.service.tls.quorum_truststore_password | quote }} + {{ end }} + - name: POD_NAME + valueFrom: + fieldRef: + apiVersion: v1 + fieldPath: metadata.name + {{- if .Values.extraEnvVars }} + {{- toYaml .Values.extraEnvVars | nindent 12 }} + {{- end }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} + - name: client + containerPort: {{ .Values.service.port }} + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: client-tls + containerPort: {{ .Values.service.tls.client_port }} + {{ end }} + - name: follower + containerPort: {{ .Values.service.followerPort }} + - name: election + containerPort: {{ .Values.service.electionPort }} + {{- if .Values.metrics.enabled }} + - name: metrics + containerPort: {{ .Values.metrics.containerPort }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + {{- if not .Values.service.tls.disable_base_client_port }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} nc -w {{ .Values.livenessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.livenessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ 
.Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + {{- if not .Values.service.tls.disable_base_client_port }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} nc -w {{ .Values.readinessProbe.probeCommandTimeout }} localhost {{ .Values.service.port }} | grep imok'] + {{- else }} + command: ['/bin/bash', '-c', 'echo "ruok" | timeout {{ .Values.readinessProbe.probeCommandTimeout }} openssl s_client -quiet -crlf -connect localhost:{{ .Values.service.tls.client_port }} | grep imok'] + {{- end }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/zookeeper + {{- if .Values.dataLogDir }} + - name: data-log + mountPath: {{ .Values.dataLogDir }} + {{- end }} + {{- if .Values.config }} + - name: config + mountPath: /opt/bitnami/zookeeper/conf/zoo.cfg + subPath: zoo.cfg + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- toYaml .Values.extraVolumeMounts | nindent 12 }} + {{- end }} + volumes: + {{- if .Values.config }} + - name: config + configMap: + name: {{ template "zookeeper.fullname" . }} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} + {{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} + {{- end }} + {{- if and .Values.persistence.enabled .Values.persistence.dataLogDir.existingClaim }} + - name: data-log + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.dataLogDir.existingClaim .) }} + {{- else if and ( not .Values.persistence.enabled ) .Values.dataLogDir }} + - name: data-log + emptyDir: {} + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.persistence.enabled (not (and .Values.persistence.existingClaim .Values.persistence.dataLogDir.existingClaim) )}} + volumeClaimTemplates: + {{- if not .Values.persistence.existingClaim }} + - metadata: + name: data + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{- include "zookeeper.storageClass" . | nindent 8 }} + {{- end }} + {{- if and (not .Values.persistence.dataLogDir.existingClaim) .Values.dataLogDir }} + - metadata: + name: data-log + annotations: + {{- range $key, $value := .Values.persistence.annotations }} + {{ $key }}: {{ $value }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.dataLogDir.size | quote }} + {{- include "zookeeper.storageClass" . 
| nindent 8 }} + {{- end }} + {{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml new file mode 100755 index 000000000..972efb51d --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc-headless.yaml @@ -0,0 +1,42 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }}-headless + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.headless.annotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.service.headless.annotations "context" $ ) | nindent 4 }}\ + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: {{ .Values.service.publishNotReadyAddresses }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} + - name: tcp-client + port: 2181 + targetPort: client + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: tcp-client-tls + port: {{ .Values.service.tls.client_port }} + targetPort: client-tls + {{ end }} + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: {{- include "zookeeper.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml new file mode 100755 index 000000000..da3a2895a --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml @@ -0,0 +1,40 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "zookeeper.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "zookeeper.labels" . | nindent 4 }} + app.kubernetes.io/component: zookeeper + {{- if .Values.commonLabels }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.commonAnnotations .Values.service.annotations }} + annotations: + {{- if .Values.service.annotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }}\ + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + ports: + {{ if not .Values.service.tls.disable_base_client_port }} + - name: tcp-client + port: 2181 + targetPort: client + {{ end }} + {{ if .Values.service.tls.client_enable }} + - name: tcp-client-tls + port: {{ .Values.service.tls.client_port }} + targetPort: client-tls + {{ end }} + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: {{- include "zookeeper.matchLabels" . 
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml
new file mode 100755
index 000000000..da3a2895a
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/templates/svc.yaml
@@ -0,0 +1,40 @@
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "zookeeper.fullname" . }}
+  namespace: {{ .Release.Namespace }}
+  labels: {{- include "zookeeper.labels" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
+    {{- if .Values.commonLabels }}
+    {{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if or .Values.commonAnnotations .Values.service.annotations }}
+  annotations:
+    {{- if .Values.service.annotations }}
+    {{- include "zookeeper.tplValue" ( dict "value" .Values.service.annotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if .Values.commonAnnotations }}
+    {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- end }}
+spec:
+  type: {{ .Values.service.type }}
+  ports:
+    {{ if not .Values.service.tls.disable_base_client_port }}
+    - name: tcp-client
+      port: 2181
+      targetPort: client
+    {{ end }}
+    {{ if .Values.service.tls.client_enable }}
+    - name: tcp-client-tls
+      port: {{ .Values.service.tls.client_port }}
+      targetPort: client-tls
+    {{ end }}
+    - name: follower
+      port: 2888
+      targetPort: follower
+    - name: tcp-election
+      port: 3888
+      targetPort: election
+  selector: {{- include "zookeeper.matchLabels" . | nindent 4 }}
+    app.kubernetes.io/component: zookeeper
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml
new file mode 100755
index 000000000..7d678603f
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/values-production.yaml
@@ -0,0 +1,430 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Currently available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami Zookeeper image version
+## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/zookeeper
+  tag: 3.6.2-debian-10-r10
+
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  ## Set to true if you would like to see extra information in the logs
+  ## It turns on BASH and NAMI debugging in minideb
+  ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  ##
+  debug: false
+
+## String to partially override zookeeper.fullname template (will maintain the release name)
+# nameOverride:
+
+## String to fully override zookeeper.fullname template
+# fullnameOverride:
+
+## Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+
+## Add labels to all the deployed resources
+##
+commonLabels: {}
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+
+## extraVolumes and extraVolumeMounts allow you to mount other volumes
+## Example Use Cases:
+## mount certificates to enable TLS
+# extraVolumes:
+#   - name: zookeeper-keystore
+#     secret:
+#       defaultMode: 288
+#       secretName: zookeeper-keystore
+#   - name: zookeeper-truststore
+#     secret:
+#       defaultMode: 288
+#       secretName: zookeeper-truststore
+# extraVolumeMounts:
+#   - name: zookeeper-keystore
+#     mountPath: /certs/keystore
+#     readOnly: true
+#   - name: zookeeper-truststore
+#     mountPath: /certs/truststore
+#     readOnly: true
+
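+## A minimal sketch of the Secrets the commented example above assumes
+## (hypothetical file names; the key names mirror the file names used by the
+## service.tls.*_path defaults further down):
+##   kubectl create secret generic zookeeper-keystore \
+##     --from-file=key_store_file=./zookeeper.keystore.jks
+##   kubectl create secret generic zookeeper-truststore \
+##     --from-file=trust_store_file=./zookeeper.truststore.jks
+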
+## StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+##
+updateStrategy: RollingUpdate
+
+## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions
+## The PDB will only be created if replicaCount is greater than 1
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions
+##
+podDisruptionBudget:
+  maxUnavailable: 1
+
+## Partition update strategy
+## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
+##
+# rollingUpdatePartition:
+
+## StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+
+## Number of ZooKeeper nodes
+##
+replicaCount: 3
+
+## Basic time unit in milliseconds used by ZooKeeper for heartbeats
+##
+tickTime: 2000
+
+## Limits the length of time the ZooKeeper servers in quorum have to connect to a leader
+##
+initLimit: 10
+
+## How far out of date a server can be from a leader
+##
+syncLimit: 5
+
+## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
+##
+maxClientCnxns: 60
+
+## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime.
+##
+maxSessionTimeout: 40000
+
+## A list of comma separated Four Letter Words commands to use
+##
+fourlwCommandsWhitelist: srvr, mntr, ruok
+
+## Allow zookeeper to listen for peers on all IPs
+##
+listenOnAllIPs: false
+
+## Allow accepting connections from unauthenticated users
+##
+allowAnonymousLogin: true
+
+autopurge:
+  ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest
+  ##
+  snapRetainCount: 3
+  ## The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable auto purging.
+  ##
+  purgeInterval: 0
+
+auth:
+  ## Use existing secret (ignores previous password)
+  ##
+  # existingSecret:
+  ## Enable Zookeeper auth. It uses SASL/Digest-MD5
+  ##
+  enabled: false
+  ## User that Zookeeper clients will use to authenticate
+  ##
+  clientUser:
+  ## Password that Zookeeper clients will use to authenticate
+  ##
+  clientPassword:
+  ## Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
+  ##
+  serverUsers:
+  ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+  ##
+  serverPasswords:
+
+## Size in MB for the Java Heap options (Xmx and Xms). This env var is ignored if Xmx and Xms are configured via JVMFLAGS
+##
+heapSize: 1024
+
+## Log level for the Zookeeper server. ERROR by default. Keep in mind that if you set it to INFO or WARN, the readiness probe will produce a lot of logs.
+##
+logLevel: ERROR
+
+## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir.
+## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
+## Example: +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" + +## Default JVMFLAGS for the ZooKeeper process +## +# jvmFlags: + +## Configure ZooKeeper with a custom zoo.cfg file +## +# config: + +## Kubernetes configuration +## For minikube, set this to NodePort, elsewhere use LoadBalancer +## +service: + type: ClusterIP + port: 2181 + followerPort: 2888 + electionPort: 3888 + publishNotReadyAddresses: true + tls: + client_enable: true + quorum_enable: true + disable_base_client_port: true + + client_port: 3181 + + client_keystore_path: /tls_key_store/key_store_file + client_keystore_password: "" + client_truststore_path: /tls_trust_store/trust_store_file + client_truststore_password: "" + + quorum_keystore_path: /tls_key_store/key_store_file + quorum_keystore_password: "" + quorum_truststore_path: /tls_trust_store/trust_store_file + quorum_truststore_password: "" + annotations: {} + headless: + annotations: {} + +## Service account for Zookeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the zookeeper.fullname template + # name: + +## Zookeeper Pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Zookeeper data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + dataLogDir: + size: 8Gi + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Labels +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Annotations +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Configure extra options for liveness and readiness probes +## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+  probeCommandTimeout: 2
+
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+  probeCommandTimeout: 2
+
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: true
+
+  ## The Policy model to apply. When set to false, only pods with the correct
+  ## client label will have network access to the port ZooKeeper is listening
+  ## on. When true, ZooKeeper accepts connections from any source
+  ## (with the correct destination port).
+  ##
+  allowExternal: true
+
+## Zookeeper Prometheus Exporter configuration
+##
+metrics:
+  enabled: false
+
+  ## Zookeeper Prometheus Exporter container port
+  ##
+  containerPort: 9141
+
+  ## Service configuration
+  ##
+  service:
+    ## Zookeeper Prometheus Exporter service type
+    ##
+    type: ClusterIP
+    ## Zookeeper Prometheus Exporter service port
+    ##
+    port: 9141
+    ## Annotations for the Zookeeper Prometheus Exporter metrics service
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "{{ .Values.metrics.service.port }}"
+      prometheus.io/path: "/metrics"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+    ##
+    namespace:
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+  ## Prometheus Operator PrometheusRule configuration
+  ##
+  prometheusRule:
+    enabled: false
+    ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace:
+
+    ## PrometheusRule selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+    ## Some example rules.
+    rules: []
+    # - alert: ZookeeperSyncedFollowers
+    #   annotations:
+    #     message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+    #   expr: max(synced_followers{service="my-release-metrics"}) < 2
+    #   for: 5m
+    #   labels:
+    #     severity: critical
+    # - alert: ZookeeperOutstandingRequests
+    #   annotations:
+    #     message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+    #   expr: outstanding_requests{service="my-release-metrics"} > 10
+    #   for: 5m
+    #   labels:
+    #     severity: critical
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml
new file mode 100755
index 000000000..a40decb54
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/charts/zookeeper/values.yaml
@@ -0,0 +1,430 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Currently available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami Zookeeper image version
+## ref: https://hub.docker.com/r/bitnami/zookeeper/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/zookeeper
+  tag: 3.6.2-debian-10-r10
+
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+  ## Set to true if you would like to see extra information in the logs
+  ## It turns on BASH and NAMI debugging in minideb
+  ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  ##
+  debug: false
+
+## String to partially override zookeeper.fullname template (will maintain the release name)
+# nameOverride:
+
+## String to fully override zookeeper.fullname template
+# fullnameOverride:
+
+## Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+
+## Add labels to all the deployed resources
+##
+commonLabels: {}
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+## Init containers parameters:
+## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section.
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  resources: {}
+
+## extraVolumes and extraVolumeMounts allow you to mount other volumes
+## Example Use Cases:
+## mount certificates to enable TLS
+# extraVolumes:
+#   - name: zookeeper-keystore
+#     secret:
+#       defaultMode: 288
+#       secretName: zookeeper-keystore
+#   - name: zookeeper-truststore
+#     secret:
+#       defaultMode: 288
+#       secretName: zookeeper-truststore
+# extraVolumeMounts:
+#   - name: zookeeper-keystore
+#     mountPath: /certs/keystore
+#     readOnly: true
+#   - name: zookeeper-truststore
+#     mountPath: /certs/truststore
+#     readOnly: true
+
+## StatefulSet controller supports automated updates. There are two valid update strategies: RollingUpdate and OnDelete
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+##
+updateStrategy: RollingUpdate
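+
+## For instance, a staged rollout that only replaces pods with ordinals >= 2
+## could look like this (illustrative values, using the rollingUpdatePartition
+## knob documented below):
+##   updateStrategy: RollingUpdate
+##   rollingUpdatePartition: 2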
+
+## Limits the number of pods of the replicated application that are down simultaneously from voluntary disruptions
+## The PDB will only be created if replicaCount is greater than 1
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions
+##
+podDisruptionBudget:
+  maxUnavailable: 1
+
+## Partition update strategy
+## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
+##
+# rollingUpdatePartition:
+
+## StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+##
+podManagementPolicy: Parallel
+
+## Number of ZooKeeper nodes
+##
+replicaCount: 1
+
+## Basic time unit in milliseconds used by ZooKeeper for heartbeats
+##
+tickTime: 2000
+
+## Limits the length of time the ZooKeeper servers in quorum have to connect to a leader
+##
+initLimit: 10
+
+## How far out of date a server can be from a leader
+##
+syncLimit: 5
+
+## Limits the number of concurrent connections that a single client may make to a single member of the ZooKeeper ensemble
+##
+maxClientCnxns: 60
+
+## A list of comma separated Four Letter Words commands to use
+##
+fourlwCommandsWhitelist: srvr, mntr, ruok
+
+## Allow zookeeper to listen for peers on all IPs
+##
+listenOnAllIPs: false
+
+## Allow accepting connections from unauthenticated users
+##
+allowAnonymousLogin: true
+
+autopurge:
+  ## Retains the snapRetainCount most recent snapshots and the corresponding transaction logs and deletes the rest
+  ##
+  snapRetainCount: 3
+  ## The time interval in hours for which the purge task has to be triggered. Set to a positive integer (1 and above) to enable auto purging.
+  ##
+  purgeInterval: 0
+
+## Maximum session timeout in milliseconds that the server will allow the client to negotiate. Defaults to 20 times the tickTime.
+##
+maxSessionTimeout: 40000
+
+auth:
+  ## Use existing secret (ignores previous password)
+  ##
+  # existingSecret:
+  ## Enable Zookeeper auth. It uses SASL/Digest-MD5
+  ##
+  enabled: false
+  ## User that Zookeeper clients will use to authenticate
+  ##
+  clientUser:
+  ## Password that Zookeeper clients will use to authenticate
+  ##
+  clientPassword:
+  ## Comma, semicolon or whitespace separated list of users to be created. Specify them as a string, for example: "user1,user2,admin"
+  ##
+  serverUsers:
+  ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin"
+  ##
+  serverPasswords:
+
+## Size in MB for the Java Heap options (Xmx and Xms). This env var is ignored if Xmx and Xms are configured via JVMFLAGS
+##
+heapSize: 1024
+
+## Log level for the Zookeeper server. ERROR by default. Keep in mind that if you set it to INFO or WARN, the readiness probe will produce a lot of logs.
+##
+logLevel: ERROR
+
+## Data log directory. Specifying this option will direct zookeeper to write the transaction log to the dataLogDir rather than the dataDir.
+## This allows a dedicated log device to be used, and helps avoid competition between logging and snapshots.
+## Example: +## dataLogDir: /bitnami/zookeeper/dataLog +## +dataLogDir: "" + +## Default JVMFLAGS for the ZooKeeper process +## +# jvmFlags: + +## Configure ZooKeeper with a custom zoo.cfg file +## +# config: + +## Kubernetes configuration +## For minikube, set this to NodePort, elsewhere use LoadBalancer +## +service: + type: ClusterIP + port: 2181 + followerPort: 2888 + electionPort: 3888 + publishNotReadyAddresses: true + tls: + client_enable: false + quorum_enable: false + disable_base_client_port: false + + client_port: 3181 + + client_keystore_path: /tls_key_store/key_store_file + client_keystore_password: "" + client_truststore_path: /tls_trust_store/trust_store_file + client_truststore_password: "" + + quorum_keystore_path: /tls_key_store/key_store_file + quorum_keystore_password: "" + quorum_truststore_path: /tls_trust_store/trust_store_file + quorum_truststore_password: "" + annotations: {} + headless: + annotations: {} + +## Service account for Zookeeper to use. +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the zookeeper.fullname template + # name: + +## Zookeeper Pod Security Context +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Zookeeper data Persistent Volume Storage Class +## If defined, storageClassName: +## If set to "-", storageClassName: "", which disables dynamic provisioning +## If undefined (the default) or set to null, no storageClassName spec is +## set, choosing the default provisioner. (gp2 on AWS, standard on +## GKE, AWS & OpenStack) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + enabled: true + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + dataLogDir: + size: 8Gi + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + + +## Node labels for pod assignment +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Labels +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Annotations +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +podAnnotations: {} + +## Name of the priority class to be used by zookeeper pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Configure extra options for liveness and readiness probes +## ref: 
https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+  probeCommandTimeout: 2
+
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+  probeCommandTimeout: 2
+
+## Network policies
+## Ref: https://kubernetes.io/docs/concepts/services-networking/network-policies/
+##
+networkPolicy:
+  ## Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: false
+
+  ## The Policy model to apply. When set to false, only pods with the correct
+  ## client label will have network access to the port ZooKeeper is listening
+  ## on. When true, ZooKeeper accepts connections from any source
+  ## (with the correct destination port).
+  ##
+  # allowExternal: true
+
+## Zookeeper Prometheus Exporter configuration
+##
+metrics:
+  enabled: false
+
+  ## Zookeeper Prometheus Exporter container port
+  ##
+  containerPort: 9141
+
+  ## Service configuration
+  ##
+  service:
+    ## Zookeeper Prometheus Exporter service type
+    ##
+    type: ClusterIP
+    ## Zookeeper Prometheus Exporter service port
+    ##
+    port: 9141
+    ## Annotations for the Zookeeper Prometheus Exporter metrics service
+    ##
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "{{ .Values.metrics.service.port }}"
+      prometheus.io/path: "/metrics"
+
+  ## Prometheus Operator ServiceMonitor configuration
+  ##
+  serviceMonitor:
+    enabled: false
+    ## Namespace for the ServiceMonitor Resource (defaults to the Release Namespace)
+    ##
+    namespace:
+
+    ## Interval at which metrics should be scraped.
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # interval: 10s
+
+    ## Timeout after which the scrape is ended
+    ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint
+    ##
+    # scrapeTimeout: 10s
+
+    ## ServiceMonitor selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+  ## Prometheus Operator PrometheusRule configuration
+  ##
+  prometheusRule:
+    enabled: false
+    ## Namespace for the PrometheusRule Resource (defaults to the Release Namespace)
+    ##
+    namespace:
+
+    ## PrometheusRule selector labels
+    ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration
+    ##
+    # selector:
+    #   prometheus: my-prometheus
+
+    ## Some example rules.
+    rules: []
+    # - alert: ZookeeperSyncedFollowers
+    #   annotations:
+    #     message: The number of synced followers for the leader node in Zookeeper deployment my-release is less than 2. This usually means that some of the Zookeeper nodes aren't communicating properly. If it doesn't resolve itself you can try killing the pods (one by one).
+    #   expr: max(synced_followers{service="my-release-metrics"}) < 2
+    #   for: 5m
+    #   labels:
+    #     severity: critical
+    # - alert: ZookeeperOutstandingRequests
+    #   annotations:
+    #     message: The number of outstanding requests for Zookeeper pod {{ $labels.pod }} is greater than 10. This can indicate a performance issue with the Pod or the cluster as a whole.
+ # expr: outstanding_requests{service="my-release-metrics"} > 10 + # for: 5m + # labels: + # severity: critical diff --git a/scripts/helm/helmcharts/databases/charts/kafka/files/jks/README.md b/scripts/helm/helmcharts/databases/charts/kafka/files/jks/README.md new file mode 100755 index 000000000..e110a8825 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/files/jks/README.md @@ -0,0 +1,10 @@ +# Java Key Stores + +You can copy here your Java Key Stores (JKS) files so a secret is created including them. Remember to use a truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. For instance, if you have 3 brokers you need to copy here the following files: + +- kafka.truststore.jks +- kafka-0.keystore.jks +- kafka-1.keystore.jks +- kafka-2.keystore.jks + +Find more info in [this section](https://github.com/bitnami/charts/tree/master/bitnami/kafka#enable-security-for-kafka-and-zookeeper) of the README.md file. diff --git a/scripts/helm/helmcharts/databases/charts/kafka/kafka.yaml b/scripts/helm/helmcharts/databases/charts/kafka/kafka.yaml new file mode 100644 index 000000000..acd718957 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/kafka.yaml @@ -0,0 +1,521 @@ +--- +# Source: kafka/templates/serviceaccount.yaml +apiVersion: v1 +kind: ServiceAccount +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/scripts-configmap.yaml +apiVersion: v1 +kind: ConfigMap +metadata: + name: kafka-scripts + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm +data: + setup.sh: |- + #!/bin/bash + + ID="${MY_POD_NAME#"kafka-"}" + export KAFKA_CFG_BROKER_ID="$ID" + + exec /entrypoint.sh /run.sh +--- +# Source: kafka/charts/zookeeper/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-zookeeper-headless + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper +spec: + type: ClusterIP + clusterIP: None + publishNotReadyAddresses: true + ports: + + - name: tcp-client + port: 2181 + targetPort: client + + + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper +--- +# Source: kafka/charts/zookeeper/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-zookeeper + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper +spec: + type: ClusterIP + ports: + + - name: tcp-client + port: 2181 + targetPort: client + + + - name: follower + port: 2888 + targetPort: follower + - name: tcp-election + port: 3888 + targetPort: election + selector: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper +--- +# Source: kafka/templates/kafka-metrics-svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-metrics + labels: + app.kubernetes.io/name: kafka + 
helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + annotations: + + prometheus.io/path: /metrics + prometheus.io/port: '9308' + prometheus.io/scrape: "true" +spec: + type: ClusterIP + ports: + - name: http-metrics + port: 9308 + protocol: TCP + targetPort: metrics + nodePort: null + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: metrics +--- +# Source: kafka/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka-headless + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: 9092 + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: 9093 + protocol: TCP + targetPort: kafka-internal + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: kafka + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: kafka +spec: + type: ClusterIP + ports: + - name: tcp-client + port: 9092 + protocol: TCP + targetPort: kafka-client + nodePort: null + selector: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: kafka +--- +# Source: kafka/templates/kafka-metrics-deployment.yaml +apiVersion: apps/v1 +kind: Deployment +metadata: + name: kafka-exporter + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics +spec: + replicas: 1 + selector: + matchLabels: + app.kubernetes.io/name: kafka + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: metrics + template: + metadata: + labels: + app.kubernetes.io/name: kafka + helm.sh/chart: kafka-11.8.6 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: metrics + spec: + containers: + - name: kafka-exporter + image: docker.io/bitnami/kafka-exporter:1.2.0-debian-10-r220 + imagePullPolicy: "IfNotPresent" + command: + - /bin/bash + - -ec + - | + read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" + kafka_exporter \ + --kafka.server=kafka-0.kafka-headless.db.svc.cluster.local:9092 \ + --kafka.server=kafka-1.kafka-headless.db.svc.cluster.local:9092 \ + --web.listen-address=:9308 + ports: + - name: metrics + containerPort: 9308 + resources: + limits: {} + requests: {} +--- +# Source: kafka/charts/zookeeper/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: kafka-zookeeper + namespace: db + labels: + app.kubernetes.io/name: zookeeper + helm.sh/chart: zookeeper-5.21.9 + app.kubernetes.io/instance: kafka + app.kubernetes.io/managed-by: Helm + app.kubernetes.io/component: zookeeper + role: zookeeper +spec: + serviceName: kafka-zookeeper-headless + replicas: 1 + podManagementPolicy: Parallel + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: zookeeper + app.kubernetes.io/instance: kafka + app.kubernetes.io/component: zookeeper + template: + metadata: + name: kafka-zookeeper + 
labels:
+        app.kubernetes.io/name: zookeeper
+        helm.sh/chart: zookeeper-5.21.9
+        app.kubernetes.io/instance: kafka
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/component: zookeeper
+    spec:
+      serviceAccountName: default
+      securityContext:
+        fsGroup: 1001
+      containers:
+        - name: zookeeper
+          image: docker.io/bitnami/zookeeper:3.6.2-debian-10-r10
+          imagePullPolicy: "IfNotPresent"
+          securityContext:
+            runAsUser: 1001
+          command:
+            - bash
+            - -ec
+            - |
+              # Execute entrypoint as usual after obtaining ZOO_SERVER_ID based on POD hostname
+              HOSTNAME=`hostname -s`
+              if [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]]; then
+                ORD=${BASH_REMATCH[2]}
+                export ZOO_SERVER_ID=$((ORD+1))
+              else
+                echo "Failed to get index from hostname $HOSTNAME"
+                exit 1
+              fi
+              exec /entrypoint.sh /run.sh
+          resources:
+            requests:
+              cpu: 250m
+              memory: 256Mi
+          env:
+            - name: ZOO_DATA_LOG_DIR
+              value: ""
+            - name: ZOO_PORT_NUMBER
+              value: "2181"
+            - name: ZOO_TICK_TIME
+              value: "2000"
+            - name: ZOO_INIT_LIMIT
+              value: "10"
+            - name: ZOO_SYNC_LIMIT
+              value: "5"
+            - name: ZOO_MAX_CLIENT_CNXNS
+              value: "60"
+            - name: ZOO_4LW_COMMANDS_WHITELIST
+              value: "srvr, mntr, ruok"
+            - name: ZOO_LISTEN_ALLIPS_ENABLED
+              value: "no"
+            - name: ZOO_AUTOPURGE_INTERVAL
+              value: "0"
+            - name: ZOO_AUTOPURGE_RETAIN_COUNT
+              value: "3"
+            - name: ZOO_MAX_SESSION_TIMEOUT
+              value: "40000"
+            - name: ZOO_SERVERS
+              value: kafka-zookeeper-0.kafka-zookeeper-headless.db.svc.cluster.local:2888:3888
+            - name: ZOO_ENABLE_AUTH
+              value: "no"
+            - name: ZOO_HEAP_SIZE
+              value: "1024"
+            - name: ZOO_LOG_LEVEL
+              value: "ERROR"
+            - name: ALLOW_ANONYMOUS_LOGIN
+              value: "yes"
+            - name: POD_NAME
+              valueFrom:
+                fieldRef:
+                  apiVersion: v1
+                  fieldPath: metadata.name
+          ports:
+            - name: client
+              containerPort: 2181
+            - name: follower
+              containerPort: 2888
+            - name: election
+              containerPort: 3888
+          livenessProbe:
+            exec:
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
+            initialDelaySeconds: 30
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 6
+          readinessProbe:
+            exec:
+              command: ['/bin/bash', '-c', 'echo "ruok" | timeout 2 nc -w 2 localhost 2181 | grep imok']
+            initialDelaySeconds: 5
+            periodSeconds: 10
+            timeoutSeconds: 5
+            successThreshold: 1
+            failureThreshold: 6
+          volumeMounts:
+            - name: data
+              mountPath: /bitnami/zookeeper
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes:
+          - "ReadWriteOnce"
+        resources:
+          requests:
+            storage: "8Gi"
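+# Note: the setup command above derives ZOO_SERVER_ID as pod ordinal + 1.
+# An illustrative dry run of the same bash logic (hypothetical pod name):
+#   HOSTNAME=kafka-zookeeper-2
+#   [[ $HOSTNAME =~ (.*)-([0-9]+)$ ]] && echo $((BASH_REMATCH[2] + 1))   # prints 3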
+---
+# Source: kafka/templates/statefulset.yaml
+apiVersion: apps/v1
+kind: StatefulSet
+metadata:
+  name: kafka
+  labels:
+    app.kubernetes.io/name: kafka
+    helm.sh/chart: kafka-11.8.6
+    app.kubernetes.io/instance: kafka
+    app.kubernetes.io/managed-by: Helm
+    app.kubernetes.io/component: kafka
+spec:
+  podManagementPolicy: Parallel
+  replicas: 2
+  selector:
+    matchLabels:
+      app.kubernetes.io/name: kafka
+      app.kubernetes.io/instance: kafka
+      app.kubernetes.io/component: kafka
+  serviceName: kafka-headless
+  updateStrategy:
+    type: "RollingUpdate"
+  template:
+    metadata:
+      labels:
+        app.kubernetes.io/name: kafka
+        helm.sh/chart: kafka-11.8.6
+        app.kubernetes.io/instance: kafka
+        app.kubernetes.io/managed-by: Helm
+        app.kubernetes.io/component: kafka
+    spec:
+      securityContext:
+        fsGroup: 1001
+        runAsUser: 1001
+      serviceAccountName: kafka
+      containers:
+        - name: kafka
+          image: docker.io/bitnami/kafka:2.6.0-debian-10-r30
+          imagePullPolicy: "IfNotPresent"
+          command:
+            - /scripts/setup.sh
+          env:
+            - name: BITNAMI_DEBUG
+              value: "false"
+            - name: MY_POD_IP
+              valueFrom:
+                fieldRef:
+                  fieldPath: status.podIP
+            - name: MY_POD_NAME
+              valueFrom:
+                fieldRef:
+                  fieldPath: metadata.name
+            - name: KAFKA_CFG_ZOOKEEPER_CONNECT
+              value: "kafka-zookeeper"
+            - name: KAFKA_INTER_BROKER_LISTENER_NAME
+              value: "INTERNAL"
+            - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP
+              value: "INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT"
+            - name: KAFKA_CFG_LISTENERS
+              value: "INTERNAL://:9093,CLIENT://:9092"
+            - name: KAFKA_CFG_ADVERTISED_LISTENERS
+              value: "INTERNAL://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9093,CLIENT://$(MY_POD_NAME).kafka-headless.db.svc.cluster.local:9092"
+            - name: ALLOW_PLAINTEXT_LISTENER
+              value: "yes"
+            - name: KAFKA_CFG_DELETE_TOPIC_ENABLE
+              value: "false"
+            - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE
+              value: "true"
+            - name: KAFKA_HEAP_OPTS
+              value: "-Xmx1024m -Xms1024m"
+            - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES
+              value: "10000"
+            - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS
+              value: "1000"
+            - name: KAFKA_CFG_LOG_RETENTION_BYTES
+              value: "1073741824"
+            - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS
+              value: "300000"
+            - name: KAFKA_CFG_LOG_RETENTION_HOURS
+              value: "168"
+            - name: KAFKA_CFG_MESSAGE_MAX_BYTES
+              value: "1000012"
+            - name: KAFKA_CFG_LOG_SEGMENT_BYTES
+              value: "1073741824"
+            - name: KAFKA_CFG_LOG_DIRS
+              value: "/bitnami/kafka/data"
+            - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR
+              value: "1"
+            - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR
+              value: "1"
+            - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR
+              value: "1"
+            - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR
+              value: "1"
+            - name: KAFKA_CFG_NUM_IO_THREADS
+              value: "8"
+            - name: KAFKA_CFG_NUM_NETWORK_THREADS
+              value: "3"
+            - name: KAFKA_CFG_NUM_PARTITIONS
+              value: "1"
+            - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR
+              value: "1"
+            - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES
+              value: "102400"
+            - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES
+              value: "104857600"
+            - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES
+              value: "102400"
+            - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS
+              value: "6000"
+          ports:
+            - name: kafka-client
+              containerPort: 9092
+            - name: kafka-internal
+              containerPort: 9093
+          livenessProbe:
+            tcpSocket:
+              port: kafka-client
+            initialDelaySeconds: 10
+            timeoutSeconds: 5
+          readinessProbe:
+            tcpSocket:
+              port: kafka-client
+            initialDelaySeconds: 5
+            timeoutSeconds: 5
+            failureThreshold: 6
+          resources:
+            limits: {}
+            requests: {}
+          volumeMounts:
+            - name: data
+              mountPath: /bitnami/kafka
+            - name: scripts
+              mountPath: /scripts/setup.sh
+              subPath: setup.sh
+      volumes:
+        - name: scripts
+          configMap:
+            name: kafka-scripts
+            defaultMode: 0755
+  volumeClaimTemplates:
+    - metadata:
+        name: data
+      spec:
+        accessModes:
+          - "ReadWriteOnce"
+        resources:
+          requests:
+            storage: "8Gi"
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/requirements.lock b/scripts/helm/helmcharts/databases/charts/kafka/requirements.lock
new file mode 100755
index 000000000..115d0b229
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/requirements.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: zookeeper
+  repository: https://charts.bitnami.com/bitnami
+  version: 5.21.9
+digest: sha256:2f3c43ce02e3966648b8c89be121fe39537f62ea1d161ad908f51ddc90e4243e
+generated: "2020-09-29T07:43:56.483358254Z"
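
The lock file above pins the ZooKeeper subchart. If the dependency range in requirements.yaml below is ever changed, the lock can be regenerated from the kafka chart directory (illustrative invocation):

    helm dependency update scripts/helm/helmcharts/databases/charts/kafka
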
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/requirements.yaml b/scripts/helm/helmcharts/databases/charts/kafka/requirements.yaml
new file mode 100755
index 000000000..533875258
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/requirements.yaml
@@ -0,0 +1,5 @@
+dependencies:
+  - name: zookeeper
+    version: 5.x.x
+    repository: https://charts.bitnami.com/bitnami
+    condition: zookeeper.enabled
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/NOTES.txt b/scripts/helm/helmcharts/databases/charts/kafka/templates/NOTES.txt
new file mode 100755
index 000000000..0347c21c4
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/NOTES.txt
@@ -0,0 +1,181 @@
+{{- $replicaCount := int .Values.replicaCount -}}
+{{- $releaseNamespace := .Release.Namespace -}}
+{{- $clusterDomain := .Values.clusterDomain -}}
+{{- $fullname := include "kafka.fullname" . -}}
+{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}}
+{{- $servicePort := int .Values.service.port -}}
+{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs -}}
+{{- if and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer") }}
+
+###############################################################################
+### ERROR: You enabled external access to Kafka brokers without specifying ###
+###        the array of load balancer IPs for Kafka brokers.               ###
+###############################################################################
+
+This deployment will be incomplete until you configure the array of load balancer
+IPs for Kafka brokers. To complete your deployment follow the steps below:

+
+1. Wait for the load balancer IPs (it may take a few minutes for them to be available):
+
+    kubectl get svc --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "kafka.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=kafka,pod" -w
+
+2. Obtain the load balancer IPs and upgrade your chart:
+
+    {{- range $i, $e := until $replicaCount }}
+    LOAD_BALANCER_IP_{{ add $i 1 }}="$(kubectl get svc --namespace {{ $releaseNamespace }} {{ $fullname }}-{{ $i }}-external -o jsonpath='{.status.loadBalancer.ingress[0].ip}')"
+    {{- end }}
+
+3. Upgrade your chart:
+
+    helm upgrade {{ .Release.Name }} bitnami/{{ .Chart.Name }} \
+      --set replicaCount={{ $replicaCount }} \
+      --set externalAccess.enabled=true \
+      {{- range $i, $e := until $replicaCount }}
+      --set externalAccess.service.loadBalancerIPs[{{ $i }}]=$LOAD_BALANCER_IP_{{ add $i 1 }} \
+      {{- end }}
+      --set externalAccess.service.type=LoadBalancer
+
+{{- else }}
+
+{{- if and (or (eq .Values.service.type "LoadBalancer") .Values.externalAccess.enabled) (eq .Values.auth.clientProtocol "plaintext") }}
+---------------------------------------------------------------------------------------------
+ WARNING
+
+    By specifying "serviceType=LoadBalancer" and not configuring the authentication
+    you have most likely exposed the Kafka service externally without any
+    authentication mechanism.
+
+    For security reasons, we strongly suggest that you switch to "ClusterIP" or
+    "NodePort". As an alternative, you can also configure Kafka authentication.
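+
+    For example (illustrative), the exposure can be reverted at upgrade time:
+
+        helm upgrade kafka bitnami/kafka --set service.type=ClusterIP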
+
+---------------------------------------------------------------------------------------------
+{{- end }}
+
+** Please be patient while the chart is being deployed **
+
+Kafka can be accessed by consumers via port {{ $servicePort }} on the following DNS name from within your cluster:
+
+    {{ $fullname }}.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}
+
+Each Kafka broker can be accessed by producers via port {{ $servicePort }} on the following DNS name(s) from within your cluster:
+
+{{- $brokerList := list }}
+{{- range $e, $i := until $replicaCount }}
+{{- $brokerList = append $brokerList (printf "%s-%d.%s-headless.%s.svc.%s:%d" $fullname $i $fullname $releaseNamespace $clusterDomain $servicePort) }}
+{{- end }}
+{{ join "\n" $brokerList | nindent 4 }}

+
+{{- if (include "kafka.client.saslAuthentication" .) }}
+
+You need to configure your Kafka client to access using SASL authentication. To do so, you need to create the 'kafka_jaas.conf' and 'client.properties' configuration files by executing these commands:
+
+    - kafka_jaas.conf:
+
+cat > kafka_jaas.conf <<EOF
+...
+EOF
+
+    - client.properties:
+
+cat > client.properties <<EOF
+...
+EOF
+
+    - pattern: kafka.controller<type=(\w+), name=(\w+), broker-id=(\d+)><>(Value)
+      name: kafka_controller_$1_$2_$4
+      labels:
+        broker_id: "$3"
+    - pattern: kafka.controller<type=(\w+), name=(\w+)><>(Value)
+      name: kafka_controller_$1_$2_$3
+    - pattern: kafka.controller<type=(\w+), name=(.+)><>(Value)
+      name: kafka_controller_$1_$2_$3
+    - pattern: kafka.controller<type=(\w+), name=(.+)><>(Count)
+      name: kafka_controller_$1_$2_$3
+    - pattern: kafka.server<type=(.+), name=(.+), clientId=(.+)><>(Value)
+      name: kafka_server_$1_$2_$4
+      labels:
+        client_id: "$3"
+    - pattern : kafka.network<type=(.+), name=(.+), networkProcessor=(.+)><>(Value)
+      name: kafka_network_$1_$2_$4
+      labels:
+        network_processor: $3
+    - pattern : kafka.network<type=(.+), name=(.+), request=(.+)><>(Count)
+      name: kafka_network_$1_$2_$4
+      labels:
+        request: $3
+    - pattern: kafka.server<type=(.+), name=(.+), topic=(.+)><>(Count|OneMinuteRate)
+      name: kafka_server_$1_$2_$4
+      labels:
+        topic: $3
+    - pattern: kafka.server<type=(.+), name=(.+), delayedOperation=(.+)><>(Value)
+      name: kafka_server_$1_$2_$3_$4
+    - pattern: kafka.server<type=(.+), name=(.+)><>(Count|Value|OneMinuteRate)
+      name: kafka_server_$1_total_$2_$3
+    - pattern: kafka.server<type=(.+)><>(queue-size)
+      name: kafka_server_$1_$2
+    - pattern: java.lang<type=(.+), name=(.+)><(.+)>(\w+)
+      name: java_lang_$1_$4_$3_$2
+    - pattern: java.lang<type=(.+), name=(.+)><>(\w+)
+      name: java_lang_$1_$3_$2
+    - pattern : java.lang<type=(.*)>
+    - pattern: kafka.log<type=(.+), name=(.+), topic=(.+), partition=(.*)><>Value
+      name: kafka_log_$1_$2
+      labels:
+        topic: $3
+        partition: $4
+{{- end -}}
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml
new file mode 100755
index 000000000..83edd8422
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/jmx-metrics-svc.yaml
@@ -0,0 +1,45 @@
+{{- if .Values.metrics.jmx.enabled }}
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ template "kafka.fullname" . }}-jmx-metrics
+  labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.jmx.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.jmx.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.metrics.jmx.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.jmx.service.type }} + {{- if eq .Values.metrics.jmx.service.type "LoadBalancer" }} + {{- if .Values.metrics.jmx.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.jmx.service.loadBalancerIP }} + {{- end }} + {{- if .Values.metrics.jmx.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.jmx.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.metrics.jmx.service.type "ClusterIP") .Values.metrics.jmx.service.clusterIP }} + clusterIP: {{ .Values.metrics.jmx.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.jmx.service.port }} + protocol: TCP + targetPort: metrics + {{- if and (or (eq .Values.metrics.jmx.service.type "NodePort") (eq .Values.metrics.jmx.service.type "LoadBalancer")) (not (empty .Values.metrics.jmx.service.nodePort)) }} + nodePort: {{ .Values.metrics.jmx.service.nodePort }} + {{- else if eq .Values.metrics.jmx.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml new file mode 100755 index 000000000..c547fbb39 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/kafka-metrics-deployment.yaml @@ -0,0 +1,87 @@ +{{- if .Values.metrics.kafka.enabled }} +{{- $replicaCount := int .Values.replicaCount -}} +{{- $releaseNamespace := .Release.Namespace -}} +{{- $clusterDomain := .Values.clusterDomain -}} +{{- $fullname := include "kafka.fullname" . -}} +{{- $servicePort := int .Values.service.port -}} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ template "kafka.fullname" . }}-exporter + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + replicas: 1 + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + template: + metadata: + labels: {{- include "kafka.labels" . | nindent 8 }} + app.kubernetes.io/component: metrics + spec: +{{- include "kafka.imagePullSecrets" . | indent 6 }} + containers: + - name: kafka-exporter + image: {{ include "kafka.metrics.kafka.image" . 
}} + imagePullPolicy: {{ .Values.metrics.kafka.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + read -r -a sasl_passwords <<< "$(tr ',;' ' ' <<< "${SASL_USER_PASSWORD}")" + kafka_exporter \ + {{- range $i, $e := until $replicaCount }} + --kafka.server={{ $fullname }}-{{ $i }}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $servicePort }} \ + {{- end }} + {{- if (include "kafka.client.saslAuthentication" .) }} + --sasl.enabled \ + --sasl.username="$SASL_USERNAME" \ + --sasl.password="${sasl_passwords[0]}" \ + {{- end }} + {{- if (include "kafka.tlsEncryption" .) }} + --tls.enabled \ + {{- if .Values.metrics.kafka.certificatesSecret }} + --tls.ca-file="/opt/bitnami/kafka-exporter/certs/ca-file" \ + --tls.cert-file="/opt/bitnami/kafka-exporter/certs/cert-file" \ + --tls.key-file="/opt/bitnami/kafka-exporter/certs/key-file" \ + {{- end }} + {{- end }} + {{- range $key, $value := .Values.metrics.kafka.extraFlags }} + --{{ $key }}{{ if $value }}={{ $value }}{{ end }} \ + {{- end }} + --web.listen-address=:9308 + {{- if (include "kafka.client.saslAuthentication" .) }} + env: + - name: SASL_USERNAME + value: {{ index .Values.auth.jaas.clientUsers 0 | quote }} + - name: SASL_USER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + ports: + - name: metrics + containerPort: 9308 + {{- if .Values.metrics.kafka.resources }} + resources: {{ toYaml .Values.metrics.kafka.resources | nindent 12 }} + {{- end }} + {{- if and (include "kafka.tlsEncryption" .) .Values.metrics.kafka.certificatesSecret }} + volumeMounts: + - name: kafka-exporter-certificates + mountPath: /opt/bitnami/kafka-exporter/certs/ + readOnly: true + volumes: + - name: kafka-exporter-certificates + secret: + secretName: {{ .Values.metrics.kafka.certificatesSecret }} + defaultMode: 0440 + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml new file mode 100755 index 000000000..54a4ccb0b --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/kafka-metrics-svc.yaml @@ -0,0 +1,45 @@ +{{- if .Values.metrics.kafka.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-metrics + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: metrics + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.metrics.kafka.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.metrics.kafka.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.metrics.kafka.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.metrics.kafka.service.type }} + {{- if eq .Values.metrics.kafka.service.type "LoadBalancer" }} + {{- if .Values.metrics.kafka.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.kafka.service.loadBalancerIP }} + {{- end }} + {{- if .Values.metrics.kafka.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.metrics.kafka.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + {{- if and (eq .Values.metrics.kafka.service.type "ClusterIP") .Values.metrics.kafka.service.clusterIP }} + clusterIP: {{ .Values.metrics.kafka.service.clusterIP }} + {{- end }} + ports: + - name: http-metrics + port: {{ .Values.metrics.kafka.service.port }} + protocol: TCP + targetPort: metrics + {{- if and (or (eq .Values.metrics.kafka.service.type "NodePort") (eq .Values.metrics.kafka.service.type "LoadBalancer")) (not (empty .Values.metrics.kafka.service.nodePort)) }} + nodePort: {{ .Values.metrics.kafka.service.nodePort }} + {{- else if eq .Values.metrics.kafka.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: metrics +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml new file mode 100755 index 000000000..0a34d50dd --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/log4j-configmap.yaml @@ -0,0 +1,16 @@ +{{- if (include "kafka.log4j.createConfigMap" .) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ include "kafka.log4j.configMapName" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + log4j.properties: |- + {{ .Values.log4j | nindent 4 }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml new file mode 100755 index 000000000..cf515becb --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/poddisruptionbudget.yaml @@ -0,0 +1,25 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- if and .Values.pdb.create (gt $replicaCount 1) }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + {{- if .Values.pdb.minAvailable }} + minAvailable: {{ .Values.pdb.minAvailable }} + {{- end }} + {{- if .Values.pdb.maxUnavailable }} + maxUnavailable: {{ .Values.pdb.maxUnavailable }} + {{- end }} + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/role.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/role.yaml new file mode 100755 index 000000000..943c5bf3c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/role.yaml @@ -0,0 +1,23 @@ +{{- if .Values.rbac.create -}} +apiVersion: rbac.authorization.k8s.io/v1 +kind: Role +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + - apiGroups: + - "" + resources: + - services + verbs: + - get + - list + - watch +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/rolebinding.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/rolebinding.yaml new file mode 100755 index 000000000..78f940f85 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/rolebinding.yaml @@ -0,0 +1,22 @@ +{{- if and .Values.serviceAccount.create .Values.rbac.create }} +apiVersion: rbac.authorization.k8s.io/v1 +kind: RoleBinding +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "kafka.fullname" . }} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ template "kafka.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml new file mode 100755 index 000000000..705545a61 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/scripts-configmap.yaml @@ -0,0 +1,118 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "kafka.fullname" . }}-scripts + labels: {{- include "kafka.labels" . 
| nindent 4 }}
+    {{- if .Values.commonLabels }}
+    {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
+    {{- end }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+  {{- $fullname := include "kafka.fullname" . }}
+  {{- $releaseNamespace := .Release.Namespace }}
+  {{- $clusterDomain := .Values.clusterDomain }}
+  {{- $interBrokerPort := .Values.service.internalPort }}
+  {{- $clientPort := .Values.service.port }}
+  {{- if .Values.externalAccess.autoDiscovery.enabled }}
+  auto-discovery.sh: |-
+    #!/bin/bash
+
+    SVC_NAME="${MY_POD_NAME}-external"
+
+    {{- if eq .Values.externalAccess.service.type "LoadBalancer" }}
+    # Auxiliary functions
+    retry_while() {
+        local -r cmd="${1:?cmd is missing}"
+        local -r retries="${2:-12}"
+        local -r sleep_time="${3:-5}"
+        local return_value=1
+
+        read -r -a command <<< "$cmd"
+        for ((i = 1 ; i <= retries ; i+=1 )); do
+            "${command[@]}" && return_value=0 && break
+            sleep "$sleep_time"
+        done
+        return $return_value
+    }
+    k8s_svc_lb_ip() {
+        local namespace=${1:?namespace is missing}
+        local service=${2:?service is missing}
+        local service_ip=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].ip}")
+        local service_hostname=$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.status.loadBalancer.ingress[0].hostname}")
+
+        if [[ -n ${service_ip} ]]; then
+            echo "${service_ip}"
+        else
+            echo "${service_hostname}"
+        fi
+    }
+    k8s_svc_lb_ip_ready() {
+        local namespace=${1:?namespace is missing}
+        local service=${2:?service is missing}
+        [[ -n "$(k8s_svc_lb_ip "$namespace" "$service")" ]]
+    }
+    # Wait until LoadBalancer IP is ready
+    retry_while "k8s_svc_lb_ip_ready {{ $releaseNamespace }} $SVC_NAME" || exit 1
+    # Obtain LoadBalancer external IP
+    k8s_svc_lb_ip "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE"
+    {{- else if eq .Values.externalAccess.service.type "NodePort" }}
+    k8s_svc_node_port() {
+        local namespace=${1:?namespace is missing}
+        local service=${2:?service is missing}
+        local index=${3:-0}
+        local node_port="$(kubectl get svc "$service" -n "$namespace" -o jsonpath="{.spec.ports[$index].nodePort}")"
+        echo "$node_port"
+    }
+    k8s_svc_node_port "{{ $releaseNamespace }}" "$SVC_NAME" | tee "$SHARED_FILE"
+    {{- end }}
+  {{- end }}
+  setup.sh: |-
+    #!/bin/bash
+
+    ID="${MY_POD_NAME#"{{ $fullname }}-"}"
+    export KAFKA_CFG_BROKER_ID="$ID"
+
+    {{- if .Values.externalAccess.enabled }}
+    # Configure external ip and port
+    {{- if eq .Values.externalAccess.service.type "LoadBalancer" }}
+    {{- if .Values.externalAccess.autoDiscovery.enabled }}
+    export EXTERNAL_ACCESS_IP="$(<${SHARED_FILE})"
+    {{- else }}
+    export EXTERNAL_ACCESS_IP=$(echo '{{ .Values.externalAccess.service.loadBalancerIPs }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))")
+    {{- end }}
+    export EXTERNAL_ACCESS_PORT={{ .Values.externalAccess.service.port }}
+    {{- else if eq .Values.externalAccess.service.type "NodePort" }}
+    {{- if .Values.externalAccess.service.domain }}
+    export EXTERNAL_ACCESS_IP={{ .Values.externalAccess.service.domain }}
+    {{- else }}
+    export EXTERNAL_ACCESS_IP=$(curl -s https://ipinfo.io/ip)
+    {{- end }}
+    {{- if .Values.externalAccess.autoDiscovery.enabled }}
+    export EXTERNAL_ACCESS_PORT="$(<${SHARED_FILE})"
+    {{- else }}
+    export EXTERNAL_ACCESS_PORT=$(echo '{{ .Values.externalAccess.service.nodePorts }}' | tr -d '[]' | cut -d ' ' -f "$(($ID + 1))")
+    {{- end }}
+
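+    # At this point EXTERNAL_ACCESS_PORT holds this broker's public port: with
+    # nodePorts '[30001 30002]', for example, the pod with ID=1 selects field 2
+    # of the bracket-stripped list, i.e. 30002.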
{{- end }} + + # Configure Kafka advertised listeners + {{- if .Values.advertisedListeners }} + export KAFKA_CFG_ADVERTISED_LISTENERS={{ .Values.advertisedListeners }} + {{- else }} + export KAFKA_CFG_ADVERTISED_LISTENERS="INTERNAL://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://${MY_POD_NAME}.{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }},EXTERNAL://${EXTERNAL_ACCESS_IP}:${EXTERNAL_ACCESS_PORT}" + {{- end }} + {{- end }} + + {{- if (include "kafka.tlsEncryption" .) }} + if [[ -f "/certs/kafka.truststore.jks" ]] && [[ -f "/certs/kafka-${ID}.keystore.jks" ]]; then + mkdir -p /opt/bitnami/kafka/config/certs + cp "/certs/kafka.truststore.jks" "/opt/bitnami/kafka/config/certs/kafka.truststore.jks" + cp "/certs/kafka-${ID}.keystore.jks" "/opt/bitnami/kafka/config/certs/kafka.keystore.jks" + else + echo "Couldn't find the expected Java Key Stores (JKS) files! They are mandatory when encryption via TLS is enabled." + exit 1 + fi + {{- end }} + + exec /entrypoint.sh /run.sh diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml new file mode 100755 index 000000000..790790b3f --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/serviceaccount.yaml @@ -0,0 +1,14 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "kafka.serviceAccountName" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml new file mode 100755 index 000000000..250bb5306 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/servicemonitor-jmx-metrics.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.jmx.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kafka.fullname" . }}-jmx-metrics + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "kafka.matchLabels" . 
| nindent 6 }} + app.kubernetes.io/component: kafka + endpoints: + - port: http-metrics + path: "/" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml new file mode 100755 index 000000000..951bf7c41 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/servicemonitor-metrics.yaml @@ -0,0 +1,36 @@ +{{- if and .Values.metrics.kafka.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "kafka.fullname" . }}-metrics + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: metrics + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: metrics + endpoints: + - port: http-metrics + path: "/metrics" + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/statefulset.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/statefulset.yaml new file mode 100755 index 000000000..e9b5ce8f9 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/statefulset.yaml @@ -0,0 +1,435 @@ +{{- $replicaCount := int .Values.replicaCount }} +{{- $fullname := include "kafka.fullname" . }} +{{- $releaseNamespace := .Release.Namespace }} +{{- $clusterDomain := .Values.clusterDomain }} +{{- $interBrokerPort := .Values.service.internalPort }} +{{- $clientPort := .Values.service.port }} +{{- $interBrokerProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.interBrokerProtocol ) -}} +{{- $clientProtocol := include "kafka.listenerType" ( dict "protocol" .Values.auth.clientProtocol ) -}} +{{- $loadBalancerIPListLength := len .Values.externalAccess.service.loadBalancerIPs }} +{{- if not (and .Values.externalAccess.enabled (not .Values.externalAccess.autoDiscovery.enabled) (not (eq $replicaCount $loadBalancerIPListLength )) (eq .Values.externalAccess.service.type "LoadBalancer")) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "kafka.fullname" . }} + labels: {{- include "kafka.labels" . 
| nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podManagementPolicy: Parallel + replicas: {{ .Values.replicaCount }} + selector: + matchLabels: {{- include "kafka.matchLabels" . | nindent 6 }} + app.kubernetes.io/component: kafka + serviceName: {{ template "kafka.fullname" . }}-headless + updateStrategy: + type: {{ .Values.updateStrategy | quote }} + {{- if (eq "OnDelete" .Values.updateStrategy) }} + rollingUpdate: null + {{- else if .Values.rollingUpdatePartition }} + rollingUpdate: + partition: {{ .Values.rollingUpdatePartition }} + {{- end }} + template: + metadata: + labels: {{- include "kafka.labels" . | nindent 8 }} + app.kubernetes.io/component: kafka + {{- if .Values.podLabels }} + {{- include "kafka.tplValue" (dict "value" .Values.podLabels "context" $) | nindent 8 }} + {{- end }} + {{- if or (include "kafka.createConfigmap" .) (include "kafka.createJaasSecret" .) .Values.externalAccess.enabled (include "kafka.metrics.jmx.createConfigmap" .) .Values.podAnnotations }} + annotations: + {{- if (include "kafka.createConfigmap" .) }} + checksum/configuration: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.createJaasSecret" .) }} + checksum/secret: {{ include (print $.Template.BasePath "/jaas-secret.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.externalAccess.enabled }} + checksum/scripts: {{ include (print $.Template.BasePath "/scripts-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if (include "kafka.metrics.jmx.createConfigmap" .) }} + checksum/jmx-configuration: {{ include (print $.Template.BasePath "/jmx-configmap.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "kafka.tplValue" (dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: +{{- include "kafka.imagePullSecrets" . | indent 6 }} + {{- if .Values.affinity }} + affinity: {{- include "kafka.tplValue" ( dict "value" .Values.affinity "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "kafka.tplValue" ( dict "value" .Values.nodeSelector "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "kafka.tplValue" ( dict "value" .Values.tolerations "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.priorityClassName }} + priorityClassName: {{ .Values.priorityClassName }} + {{- end }} + {{- if .Values.podSecurityContext }} + securityContext: {{- toYaml .Values.podSecurityContext | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.create }} + serviceAccountName: {{ template "kafka.serviceAccountName" . }} + {{- end }} + {{- if or (and .Values.volumePermissions.enabled .Values.persistence.enabled) (and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled) }} + initContainers: + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + - name: volume-permissions + image: {{ include "kafka.volumePermissions.image" . 
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + args: + - -ec + - | + mkdir -p /bitnami/kafka + chown -R "{{ .Values.podSecurityContext.runAsUser }}:{{ .Values.podSecurityContext.fsGroup }}" "/bitnami/kafka" + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: /bitnami/kafka + {{- end }} + {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }} + - name: auto-discovery + image: {{ include "kafka.externalAccess.autoDiscovery.image" . }} + imagePullPolicy: {{ .Values.externalAccess.autoDiscovery.image.pullPolicy | quote }} + command: + - /scripts/auto-discovery.sh + env: + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: SHARED_FILE + value: "/shared/info.txt" + {{- if .Values.externalAccess.autoDiscovery.resources }} + resources: {{- toYaml .Values.externalAccess.autoDiscovery.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: shared + mountPath: /shared + - name: scripts + mountPath: /scripts/auto-discovery.sh + subPath: auto-discovery.sh + {{- end }} + {{- end }} + containers: + - name: kafka + image: {{ include "kafka.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + command: {{- include "kafka.tplValue" (dict "value" .Values.command "context" $) | nindent 12 }} + {{- if .Values.args }} + args: {{- include "kafka.tplValue" (dict "value" .Values.args "context" $) | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MY_POD_IP + valueFrom: + fieldRef: + fieldPath: status.podIP + - name: MY_POD_NAME + valueFrom: + fieldRef: + fieldPath: metadata.name + - name: KAFKA_CFG_ZOOKEEPER_CONNECT + {{- if .Values.zookeeper.enabled }} + value: {{ include "kafka.zookeeper.fullname" . 
| quote }} + {{- else }} + value: {{ join "," .Values.externalZookeeper.servers | quote }} + {{- end }} + - name: KAFKA_INTER_BROKER_LISTENER_NAME + value: {{ .Values.interBrokerListenerName | quote }} + - name: KAFKA_CFG_LISTENER_SECURITY_PROTOCOL_MAP + {{- if .Values.listenerSecurityProtocolMap }} + value: {{ .Values.listenerSecurityProtocolMap | quote }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }},EXTERNAL:{{ $clientProtocol }}" + {{- else }} + value: "INTERNAL:{{ $interBrokerProtocol }},CLIENT:{{ $clientProtocol }}" + {{- end }} + {{- if or ($clientProtocol | regexFind "SASL") ($interBrokerProtocol | regexFind "SASL") .Values.auth.jaas.zookeeperUser }} + - name: KAFKA_CFG_SASL_ENABLED_MECHANISMS + value: {{ include "kafka.auth.saslMechanisms" ( dict "type" .Values.auth.saslMechanisms ) }} + - name: KAFKA_CFG_SASL_MECHANISM_INTER_BROKER_PROTOCOL + value: {{ upper .Values.auth.saslInterBrokerMechanism | quote }} + {{- end }} + - name: KAFKA_CFG_LISTENERS + {{- if .Values.listeners }} + value: {{ .Values.listeners }} + {{- else if .Values.externalAccess.enabled }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092,EXTERNAL://:9094" + {{- else }} + value: "INTERNAL://:{{ $interBrokerPort }},CLIENT://:9092" + {{- end }} + {{- if .Values.externalAccess.enabled }} + {{- if .Values.externalAccess.autoDiscovery.enabled }} + - name: SHARED_FILE + value: "/shared/info.txt" + {{- end }} + {{- else }} + - name: KAFKA_CFG_ADVERTISED_LISTENERS + {{- if .Values.advertisedListeners }} + value: {{ .Values.advertisedListeners }} + {{- else }} + value: "INTERNAL://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $interBrokerPort }},CLIENT://$(MY_POD_NAME).{{ $fullname }}-headless.{{ $releaseNamespace }}.svc.{{ $clusterDomain }}:{{ $clientPort }}" + {{- end }} + {{- end }} + - name: ALLOW_PLAINTEXT_LISTENER + value: {{ ternary "yes" "no" (or .Values.auth.enabled .Values.allowPlaintextListener) | quote }} + {{- if or (include "kafka.client.saslAuthentication" .) (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_OPTS + value: "-Djava.security.auth.login.config=/opt/bitnami/kafka/config/kafka_jaas.conf" + {{- if (include "kafka.client.saslAuthentication" .) }} + - name: KAFKA_CLIENT_USERS + value: {{ join "," .Values.auth.jaas.clientUsers | quote }} + - name: KAFKA_CLIENT_PASSWORDS + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: client-passwords + {{- end }} + {{- if .Values.auth.jaas.zookeeperUser }} + - name: KAFKA_ZOOKEEPER_PROTOCOL + value: "SASL" + - name: KAFKA_ZOOKEEPER_USER + value: {{ .Values.auth.jaas.zookeeperUser | quote }} + - name: KAFKA_ZOOKEEPER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: zookeeper-password + {{- end }} + {{- if (include "kafka.interBroker.saslAuthentication" .) }} + - name: KAFKA_INTER_BROKER_USER + value: {{ .Values.auth.jaas.interBrokerUser | quote }} + - name: KAFKA_INTER_BROKER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "kafka.jaasSecretName" . }} + key: inter-broker-password + {{- end }} + {{- end }} + {{- if (include "kafka.tlsEncryption" .) 
}}
+            - name: KAFKA_CFG_SSL_ENDPOINT_IDENTIFICATION_ALGORITHM
+              value: {{ .Values.auth.tlsEndpointIdentificationAlgorithm | quote }}
+            {{- if .Values.auth.jksPassword }}
+            - name: KAFKA_CERTIFICATE_PASSWORD
+              value: {{ .Values.auth.jksPassword | quote }}
+            {{- end }}
+            {{- end }}
+            {{- if .Values.metrics.jmx.enabled }}
+            - name: JMX_PORT
+              value: "5555"
+            {{- end }}
+            - name: KAFKA_CFG_DELETE_TOPIC_ENABLE
+              value: {{ .Values.deleteTopicEnable | quote }}
+            - name: KAFKA_CFG_AUTO_CREATE_TOPICS_ENABLE
+              value: {{ .Values.autoCreateTopicsEnable | quote }}
+            - name: KAFKA_HEAP_OPTS
+              value: {{ .Values.heapOpts | quote }}
+            - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MESSAGES
+              value: {{ .Values.logFlushIntervalMessages | quote }}
+            - name: KAFKA_CFG_LOG_FLUSH_INTERVAL_MS
+              value: {{ .Values.logFlushIntervalMs | quote }}
+            - name: KAFKA_CFG_LOG_RETENTION_BYTES
+              value: {{ .Values.logRetentionBytes | replace "_" "" | quote }}
+            - name: KAFKA_CFG_LOG_RETENTION_CHECK_INTERVALS_MS
+              value: {{ .Values.logRetentionCheckIntervalMs | quote }}
+            - name: KAFKA_CFG_LOG_RETENTION_HOURS
+              value: {{ .Values.logRetentionHours | quote }}
+            - name: KAFKA_CFG_MESSAGE_MAX_BYTES
+              value: {{ .Values.maxMessageBytes | replace "_" "" | quote }}
+            - name: KAFKA_CFG_LOG_SEGMENT_BYTES
+              value: {{ .Values.logSegmentBytes | replace "_" "" | quote }}
+            - name: KAFKA_CFG_LOG_DIRS
+              value: {{ .Values.logsDirs | quote }}
+            - name: KAFKA_CFG_DEFAULT_REPLICATION_FACTOR
+              value: {{ .Values.defaultReplicationFactor | quote }}
+            - name: KAFKA_CFG_OFFSETS_TOPIC_REPLICATION_FACTOR
+              value: {{ .Values.offsetsTopicReplicationFactor | quote }}
+            - name: KAFKA_CFG_TRANSACTION_STATE_LOG_REPLICATION_FACTOR
+              value: {{ .Values.transactionStateLogReplicationFactor | quote }}
+            - name: KAFKA_CFG_TRANSACTION_STATE_LOG_MIN_ISR
+              value: {{ .Values.transactionStateLogMinIsr | quote }}
+            - name: KAFKA_CFG_NUM_IO_THREADS
+              value: {{ .Values.numIoThreads | quote }}
+            - name: KAFKA_CFG_NUM_NETWORK_THREADS
+              value: {{ .Values.numNetworkThreads | quote }}
+            - name: KAFKA_CFG_NUM_PARTITIONS
+              value: {{ .Values.numPartitions | quote }}
+            - name: KAFKA_CFG_NUM_RECOVERY_THREADS_PER_DATA_DIR
+              value: {{ .Values.numRecoveryThreadsPerDataDir | quote }}
+            - name: KAFKA_CFG_SOCKET_RECEIVE_BUFFER_BYTES
+              value: {{ .Values.socketReceiveBufferBytes | quote }}
+            - name: KAFKA_CFG_SOCKET_REQUEST_MAX_BYTES
+              value: {{ .Values.socketRequestMaxBytes | replace "_" "" | quote }}
+            - name: KAFKA_CFG_SOCKET_SEND_BUFFER_BYTES
+              value: {{ .Values.socketSendBufferBytes | quote }}
+            - name: KAFKA_CFG_ZOOKEEPER_CONNECTION_TIMEOUT_MS
+              value: {{ .Values.zookeeperConnectionTimeoutMs | quote }}
+            {{- if .Values.extraEnvVars }}
+            {{ include "kafka.tplValue" ( dict "value" .Values.extraEnvVars "context" $) | nindent 12 }}
+            {{- end }}
+          ports:
+            - name: kafka-client
+              containerPort: 9092
+            - name: kafka-internal
+              containerPort: {{ $interBrokerPort }}
+            {{- if .Values.externalAccess.enabled }}
+            - name: kafka-external
+              containerPort: 9094
+            {{- end }}
+          {{- if .Values.livenessProbe.enabled }}
+          livenessProbe:
+            tcpSocket:
+              port: kafka-client
+            initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+            timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+            failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+            periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+            successThreshold: {{ .Values.livenessProbe.successThreshold }}
+          {{- else if .Values.customLivenessProbe }}
+          livenessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customLivenessProbe
"context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.readinessProbe.enabled }}
+          readinessProbe:
+            tcpSocket:
+              port: kafka-client
+            initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+            timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+            failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+            periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+            successThreshold: {{ .Values.readinessProbe.successThreshold }}
+          {{- else if .Values.customReadinessProbe }}
+          readinessProbe: {{- include "kafka.tplValue" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }}
+          {{- end }}
+          {{- if .Values.resources }}
+          resources: {{- toYaml .Values.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: data
+              mountPath: /bitnami/kafka
+            {{- if or .Values.config .Values.existingConfigmap }}
+            - name: kafka-config
+              mountPath: /bitnami/kafka/config/server.properties
+              subPath: server.properties
+            {{- end }}
+            {{- if or .Values.log4j .Values.existingLog4jConfigMap }}
+            - name: log4j-config
+              mountPath: /bitnami/kafka/config/log4j.properties
+              subPath: log4j.properties
+            {{- end }}
+            - name: scripts
+              mountPath: /scripts/setup.sh
+              subPath: setup.sh
+            {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }}
+            - name: shared
+              mountPath: /shared
+            {{- end }}
+            {{- if (include "kafka.tlsEncryption" .) }}
+            - name: kafka-certificates
+              mountPath: /certs
+              readOnly: true
+            {{- end }}
+            {{- if .Values.extraVolumeMounts }}
+            {{- toYaml .Values.extraVolumeMounts | nindent 12 }}
+            {{- end }}
+        {{- if .Values.metrics.jmx.enabled }}
+        - name: jmx-exporter
+          image: {{ template "kafka.metrics.jmx.image" . }}
+          imagePullPolicy: {{ .Values.metrics.jmx.image.pullPolicy | quote }}
+          command:
+            - java
+            - -XX:+UnlockExperimentalVMOptions
+            - -XX:+UseCGroupMemoryLimitForHeap
+            - -XX:MaxRAMFraction=1
+            - -XshowSettings:vm
+            - -jar
+            - jmx_prometheus_httpserver.jar
+            - "5556"
+            - /etc/jmx-kafka/jmx-kafka-prometheus.yml
+          ports:
+            - name: metrics
+              containerPort: 5556
+          {{- if .Values.metrics.jmx.resources }}
+          resources: {{- toYaml .Values.metrics.jmx.resources | nindent 12 }}
+          {{- end }}
+          volumeMounts:
+            - name: jmx-config
+              mountPath: /etc/jmx-kafka
+        {{- end }}
+        {{- if .Values.sidecars }}
+        {{- include "kafka.tplValue" (dict "value" .Values.sidecars "context" $) | nindent 8 }}
+        {{- end }}
+      volumes:
+        {{- if or .Values.config .Values.existingConfigmap }}
+        - name: kafka-config
+          configMap:
+            name: {{ include "kafka.configmapName" . }}
+        {{- end }}
+        {{- if or .Values.log4j .Values.existingLog4jConfigMap }}
+        - name: log4j-config
+          configMap:
+            name: {{ include "kafka.log4j.configMapName" . }}
+        {{- end }}
+        - name: scripts
+          configMap:
+            name: {{ include "kafka.fullname" . }}-scripts
+            defaultMode: 0755
+        {{- if and .Values.externalAccess.enabled .Values.externalAccess.autoDiscovery.enabled }}
+        - name: shared
+          emptyDir: {}
+        {{- end }}
+        {{- if .Values.metrics.jmx.enabled }}
+        - name: jmx-config
+          configMap:
+            name: {{ include "kafka.metrics.jmx.configmapName" . }}
+        {{- end }}
+        {{- if (include "kafka.tlsEncryption" .) }}
+        - name: kafka-certificates
+          secret:
+            secretName: {{ include "kafka.jksSecretName" .
}} + defaultMode: 256 + {{- end }} + {{- if .Values.extraVolumes }} + {{- toYaml .Values.extraVolumes | nindent 8 }} + {{- end }} +{{- if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: + claimName: {{ printf "%s" (tpl .Values.persistence.existingClaim .) }} +{{- else }} + volumeClaimTemplates: + - metadata: + name: data + {{- if .Values.persistence.annotations }} + annotations: {{- include "kafka.tplValue" (dict "value" .Values.persistence.annotations "context" $) | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "kafka.storageClass" . | nindent 8 }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml new file mode 100755 index 000000000..eefe0046d --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/svc-external-access.yaml @@ -0,0 +1,52 @@ +{{- if .Values.externalAccess.enabled }} +{{- $fullName := include "kafka.fullname" . }} +{{- $replicaCount := .Values.replicaCount | int }} +{{- $root := . }} + +{{- range $i, $e := until $replicaCount }} +{{- $targetPod := printf "%s-%d" (printf "%s" $fullName) $i }} +--- +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" $ }}-{{ $i }}-external + labels: {{- include "kafka.labels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + pod: {{ $targetPod }} + {{- if $root.Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" $root.Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or $root.Values.externalAccess.service.annotations $root.Values.commonAnnotations }} + annotations: + {{- if $root.Values.externalAccess.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" $root.Values.externalAccess.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if $root.Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" $root.Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ $root.Values.externalAccess.service.type }} + {{- if eq $root.Values.externalAccess.service.type "LoadBalancer" }} + {{- if not (empty $root.Values.externalAccess.service.loadBalancerIPs) }} + loadBalancerIP: {{ index $root.Values.externalAccess.service.loadBalancerIPs $i }} + {{- end }} + {{- if $root.Values.externalAccess.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml $root.Values.externalAccess.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-kafka + port: {{ $root.Values.externalAccess.service.port }} + {{- if not (empty $root.Values.externalAccess.service.nodePorts) }} + nodePort: {{ index $root.Values.externalAccess.service.nodePorts $i }} + {{- else }} + nodePort: null + {{- end }} + targetPort: kafka-external + selector: {{- include "kafka.matchLabels" $ | nindent 4 }} + app.kubernetes.io/component: kafka + statefulset.kubernetes.io/pod-name: {{ $targetPod }} +--- +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/svc-headless.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/svc-headless.yaml new file mode 100755 index 000000000..e7c2e5e6e --- 
/dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/svc-headless.yaml @@ -0,0 +1,26 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }}-headless + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-client + port: {{ .Values.service.port }} + protocol: TCP + targetPort: kafka-client + - name: tcp-internal + port: {{ .Values.service.internalPort }} + protocol: TCP + targetPort: kafka-internal + selector: {{- include "kafka.matchLabels" . | nindent 4 }} + app.kubernetes.io/component: kafka diff --git a/scripts/helm/helmcharts/databases/charts/kafka/templates/svc.yaml b/scripts/helm/helmcharts/databases/charts/kafka/templates/svc.yaml new file mode 100755 index 000000000..189cb9ffd --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/templates/svc.yaml @@ -0,0 +1,49 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "kafka.fullname" . }} + labels: {{- include "kafka.labels" . | nindent 4 }} + app.kubernetes.io/component: kafka + {{- if .Values.commonLabels }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }} + {{- end }} + {{- if or .Values.service.annotations .Values.commonAnnotations }} + annotations: + {{- if .Values.service.annotations }} + {{ include "kafka.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + {{- include "kafka.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if eq .Values.service.type "LoadBalancer" }} + {{- if .Values.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + {{- if .Values.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- toYaml .Values.service.loadBalancerSourceRanges | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-client + port: {{ .Values.service.port }} + protocol: TCP + targetPort: kafka-client + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePorts.client)) }} + nodePort: {{ .Values.service.nodePorts.client }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + {{- if and .Values.externalAccess.enabled (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) }} + - name: tcp-external + port: {{ .Values.service.externalPort }} + protocol: TCP + targetPort: kafka-external + {{- if (not (empty .Values.service.nodePorts.external)) }} + nodePort: {{ .Values.service.nodePorts.external }} + {{- end }} + {{- end }} + selector: {{- include "kafka.matchLabels" . 
| nindent 4 }}
+    app.kubernetes.io/component: kafka
diff --git a/scripts/helm/helmcharts/databases/charts/kafka/values-production.yaml b/scripts/helm/helmcharts/databases/charts/kafka/values-production.yaml
new file mode 100755
index 000000000..af6f43dba
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/kafka/values-production.yaml
@@ -0,0 +1,931 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+# global:
+#   imageRegistry: myRegistryName
+#   imagePullSecrets:
+#     - myRegistryKeySecretName
+#   storageClass: myStorageClass
+
+## Bitnami Kafka image version
+## ref: https://hub.docker.com/r/bitnami/kafka/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/kafka
+  tag: 2.6.0-debian-10-r30
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ## Example:
+  ## pullSecrets:
+  ##   - myRegistryKeySecretName
+  ##
+  pullSecrets: []
+
+  ## Set to true if you would like to see extra information on logs
+  ##
+  debug: false
+
+## String to partially override kafka.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override kafka.fullname template
+##
+# fullnameOverride:
+
+## Kubernetes Cluster Domain
+##
+clusterDomain: cluster.local
+
+## Add labels to all the deployed resources
+##
+commonLabels: {}
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+## Kafka Configuration
+## Specify content for server.properties
+## The server.properties is auto-generated based on other parameters when this parameter is not specified
+##
+## Example:
+## config: |-
+##   broker.id=-1
+##   listeners=PLAINTEXT://:9092
+##   advertised.listeners=PLAINTEXT://KAFKA_IP:9092
+##   num.network.threads=3
+##   num.io.threads=8
+##   socket.send.buffer.bytes=102400
+##   socket.receive.buffer.bytes=102400
+##   socket.request.max.bytes=104857600
+##   log.dirs=/bitnami/kafka/data
+##   num.partitions=1
+##   num.recovery.threads.per.data.dir=1
+##   offsets.topic.replication.factor=1
+##   transaction.state.log.replication.factor=1
+##   transaction.state.log.min.isr=1
+##   log.flush.interval.messages=10000
+##   log.flush.interval.ms=1000
+##   log.retention.hours=168
+##   log.retention.bytes=1073741824
+##   log.segment.bytes=1073741824
+##   log.retention.check.interval.ms=300000
+##   zookeeper.connect=ZOOKEEPER_SERVICE_NAME
+##   zookeeper.connection.timeout.ms=6000
+##   group.initial.rebalance.delay.ms=0
+##
+# config:
+
+## ConfigMap with Kafka Configuration
+## NOTE: This will override config
+##
+# existingConfigmap:
+
+## Kafka Log4J Configuration
+## An optional log4j.properties file to overwrite the default of the Kafka brokers.
+## See an example log4j.properties at:
+## https://github.com/apache/kafka/blob/trunk/config/log4j.properties
+##
+# log4j:
+
+## Kafka Log4j ConfigMap
+## The name of an existing ConfigMap containing a log4j.properties file.
+## NOTE: this will override log4j.
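+## For reference, a hedged sketch (not a chart default) of what an inline `log4j`
+## value could look like; the appender and pattern below are illustrative only:
+## log4j: |-
+##   log4j.rootLogger=INFO, stdout
+##   log4j.appender.stdout=org.apache.log4j.ConsoleAppender
+##   log4j.appender.stdout.layout=org.apache.log4j.PatternLayout
+##   log4j.appender.stdout.layout.ConversionPattern=[%d] %p %m (%c)%n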
+##
+# existingLog4jConfigMap:
+
+## Kafka's Java Heap size
+##
+heapOpts: -Xmx1024m -Xms1024m
+
+## Switch to enable topic deletion or not.
+##
+deleteTopicEnable: false
+
+## Switch to enable auto creation of topics.
+## Enabling auto creation of topics is not recommended for production or similar environments.
+##
+autoCreateTopicsEnable: false
+
+## The number of messages to accept before forcing a flush of data to disk.
+##
+logFlushIntervalMessages: 10000
+
+## The maximum amount of time a message can sit in a log before we force a flush.
+##
+logFlushIntervalMs: 1000
+
+## A size-based retention policy for logs.
+##
+logRetentionBytes: _1073741824
+
+## The interval at which log segments are checked to see if they can be deleted.
+##
+logRetentionCheckIntervalMs: 300000
+
+## The minimum age of a log file to be eligible for deletion due to age.
+##
+logRetentionHours: 168
+
+## The maximum size of a log segment file. When this size is reached a new log segment will be created.
+##
+logSegmentBytes: _1073741824
+
+## A comma-separated list of directories under which to store log files.
+##
+logsDirs: /bitnami/kafka/data
+
+## The largest record batch size allowed by Kafka
+##
+maxMessageBytes: _1000012
+
+## Default replication factors for automatically created topics
+##
+defaultReplicationFactor: 3
+
+## The replication factor for the offsets topic
+##
+offsetsTopicReplicationFactor: 3
+
+## The replication factor for the transaction topic
+##
+transactionStateLogReplicationFactor: 3
+
+## Overridden min.insync.replicas config for the transaction topic
+##
+transactionStateLogMinIsr: 3
+
+## The number of threads doing disk I/O.
+##
+numIoThreads: 8
+
+## The number of threads handling network requests.
+##
+numNetworkThreads: 3
+
+## The default number of log partitions per topic.
+##
+numPartitions: 1
+
+## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown.
+##
+numRecoveryThreadsPerDataDir: 1
+
+## The receive buffer (SO_RCVBUF) used by the socket server.
+##
+socketReceiveBufferBytes: 102400
+
+## The maximum size of a request that the socket server will accept (protection against OOM).
+##
+socketRequestMaxBytes: _104857600
+
+## The send buffer (SO_SNDBUF) used by the socket server.
+##
+socketSendBufferBytes: 102400
+
+## Timeout in ms for connecting to zookeeper.
+##
+zookeeperConnectionTimeoutMs: 6000
+
+## Command and args for running the container.
Use the array form.
+##
+command:
+  - /scripts/setup.sh
+args:
+
+## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY}
+## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration
+## Example:
+## extraEnvVars:
+##   - name: KAFKA_CFG_BACKGROUND_THREADS
+##     value: "10"
+##
+extraEnvVars: []
+
+## extraVolumes and extraVolumeMounts allow you to mount other volumes
+## Examples:
+# extraVolumes:
+#   - name: kafka-jaas
+#     secret:
+#       secretName: kafka-jaas
+# extraVolumeMounts:
+#   - name: kafka-jaas
+#     mountPath: /bitnami/kafka/config/kafka_jaas.conf
+#     subPath: kafka_jaas.conf
+extraVolumes: []
+extraVolumeMounts: []
+
+## Extra objects to deploy (value evaluated as a template)
+##
+extraDeploy: []
+
+## Authentication parameters
+## https://github.com/bitnami/bitnami-docker-kafka#security
+##
+auth:
+  ## Authentication protocol for client and inter-broker communications
+  ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls'
+  ## This table shows the security provided on each protocol:
+  ## | Method    | Authentication               | Encryption via TLS |
+  ## | plaintext | None                         | No                 |
+  ## | tls       | None                         | Yes                |
+  ## | mtls      | Yes (two-way authentication) | Yes                |
+  ## | sasl      | Yes (via SASL)               | No                 |
+  ## | sasl_tls  | Yes (via SASL)               | Yes                |
+  ##
+  clientProtocol: sasl
+  interBrokerProtocol: sasl
+
+  ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls
+  ##
+  saslMechanisms: plain,scram-sha-256,scram-sha-512
+  ## SASL mechanism for inter broker communication
+  ##
+  saslInterBrokerMechanism: plain
+
+  ## Name of the existing secret containing the truststore and
+  ## one keystore per Kafka broker you have in the Kafka cluster.
+  ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used.
+  ## Create this secret following the steps below:
+  ## 1) Generate your truststore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh
+  ## 2) Rename your truststore to `kafka.truststore.jks`.
+  ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker.
+  ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create:
+  ##       kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ...
+  ## Alternatively, you can put your JKS files under the files/jks directory
+  ##
+  # jksSecret:
+
+  ## Password to access the JKS files when they are password-protected.
+  ##
+  # jksPassword:
+
+  ## The endpoint identification algorithm used by clients to validate server host name.
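+  ## (With the default "https" algorithm, clients verify that the broker
+  ## certificate matches the host name they connected to.)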
+ ## Disable server host name verification by setting it to an empty string + ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + tlsEndpointIdentificationAlgorithm: https + + ## JAAS configuration for SASL authentication + ## MANDATORY when method is 'sasl', or 'sasl_tls' + ## + jaas: + ## Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + + ## Kafka client passwords + ## + ## clientPasswords: + ## - password1 + ## - password2 + ## + clientPasswords: [] + + ## Kafka inter broker communication user + ## + interBrokerUser: admin + + ## Kafka inter broker communication password + ## + interBrokerPassword: "" + + ## Kafka Zookeeper user + ## + zookeeperUser: zookeeperUser + + ## Kafka Zookeeper password + ## + zookeeperPassword: zookeeperPassword + + ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-password=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + # existingSecret: + +## The address(es) the socket server listens on. +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] + +## The address(es) (hostname:port) the brokers will advertise to producers and consumers. +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] + +## The listener->protocol mapping +## When it's nil, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +# listenerSecurityProtocolMap: + +## Allow to use the PLAINTEXT listener. +## +allowPlaintextListener: false + +## Name of listener used for communication between brokers. +## +interBrokerListenerName: INTERNAL + +## Number of Kafka brokers to deploy +## +replicaCount: 3 + +## StrategyType, can be set to RollingUpdate or OnDelete by default. +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +podLabels: {} + +## Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +priorityClassName: "" + +## Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. 
Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## Kafka containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 1Gi + requests: {} + # cpu: 250m + # memory: 256Mi + +## Kafka containers' liveness and readiness probes. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 10 + timeoutSeconds: 5 + # failureThreshold: 3 + # periodSeconds: 10 + # successThreshold: 1 +readinessProbe: + tcpSocket: + port: kafka-client + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + # periodSeconds: 10 + # successThreshold: 1 + +## Pod Disruption Budget configuration +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + # minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: 1 + +## Add sidecars to the pod. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## Kafka port for client connections + ## + port: 9092 + ## Kafka port for inter-broker connections + ## + internalPort: 9093 + ## Kafka port for external connections + ## + externalPort: 9094 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + external: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. 
Evaluated as a template
+  ##
+  annotations: {}
+
+## External Access to Kafka brokers configuration
+##
+externalAccess:
+  ## Enable Kubernetes external cluster access to Kafka brokers
+  ##
+  enabled: false
+
+  ## External IPs auto-discovery configuration
+  ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API
+  ## Note: RBAC might be required
+  ##
+  autoDiscovery:
+    ## Enable external IP/ports auto-discovery
+    ##
+    enabled: false
+    ## Bitnami Kubectl image
+    ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/
+    ##
+    image:
+      registry: docker.io
+      repository: bitnami/kubectl
+      tag: 1.17.12-debian-10-r3
+      ## Specify an imagePullPolicy
+      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+      ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+      ##
+      pullPolicy: IfNotPresent
+      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+      ## Example:
+      ## pullSecrets:
+      ##   - myRegistryKeySecretName
+      ##
+      pullSecrets: []
+    ## Init Container resource requests and limits
+    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources:
+      # We usually recommend not to specify default resources and to leave this as a conscious
+      # choice for the user. This also increases chances charts run on environments with little
+      # resources, such as Minikube. If you do want to specify resources, uncomment the following
+      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+      limits: {}
+      #   cpu: 100m
+      #   memory: 128Mi
+      requests: {}
+      #   cpu: 100m
+      #   memory: 128Mi
+
+  ## Parameters to configure K8s service(s) used to externally access Kafka brokers
+  ## A new service per broker will be created
+  ##
+  service:
+    ## Service type. Allowed values: LoadBalancer or NodePort
+    ##
+    type: LoadBalancer
+    ## Port used when service type is LoadBalancer
+    ##
+    port: 9094
+    ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount
+    ## Example:
+    ## loadBalancerIPs:
+    ##   - X.X.X.X
+    ##   - Y.Y.Y.Y
+    ##
+    loadBalancerIPs: []
+    ## Load Balancer sources
+    ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+    ## Example:
+    ## loadBalancerSourceRanges:
+    ##   - 10.10.10.0/24
+    ##
+    loadBalancerSourceRanges: []
+    ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount
+    ## Example:
+    ## nodePorts:
+    ##   - 30001
+    ##   - 30002
+    ##
+    nodePorts: []
+    ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners.
+    ## If not specified, the container will try to get the kubernetes node external IP
+    ##
+    # domain: mydomain.com
+    ## Provide any additional annotations which may be required. Evaluated as a template
+    ##
+    annotations: {}
+
+## Persistence parameters
+##
+persistence:
+  enabled: true
+  ## A manually managed Persistent Volume and Claim
+  ## If defined, PVC must be created manually before volume will be bound
+  ## The value is evaluated as a template
+  ##
+  # existingClaim:
+  ## PV Storage Class
+  ## If defined, storageClassName: <storageClass>
+  ## If set to "-", storageClassName: "", which disables dynamic provisioning
+  ## If undefined (the default) or set to null, no storageClassName spec is
+  ## set, choosing the default provisioner.
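+  ## Usage sketch (illustrative class and size, assuming a storage class named
+  ## "fast-ssd" exists in the cluster):
+  ##   helm install my-kafka . --set persistence.storageClass=fast-ssd --set persistence.size=50Gi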
+  ##
+  # storageClass: "-"
+  ## PV Access Mode
+  ##
+  accessModes:
+    - ReadWriteOnce
+  ## PVC size
+  ##
+  size: 8Gi
+  ## PVC annotations
+  ##
+  annotations: {}
+
+## Init Container parameters
+## Change the owner and group of the persistent volume(s) mountpoint(s) to 'runAsUser:fsGroup' on each component
+## values from the securityContext section of the component
+##
+volumePermissions:
+  enabled: false
+  ## Bitnami Minideb image
+  ## ref: https://hub.docker.com/r/bitnami/minideb/tags/
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init Container resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+
+## Kafka pods ServiceAccount
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## Specifies whether a ServiceAccount should be created
+  ##
+  create: true
+  ## The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the kafka.fullname template
+  ##
+  # name:
+
+## Role Based Access
+## ref: https://kubernetes.io/docs/admin/authorization/rbac/
+##
+rbac:
+  ## Specifies whether RBAC rules should be created
+  ## binding Kafka ServiceAccount to a role
+  ## that allows Kafka pods to query the K8s API
+  ##
+  create: false
+
+## Prometheus Exporters / Metrics
+##
+metrics:
+  ## Prometheus Kafka Exporter: exposes complementary metrics to the JMX Exporter
+  ##
+  kafka:
+    enabled: true
+
+    ## Bitnami Kafka exporter image
+    ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
+    ##
+    image:
+      registry: docker.io
+      repository: bitnami/kafka-exporter
+      tag: 1.2.0-debian-10-r220
+      ## Specify an imagePullPolicy
+      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+      ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+      ##
+      pullPolicy: IfNotPresent
+      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+      ## Example:
+      ## pullSecrets:
+      ##   - myRegistryKeySecretName
+      ##
+      pullSecrets: []
+
+    ## Extra flags to be passed to Kafka exporter
+    ## Example:
+    ## extraFlags:
+    ##   tls.insecure-skip-tls-verify: ""
+    ##   web.telemetry-path: "/metrics"
+    ##
+    extraFlags: {}
+
+    ## Name of the existing secret containing the optional certificate and key files
+    ## for Kafka Exporter client authentication
+    ##
+    # certificatesSecret:
+
+    ## Prometheus Kafka Exporter's resource requests and limits
+    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources:
+      # We usually recommend not to specify default resources and to leave this as a conscious
+      # choice for the user. This also increases chances charts run on environments with little
+      # resources, such as Minikube. If you do want to specify resources, uncomment the following
+      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+      limits: {}
+      #   cpu: 100m
+      #   memory: 128Mi
+      requests: {}
+      #   cpu: 100m
+      #   memory: 128Mi
+
+    ## Service configuration
+    ##
+    service:
+      ## Kafka Exporter Service type
+      ##
+      type: ClusterIP
+      ## Kafka Exporter Prometheus port
+      ##
+      port: 9308
+      ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+      ##
+      nodePort: ""
+      ## Set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the Kafka Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## + jmx: + enabled: true + + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.14.0-debian-10-r15 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Prometheus JMX Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## JMX Exporter Service type + ## + type: ClusterIP + ## JMX Exporter Prometheus port + ## + port: 5556 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the JMX Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/path: "/" + + ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted + ## values will be exposed via JMX Exporter. They must also be exposed via Rules. 
To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + + ## Prometheus JMX exporter configuration + ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + + ## ConfigMap with Prometheus JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + # existingConfigmap: + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + +## +## Zookeeper chart configuration +## +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + enabled: true + auth: + ## Enable Zookeeper auth + ## + enabled: true + ## User that will use Zookeeper clients to auth + ## + clientUser: zookeeperUser + ## Password that will use Zookeeper clients to auth + ## + clientPassword: zookeeperPassword + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + serverUsers: zookeeperUser + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + serverPasswords: zookeeperPassword + metrics: + enabled: true + +## This value is only used when zookeeper.enabled is set to false +## +externalZookeeper: + ## Server or list of external zookeeper servers to use. 
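+  ## Example (the hostnames below are illustrative only, not defaults of this chart):
+  ## servers:
+  ##   - zookeeper-0.zookeeper-headless.db.svc.cluster.local:2181
+  ##   - zookeeper-1.zookeeper-headless.db.svc.cluster.local:2181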
+ ## + servers: [] diff --git a/scripts/helm/helmcharts/databases/charts/kafka/values.yaml b/scripts/helm/helmcharts/databases/charts/kafka/values.yaml new file mode 100755 index 000000000..154d71bd5 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/kafka/values.yaml @@ -0,0 +1,934 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +# global: +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami Kafka image version +## ref: https://hub.docker.com/r/bitnami/kafka/tags/ +## +image: + registry: docker.io + repository: bitnami/kafka + tag: 2.6.0-debian-10-r30 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Set to true if you would like to see extra information on logs + ## + debug: false + +## String to partially override kafka.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override kafka.fullname template +## +# fullnameOverride: + +## Kubernetes Cluster Domain +## +clusterDomain: cluster.local + +## Add labels to all the deployed resources +## +commonLabels: {} + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +## Kafka Configuration +## Specify content for server.properties +## The server.properties is auto-generated based on other parameters when this paremeter is not specified +## +## Example: +## config: |- +## broker.id=-1 +## listeners=PLAINTEXT://:9092 +## advertised.listeners=PLAINTEXT://KAFKA_IP:9092 +## num.network.threads=3 +## num.io.threads=8 +## socket.send.buffer.bytes=102400 +## socket.receive.buffer.bytes=102400 +## socket.request.max.bytes=104857600 +## log.dirs=/bitnami/kafka/data +## num.partitions=1 +## num.recovery.threads.per.data.dir=1 +## offsets.topic.replication.factor=1 +## transaction.state.log.replication.factor=1 +## transaction.state.log.min.isr=1 +## log.flush.interval.messages=10000 +## log.flush.interval.ms=1000 +## log.retention.hours=168 +## log.retention.bytes=1073741824 +## log.segment.bytes=1073741824 +## log.retention.check.interval.ms=300000 +## zookeeper.connect=ZOOKEEPER_SERVICE_NAME +## zookeeper.connection.timeout.ms=6000 +## group.initial.rebalance.delay.ms=0 +## +# config: + +## ConfigMap with Kafka Configuration +## NOTE: This will override config +## +# existingConfigmap: + +## Kafka Log4J Configuration +## An optional log4j.properties file to overwrite the default of the Kafka brokers. +## See an example log4j.properties at: +## https://github.com/apache/kafka/blob/trunk/config/log4j.properties +## +# log4j: + +## Kafka Log4j ConfigMap +## The name of an existing ConfigMap containing a log4j.properties file. +## NOTE: this will override log4j. +## +# existingLog4jConfigMap: + +## Kafka's Java Heap size +## +heapOpts: -Xmx1024m -Xms1024m + +## Switch to enable topic deletion or not. 
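+## For example, to allow topics to be removed with the Kafka admin tools
+## (equivalent to delete.topic.enable=true in server.properties; shown only as
+## an illustration, not the chart default):
+# deleteTopicEnable: true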
+## +deleteTopicEnable: false + +## Switch to enable auto creation of topics. +## Enabling auto creation of topics not recommended for production or similar environments. +## +autoCreateTopicsEnable: true + +## The number of messages to accept before forcing a flush of data to disk. +## +logFlushIntervalMessages: 10000 + +## The maximum amount of time a message can sit in a log before we force a flush. +## +logFlushIntervalMs: 1000 + +## A size-based retention policy for logs. +## +logRetentionBytes: _1073741824 + +## The interval at which log segments are checked to see if they can be deleted. +## +logRetentionCheckIntervalMs: 300000 + +## The minimum age of a log file to be eligible for deletion due to age. +## +logRetentionHours: 168 + +## The maximum size of a log segment file. When this size is reached a new log segment will be created. +## +logSegmentBytes: _1073741824 + +## A comma separated list of directories under which to store log files. +## +logsDirs: /bitnami/kafka/data + +## The largest record batch size allowed by Kafka +## +maxMessageBytes: _1000012 + +## Default replication factors for automatically created topics +## +defaultReplicationFactor: 1 + +## The replication factor for the offsets topic +## +offsetsTopicReplicationFactor: 1 + +## The replication factor for the transaction topic +## +transactionStateLogReplicationFactor: 1 + +## Overridden min.insync.replicas config for the transaction topic +## +transactionStateLogMinIsr: 1 + +## The number of threads doing disk I/O. +## +numIoThreads: 8 + +## The number of threads handling network requests. +## +numNetworkThreads: 3 + +## The default number of log partitions per topic. +## +numPartitions: 1 + +## The number of threads per data directory to be used for log recovery at startup and flushing at shutdown. +## +numRecoveryThreadsPerDataDir: 1 + +## The receive buffer (SO_RCVBUF) used by the socket server. +## +socketReceiveBufferBytes: 102400 + +## The maximum size of a request that the socket server will accept (protection against OOM). +## +socketRequestMaxBytes: _104857600 + +## The send buffer (SO_SNDBUF) used by the socket server. +## +socketSendBufferBytes: 102400 + +## Timeout in ms for connecting to zookeeper. +## +zookeeperConnectionTimeoutMs: 6000 + +## Command and args for running the container. 
Use array form +## +command: + - /scripts/setup.sh +args: + +## All the parameters from the configuration file can be overwritten by using environment variables with this format: KAFKA_CFG_{KEY} +## ref: https://github.com/bitnami/bitnami-docker-kafka#configuration +## Example: +## extraEnvVars: +## - name: KAFKA_CFG_BACKGROUND_THREADS +## value: "10" +## +extraEnvVars: [] + +## extraVolumes and extraVolumeMounts allows you to mount other volumes +## Examples: +# extraVolumes: +# - name: kafka-jaas +# secret: +# secretName: kafka-jaas +# extraVolumeMounts: +# - name: kafka-jaas +# mountPath: /bitnami/kafka/config/kafka_jaas.conf +# subPath: kafka_jaas.conf +extraVolumes: [] +extraVolumeMounts: [] + +## Extra objects to deploy (value evaluated as a template) +## +extraDeploy: [] + +## Authentication parameteres +## https://github.com/bitnami/bitnami-docker-kafka#security +## +auth: + ## Authentication protocol for client and inter-broker communications + ## Supported values: 'plaintext', 'tls', 'mtls', 'sasl' and 'sasl_tls' + ## This table shows the security provided on each protocol: + ## | Method | Authentication | Encryption via TLS | + ## | plaintext | None | No | + ## | tls | None | Yes | + ## | mtls | Yes (two-way authentication) | Yes | + ## | sasl | Yes (via SASL) | No | + ## | sasl_tls | Yes (via SASL) | Yes | + ## + clientProtocol: plaintext + interBrokerProtocol: plaintext + + ## Allowed SASL mechanisms when clientProtocol or interBrokerProtocol are using either sasl or sasl_tls + ## + saslMechanisms: plain,scram-sha-256,scram-sha-512 + ## SASL mechanism for inter broker communication + ## + saslInterBrokerMechanism: plain + + ## Name of the existing secret containing the truststore and + ## one keystore per Kafka broker you have in the Kafka cluster. + ## MANDATORY when 'tls', 'mtls', or 'sasl_tls' authentication protocols are used. + ## Create this secret following the steps below: + ## 1) Generate your trustore and keystore files. Helpful script: https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh + ## 2) Rename your truststore to `kafka.truststore.jks`. + ## 3) Rename your keystores to `kafka-X.keystore.jks` where X is the ID of each Kafka broker. + ## 4) Run the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks ... + ## Alternatively, you can put your JKS files under the files/jks directory + ## + # jksSecret: + + ## Password to access the JKS files when they are password-protected. + ## + # jksPassword: + + ## The endpoint identification algorithm used by clients to validate server host name. + ## Disable server host name verification by setting it to an empty string + ## See: https://docs.confluent.io/current/kafka/authentication_ssl.html#optional-settings + ## + tlsEndpointIdentificationAlgorithm: https + + ## JAAS configuration for SASL authentication + ## MANDATORY when method is 'sasl', or 'sasl_tls' + ## + jaas: + ## Kafka client user list + ## + ## clientUsers: + ## - user1 + ## - user2 + ## + clientUsers: + - user + + ## Kafka client passwords. This is mandatory if more than one user is specified in clientUsers. 
+ ## + ## clientPasswords: + ## - password1 + ## - password2" + ## + clientPasswords: [] + + ## Kafka inter broker communication user + ## + interBrokerUser: admin + + ## Kafka inter broker communication password + ## + interBrokerPassword: "" + + ## Kafka Zookeeper user + ## + # zookeeperUser: + + ## Kafka Zookeeper password + ## + # zookeeperPassword: + + ## Name of the existing secret containing credentials for clientUsers, interBrokerUser and zookeeperUser. + ## Create this secret running the command below where SECRET_NAME is the name of the secret you want to create: + ## kubectl create secret generic SECRET_NAME --from-literal=client-passwords=CLIENT_PASSWORD1,CLIENT_PASSWORD2 --from-literal=inter-broker-password=INTER_BROKER_PASSWORD --from-literal=zookeeper-password=ZOOKEEPER_PASSWORD + ## + # existingSecret: + +## The address(es) the socket server listens on. +## When it's set to an empty array, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +listeners: [] + +## The address(es) (hostname:port) the brokers will advertise to producers and consumers. +## When it's set to an empty array, the advertised listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +advertisedListeners: [] + +## The listener->protocol mapping +## When it's nil, the listeners will be configured +## based on the authentication protocols (auth.clientProtocol and auth.interBrokerProtocol parameters) +## +# listenerSecurityProtocolMap: + +## Allow to use the PLAINTEXT listener. +## +allowPlaintextListener: true + +## Name of listener used for communication between brokers. +## +interBrokerListenerName: INTERNAL + +## Number of Kafka brokers to deploy +## +replicaCount: 2 + +## StrategyType, can be set to RollingUpdate or OnDelete by default. +## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets +## +updateStrategy: RollingUpdate + +## Partition update strategy +## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions +## +# rollingUpdatePartition: + +## Pod labels. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ +## +podLabels: {} + +## Pod annotations. Evaluated as a template +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Name of the priority class to be used by kafka pods, priority class needs to be created beforehand +## Ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption/ +## +priorityClassName: "" + +## Affinity for pod assignment. Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template +## ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment. 
Evaluated as a template +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## Kafka pods' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-pod +## +podSecurityContext: + fsGroup: 1001 + runAsUser: 1001 + +## Kafka containers' Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/#set-the-security-context-for-a-container +## Example: +## containerSecurityContext: +## capabilities: +## drop: ["NET_RAW"] +## readOnlyRootFilesystem: true +## +containerSecurityContext: {} + +## Kafka containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 1Gi + requests: {} + # cpu: 250m + # memory: 256Mi + +## Kafka containers' liveness and readiness probes. Evaluated as a template. +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 10 + timeoutSeconds: 5 + # failureThreshold: 3 + # periodSeconds: 10 + # successThreshold: 1 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + failureThreshold: 6 + timeoutSeconds: 5 + # periodSeconds: 10 + # successThreshold: 1 + +## Custom liveness/readiness probes that will override the default ones +## +customLivenessProbe: {} +customReadinessProbe: {} + +## Pod Disruption Budget configuration +## The PDB will only be created if replicaCount is greater than 1 +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions +## +pdb: + create: true + ## Min number of pods that must still be available after the eviction + ## + # minAvailable: 1 + ## Max number of pods that can be unavailable after the eviction + ## + maxUnavailable: 1 + +## Add sidecars to the pod. +## Example: +## sidecars: +## - name: your-image-name +## image: your-image +## imagePullPolicy: Always +## ports: +## - name: portname +## containerPort: 1234 +## +sidecars: {} + +## Service parameters +## +service: + ## Service type + ## + type: ClusterIP + ## Kafka port for client connections + ## + port: 9092 + ## Kafka port for inter-broker connections + ## + internalPort: 9093 + ## Kafka port for external connections + ## + externalPort: 9094 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePorts: + client: "" + external: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Provide any additional annotations which may be required. 
Evaluated as a template + ## + annotations: {} + +## External Access to Kafka brokers configuration +## +externalAccess: + ## Enable Kubernetes external cluster access to Kafka brokers + ## + enabled: false + + ## External IPs auto-discovery configuration + ## An init container is used to auto-detect LB IPs or node ports by querying the K8s API + ## Note: RBAC might be required + ## + autoDiscovery: + ## Enable external IP/ports auto-discovery + ## + enabled: false + ## Bitnami Kubectl image + ## ref: https://hub.docker.com/r/bitnami/kubectl/tags/ + ## + image: + registry: docker.io + repository: bitnami/kubectl + tag: 1.17.12-debian-10-r3 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + ## Init Container resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Parameters to configure K8s service(s) used to externally access Kafka brokers + ## A new service per broker will be created + ## + service: + ## Service type. Allowed values: LoadBalancer or NodePort + ## + type: LoadBalancer + ## Port used when service type is LoadBalancer + ## + port: 9094 + ## Array of load balancer IPs for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## loadBalancerIPs: + ## - X.X.X.X + ## - Y.Y.Y.Y + ## + loadBalancerIPs: [] + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Array of node ports used for each Kafka broker. Length must be the same as replicaCount + ## Example: + ## nodePorts: + ## - 30001 + ## - 30002 + ## + nodePorts: [] + ## When service type is NodePort, you can specify the domain used for Kafka advertised listeners. + ## If not specified, the container will try to get the kubernetes node external IP + ## + # domain: mydomain.com + ## Provide any additional annotations which may be required. Evaluated as a template + ## + annotations: {} + +## Persistence paramaters +## +persistence: + enabled: true + ## A manually managed Persistent Volume and Claim + ## If defined, PVC must be created manually before volume will be bound + ## The value is evaluated as a template + ## + # existingClaim: + ## PV Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. 
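+  ## For example, to request a specific class (the class name below is
+  ## illustrative; use one that actually exists in your cluster):
+  ## storageClass: "standard"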
+  ##
+  # storageClass: "-"
+  ## PV Access Mode
+  ##
+  accessModes:
+    - ReadWriteOnce
+  ## PVC size
+  ##
+  size: 8Gi
+  ## PVC annotations
+  ##
+  annotations: {}
+
+## Init Container parameters
+## Change the owner and group of the persistent volume(s) mountpoint(s) to the 'runAsUser:fsGroup'
+## values from the securityContext section of the component
+##
+volumePermissions:
+  enabled: false
+  ## Bitnami Minideb image
+  ## ref: https://hub.docker.com/r/bitnami/minideb/tags/
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ## Example:
+    ## pullSecrets:
+    ##   - myRegistryKeySecretName
+    ##
+    pullSecrets: []
+  ## Init Container resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  resources:
+    # We usually recommend not to specify default resources and to leave this as a conscious
+    # choice for the user. This also increases chances charts run on environments with little
+    # resources, such as Minikube. If you do want to specify resources, uncomment the following
+    # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+    limits: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+    requests: {}
+    #   cpu: 100m
+    #   memory: 128Mi
+
+## Kafka pods ServiceAccount
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  ## Specifies whether a ServiceAccount should be created
+  ##
+  create: true
+  ## The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the kafka.fullname template
+  ##
+  # name:
+
+## Role Based Access
+## ref: https://kubernetes.io/docs/admin/authorization/rbac/
+##
+rbac:
+  ## Specifies whether RBAC rules should be created
+  ## binding Kafka ServiceAccount to a role
+  ## that allows Kafka pods to query the K8s API
+  ##
+  create: false
+
+## Prometheus Exporters / Metrics
+##
+metrics:
+  ## Prometheus Kafka Exporter: exposes complementary metrics to JMX Exporter
+  ##
+  kafka:
+    enabled: false
+
+    ## Bitnami Kafka exporter image
+    ## ref: https://hub.docker.com/r/bitnami/kafka-exporter/tags/
+    ##
+    image:
+      registry: docker.io
+      repository: bitnami/kafka-exporter
+      tag: 1.2.0-debian-10-r220
+      ## Specify an imagePullPolicy
+      ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+      ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+      ##
+      pullPolicy: IfNotPresent
+      ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace)
+      ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+      ## Example:
+      ## pullSecrets:
+      ##   - myRegistryKeySecretName
+      ##
+      pullSecrets: []
+
+    ## Extra flags to be passed to Kafka exporter
+    ## Example:
+    ## extraFlags:
+    ##   tls.insecure-skip-tls-verify: ""
+    ##   web.telemetry-path: "/metrics"
+    ##
+    extraFlags: {}
+
+    ## Name of the existing secret containing the optional certificate and key files
+    ## for Kafka Exporter client authentication
+    ##
+    # certificatesSecret:
+
+    ## Prometheus Kafka Exporter's resource requests and limits
+    ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+    ##
+    resources:
+      # We usually recommend not to specify default resources and to leave this as a conscious
+      # choice for the user. This also increases chances charts run on environments with little
+      # resources, such as Minikube. If you do want to specify resources, uncomment the following
+      # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+      limits: {}
+      #   cpu: 100m
+      #   memory: 128Mi
+      requests: {}
+      #   cpu: 100m
+      #   memory: 128Mi
+
+    ## Service configuration
+    ##
+    service:
+      ## Kafka Exporter Service type
+      ##
+      type: ClusterIP
+      ## Kafka Exporter Prometheus port
+      ##
+      port: 9308
+      ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+      ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+      ##
+      nodePort: ""
+      ## Set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the Kafka Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.kafka.service.port }}" + prometheus.io/path: "/metrics" + + ## Prometheus JMX Exporter: exposes the majority of Kafkas metrics + ## + jmx: + enabled: false + + ## Bitnami JMX exporter image + ## ref: https://hub.docker.com/r/bitnami/jmx-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/jmx-exporter + tag: 0.14.0-debian-10-r15 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets (secrets must be manually created in the namespace) + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## Example: + ## pullSecrets: + ## - myRegistryKeySecretName + ## + pullSecrets: [] + + ## Prometheus JMX Exporter' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 100m + # memory: 128Mi + requests: {} + # cpu: 100m + # memory: 128Mi + + ## Service configuration + ## + service: + ## JMX Exporter Service type + ## + type: ClusterIP + ## JMX Exporter Prometheus port + ## + port: 5556 + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + nodePort: "" + ## Set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + # loadBalancerIP: + ## Load Balancer sources + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service + ## Example: + ## loadBalancerSourceRanges: + ## - 10.10.10.0/24 + ## + loadBalancerSourceRanges: [] + ## Set the Cluster IP to use + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#choosing-your-own-ip-address + ## + # clusterIP: None + ## Annotations for the JMX Exporter Prometheus metrics service + ## + annotations: + prometheus.io/scrape: "true" + prometheus.io/port: "{{ .Values.metrics.jmx.service.port }}" + prometheus.io/path: "/" + + ## JMX Whitelist Objects, can be set to control which JMX metrics are exposed. Only whitelisted + ## values will be exposed via JMX Exporter. They must also be exposed via Rules. 
To expose all metrics + ## (warning its crazy excessive and they aren't formatted in a prometheus style) (1) `whitelistObjectNames: []` + ## (2) commented out above `overrideConfig`. + ## + whitelistObjectNames: + - kafka.controller:* + - kafka.server:* + - java.lang:* + - kafka.network:* + - kafka.log:* + + ## Prometheus JMX exporter configuration + ## Specify content for jmx-kafka-prometheus.yml. Evaluated as a template + ## + ## Credits to the incubator/kafka chart for the JMX configuration. + ## https://github.com/helm/charts/tree/master/incubator/kafka + ## + config: |- + jmxUrl: service:jmx:rmi:///jndi/rmi://127.0.0.1:5555/jmxrmi + lowercaseOutputName: true + lowercaseOutputLabelNames: true + ssl: false + {{- if .Values.metrics.jmx.whitelistObjectNames }} + whitelistObjectNames: ["{{ join "\",\"" .Values.metrics.jmx.whitelistObjectNames }}"] + {{- end }} + + ## ConfigMap with Prometheus JMX exporter configuration + ## NOTE: This will override metrics.jmx.config + ## + # existingConfigmap: + + ## Prometheus Operator ServiceMonitor configuration + ## + serviceMonitor: + enabled: false + ## Namespace in which Prometheus is running + ## + # namespace: monitoring + + ## Interval at which metrics should be scraped. + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # interval: 10s + + ## Timeout after which the scrape is ended + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#endpoint + ## + # scrapeTimeout: 10s + + ## ServiceMonitor selector labels + ## ref: https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-configuration + ## + # selector: + # prometheus: my-prometheus + +## +## Zookeeper chart configuration +## +## https://github.com/bitnami/charts/blob/master/bitnami/zookeeper/values.yaml +## +zookeeper: + enabled: true + auth: + ## Enable Zookeeper auth + ## + enabled: false + ## User that will use Zookeeper clients to auth + ## + # clientUser: + ## Password that will use Zookeeper clients to auth + ## + # clientPassword: + ## Comma, semicolon or whitespace separated list of user to be created. Specify them as a string, for example: "user1,user2,admin" + ## + # serverUsers: + ## Comma, semicolon or whitespace separated list of passwords to assign to users when created. Specify them as a string, for example: "pass4user1, pass4user2, pass4admin" + ## + # serverPasswords: + +## This value is only used when zookeeper.enabled is set to false +## +externalZookeeper: + ## Server or list of external zookeeper servers to use. + ## + servers: [] diff --git a/scripts/helm/helmcharts/databases/charts/minio/.helmignore b/scripts/helm/helmcharts/databases/charts/minio/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helm/helmcharts/databases/charts/minio/Chart.yaml b/scripts/helm/helmcharts/databases/charts/minio/Chart.yaml new file mode 100755 index 000000000..395e3ac91 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/Chart.yaml @@ -0,0 +1,24 @@ +annotations: + category: Infrastructure +apiVersion: v1 +appVersion: 2020.10.9 +description: MinIO is an object storage server, compatible with Amazon S3 cloud storage + service, mainly used for storing unstructured data (such as photos, videos, log + files, etc.) +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/minio +icon: https://bitnami.com/assets/stacks/minio/img/minio-stack-220x234.png +keywords: +- minio +- storage +- object-storage +- s3 +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +name: minio +sources: +- https://github.com/bitnami/bitnami-docker-minio +- https://min.io +version: 3.7.14 diff --git a/scripts/helm/helmcharts/databases/charts/minio/README.md b/scripts/helm/helmcharts/databases/charts/minio/README.md new file mode 100755 index 000000000..4a4c45c65 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/README.md @@ -0,0 +1,259 @@ +# MinIO + +[MinIO](https://min.io) is an object storage server, compatible with Amazon S3 cloud storage service, mainly used for storing unstructured data (such as photos, videos, log files, etc.) + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/minio +``` + +## Introduction + +This chart bootstraps a [MinIO](https://github.com/bitnami/bitnami-docker-minio) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications. + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure +- ReadWriteMany volumes for deployment scaling + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/minio +``` + +These commands deploy MinIO on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```console +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the MinIO chart and their default values. 
+ +| Parameter | Description | Default | +|--------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.minio.existingSecret` | Name of existing secret to use for MinIO credentials (overrides `existingSecret`) | `nil` | +| `global.minio.accessKey` | MinIO Access Key (overrides `accessKey.password`) | `nil` | +| `global.minio.secretKey` | MinIO Secret Key (overrides `secretKey.password`) | `nil` | +| `image.registry` | MinIO image registry | `docker.io` | +| `image.repository` | MinIO image name | `bitnami/minio` | +| `image.tag` | MinIO image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `image.debug` | Specify if debug logs should be enabled | `false` | +| `nameOverride` | String to partially override minio.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override minio.fullname template with a string | `nil` | +| `schedulerName` | Specifies the schedulerName, if it's nil uses kube-scheduler | `nil` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `true` | +| `serviceAccount.name` | If serviceAccount.create is enabled, what should the serviceAccount name be - otherwise defaults to the fullname | `nil` | +| `clusterDomain` | Kubernetes cluster domain | `cluster.local` | +| `clientImage.registry` | MinIO Client image registry | `docker.io` | +| `clientImage.repository` | MinIO Client image name | `bitnami/minio-client` | +| `clientImage.tag` | MinIO Client image tag | `{TAG_NAME}` | +| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` | +| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` | +| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` | +| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` | +| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` | +| `volumePermissions.resources` | Init container resource requests/limit | `nil` | +| `mode` | MinIO server mode (`standalone` or `distributed`) | `standalone` | +| `statefulset.replicaCount` | Number of pods (only for Minio distributed mode). 
Should be 4 <= x <= 32 | `4` | +| `statefulset.updateStrategy` | Statefulset update strategy policy | `RollingUpdate` | +| `statefulset.podManagementpolicy` | Statefulset pods management policy | `Parallel` | +| `deployment.updateStrategy` | Deployment update strategy policy | `Recreate` | +| `existingSecret` | Existing secret with MinIO credentials | `nil` | +| `useCredentialsFile` | Have the secret mounted as a file instead of env vars | `false` | +| `forceNewKeys` | Force admin credentials (access and secret key) to be reconfigured every time they change in the secrets | `false` | +| `accessKey.password` | MinIO Access Key. Ignored if existing secret is provided. | _random 10 character alphanumeric string_ | +| `accessKey.forcePassword` | Force users to specify an Access Key | `false` | +| `secretKey.password` | MinIO Secret Key. Ignored if existing secret is provided. | _random 40 character alphanumeric string_ | +| `secretKey.forcePassword` | Force users to specify an Secret Key | `false` | +| `defaultBuckets` | Comma, semi-colon or space separated list of buckets to create (only in standalone mode) | `nil` | +| `disableWebUI` | Disable MinIO Web UI | `false` | +| `extraEnv` | Any extra environment variables you would like to pass to the pods | `{}` | +| `command` | Command for the minio container | `{}` | +| `resources` | Minio containers' resources | `{}` | +| `podAnnotations` | Pod annotations | `{}` | +| `affinity` | Map of node/pod affinities | `{}` (The value is evaluated as a template) | +| `nodeSelector` | Node labels for pod assignment | `{}` (The value is evaluated as a template) | +| `tolerations` | Tolerations for pod assignment | `[]` (The value is evaluated as a template) | +| `securityContext.enabled` | Enable security context | `true` | +| `securityContext.fsGroup` | Group ID for the container | `1001` | +| `securityContext.runAsUser` | User ID for the container | `1001` | +| `livenessProbe.enabled` | Enable/disable the Liveness probe | `true` | +| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | `60` | +| `livenessProbe.periodSeconds` | How often to perform the probe | `10` | +| `livenessProbe.timeoutSeconds` | When the probe times out | `5` | +| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. | `1` | +| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.enabled` | Enable/disable the Readiness probe | `true` | +| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | `5` | +| `readinessProbe.periodSeconds` | How often to perform the probe | `10` | +| `readinessProbe.timeoutSeconds` | When the probe times out | `5` | +| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `6` | +| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed. 
| `1` | +| `persistence.enabled` | Use a PVC to persist data | `true` | +| `persistence.mountPath` | Path to mount the volume at | `/data` | +| `persistence.storageClass` | Storage class of backing PVC | `nil` (uses alpha storage class annotation) | +| `persistence.accessMode` | Use volume as ReadOnly or ReadWrite | `ReadWriteOnce` | +| `persistence.size` | Size of data volume | `8Gi` | +| `persistence.annotations` | Persistent Volume annotations | `{}` | +| `persistence.existingClaim` | Name of an existing PVC to use (only in "standalone" mode) | `nil` | +| `service.type` | Kubernetes Service type | `ClusterIP` | +| `service.port` | MinIO service port | `9000` | +| `service.nodePort` | Port to bind to for NodePort service type | `nil` | +| `service.loadBalancerIP` | Static IP Address to use for LoadBalancer service type | `nil` | +| `service.annotations` | Kubernetes service annotations | `{}` | +| `ingress.enabled` | Enable/disable ingress | `false` | +| `ingress.certManager` | Add annotations for cert-manager | `false` | +| `ingress.annotations` | Ingress annotations | `[]` | +| `ingress.labels` | Ingress additional labels | `{}` | +| `ingress.hosts[0].name` | Hostname to your MinIO installation | `minio.local` | +| `ingress.hosts[0].path` | Path within the url structure | `/` | +| `ingress.hosts[0].tls` | Utilize TLS backend in ingress | `false` | +| `ingress.hosts[0].tlsHosts` | Array of TLS hosts for ingress record (defaults to `ingress.hosts[0].name` if `nil`) | `nil` | +| `ingress.hosts[0].tlsSecret` | TLS Secret (certificates) | `minio.local-tls` | +| `ingress.secrets[0].name` | TLS Secret Name | `nil` | +| `ingress.secrets[0].certificate` | TLS Secret Certificate | `nil` | +| `ingress.secrets[0].key` | TLS Secret Key | `nil` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `prometheusAuthType` | Authentication mode for Prometheus (`jwt` or `public`) | `public` | + +Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example, + +```console +$ helm install my-release \ + --set accessKey.password=minio-access-key \ + --set secretKey.password=minio-secret-key \ + bitnami/minio +``` + +The above command sets the MinIO Server access key and secret key to `minio-access-key` and `minio-secret-key`, respectively. + +Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example, + +```console +$ helm install my-release -f values.yaml bitnami/minio +``` + +> **Tip**: You can use the default [values.yaml](values.yaml) + +## Configuration and installation details + +### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/) + +It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image. + +Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist. + +### Production configuration + +This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one. 
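+
+For example, assuming the chart has been fetched locally first (e.g. with `helm pull bitnami/minio --untar`), a hypothetical install using this file would be:
+
+```console
+$ helm install my-release -f minio/values-production.yaml bitnami/minio
+```
+
+The main settings it changes are listed below: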
+
+- MinIO server mode:
+```diff
+- mode: standalone
++ mode: distributed
+```
+
+- Disable MinIO Web UI:
+```diff
+- disableWebUI: false
++ disableWebUI: true
+```
+
+- Annotations to be added to pods:
+```diff
+- podAnnotations: {}
++ podAnnotations:
++   prometheus.io/scrape: "true"
++   prometheus.io/path: "/minio/prometheus/metrics"
++   prometheus.io/port: "9000"
+```
+
+- Pod resources:
+```diff
+- resources: {}
++ resources:
++   requests:
++     memory: 256Mi
++     cpu: 250m
+```
+
+- Enable NetworkPolicy:
+```diff
+- networkPolicy.enabled: false
++ networkPolicy.enabled: true
+```
+
+- Don't require client label for connections:
+```diff
+- networkPolicy.allowExternal: true
++ networkPolicy.allowExternal: false
+```
+
+- Change Prometheus authentication:
+```diff
+- prometheusAuthType: public
++ prometheusAuthType: jwt
+```
+
+### Distributed mode
+
+You can start the MinIO chart in distributed mode with the following parameter: `mode=distributed`.
+
+This chart sets the MinIO server in distributed mode with 4 nodes by default. You can change the number of nodes by setting the `statefulset.replicaCount` parameter, for example `statefulset.replicaCount=8`.
+
+> Note: the number of replicas must be even, greater than or equal to 4, and lower than or equal to 32.
+
+### Prometheus exporter
+
+MinIO exports Prometheus metrics at `/minio/prometheus/metrics`. To allow Prometheus to collect your MinIO metrics, modify `values.yaml` to add the corresponding annotations:
+
+```diff
+- podAnnotations: {}
++ podAnnotations:
++   prometheus.io/scrape: "true"
++   prometheus.io/path: "/minio/prometheus/metrics"
++   prometheus.io/port: "9000"
+```
+
+> Find more information about MinIO metrics at https://docs.min.io/docs/how-to-monitor-minio-using-prometheus.html
+
+## Persistence
+
+The [Bitnami MinIO](https://github.com/bitnami/bitnami-docker-minio) image stores data at the `/data` path of the container.
+
+The chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at this location. The volume is created using dynamic volume provisioning.
+
+### Adjust permissions of persistent volume mountpoint
+
+As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
+
+By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
+As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
+
+You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
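+
+For example, following the same convention as the production examples above (a minimal sketch):
+
+```diff
+- volumePermissions.enabled: false
++ volumePermissions.enabled: true
+```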
diff --git a/scripts/helm/helmcharts/databases/charts/minio/ci/values-production.yaml b/scripts/helm/helmcharts/databases/charts/minio/ci/values-production.yaml new file mode 100755 index 000000000..d5e966334 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/ci/values-production.yaml @@ -0,0 +1,27 @@ +# Test values file for generating all of the yaml and check that +# the rendering is correct + +volumePermissions: + enabled: true + +mode: distributed + +useCredentialsFile: true + +disableWebUI: false + +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/minio/prometheus/metric" + prometheus.io/port: "9000" + +resources: + requests: + cpu: 250m + memory: 256Mi + +ingress: + enabled: true + +networkPolicy: + enabled: true diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/NOTES.txt b/scripts/helm/helmcharts/databases/charts/minio/templates/NOTES.txt new file mode 100755 index 000000000..e7492fbe7 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/NOTES.txt @@ -0,0 +1,71 @@ +** Please be patient while the chart is being deployed ** + +MinIO can be accessed via port {{ .Values.service.port }} on the following DNS name from within your cluster: + + {{ include "minio.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + +To get your credentials run: + + export MINIO_ACCESS_KEY=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "minio.fullname" . }} -o jsonpath="{.data.access-key}" | base64 --decode) + export MINIO_SECRET_KEY=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ include "minio.fullname" . }} -o jsonpath="{.data.secret-key}" | base64 --decode) + +To connect to your MinIO server using a client: + +- Run a MinIO Client pod and append the desired command (e.g. 'admin info server'): + + kubectl run --namespace {{ .Release.Namespace }} {{ include "minio.fullname" . }}-client \ + --rm --tty -i --restart='Never' \ + --env MINIO_SERVER_ACCESS_KEY=$MINIO_ACCESS_KEY \ + --env MINIO_SERVER_SECRET_KEY=$MINIO_SECRET_KEY \ + --env MINIO_SERVER_HOST={{ include "minio.fullname" . }} \ + {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "minio.name" . }}-client=true" \ + {{- end }} + --image {{ template "minio.clientImage" . }} -- admin info server minio + +{{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + + NOTE: Since NetworkPolicy is enabled, only pods with label + "{{ template "minio.fullname" . }}-client=true" will be able to connect to MinIO. +{{- end }} +{{- if not .Values.disableWebUI }} + +To access the MinIO web UI: + +- Get the MinIO URL: + +{{- if .Values.ingress.enabled }} + + You should be able to access your new MinIO web UI through + + {{- range .Values.ingress.hosts }} + {{ if .tls }}https{{ else }}http{{ end }}://{{ .name }}/minio/ + {{- end }} +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ include "minio.fullname" . }}' + + {{- $port:=.Values.service.port | toString }} + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "minio.fullname" . 
}} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo "MinIO web URL: http://$SERVICE_IP{{- if ne $port "80" }}:{{ .Values.service.port }}{{ end }}/minio" + +{{- else if contains "ClusterIP" .Values.service.type }} + + echo "MinIO web URL: http://127.0.0.1:9000/minio" + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ include "minio.fullname" . }} 9000:{{ .Values.service.port }} + +{{- else if contains "NodePort" .Values.service.type }} + + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "minio.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo "MinIO web URL: http://$NODE_IP:$NODE_PORT/minio" + +{{- end }} +{{- else }} + + WARN: MinIO Web UI is disabled. +{{- end }} + +{{ include "minio.validateValues" . }} +{{ include "minio.checkRollingTags" . }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/_helpers.tpl b/scripts/helm/helmcharts/databases/charts/minio/templates/_helpers.tpl new file mode 100755 index 000000000..dd67e53e6 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/_helpers.tpl @@ -0,0 +1,265 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Expand the name of the chart. +*/}} +{{- define "minio.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "minio.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "minio.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Common labels +*/}} +{{- define "minio.labels" -}} +app.kubernetes.io/name: {{ include "minio.name" . }} +helm.sh/chart: {{ include "minio.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "minio.matchLabels" -}} +app.kubernetes.io/name: {{ include "minio.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} + +{{/* +Return the proper MinIO image name +*/}} +{{- define "minio.image" -}} +{{- $registryName := coalesce .Values.global.imageRegistry .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper MinIO Client image name +*/}} +{{- define "minio.clientImage" -}} +{{- $registryName := coalesce .Values.global.imageRegistry .Values.clientImage.registry -}} +{{- $repositoryName := .Values.clientImage.repository -}} +{{- $tag := .Values.clientImage.tag | toString -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "minio.imagePullSecrets" -}} +{{- $imagePullSecrets := coalesce .Values.global.imagePullSecrets .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets -}} +{{- if $imagePullSecrets }} +imagePullSecrets: +{{- range $imagePullSecrets }} + - name: {{ . }} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return MinIO accessKey +*/}} +{{- define "minio.accessKey" -}} +{{- $accessKey := coalesce .Values.global.minio.accessKey .Values.accessKey.password -}} +{{- if $accessKey }} + {{- $accessKey -}} +{{- else if (not .Values.accessKey.forcePassword) }} + {{- randAlphaNum 10 -}} +{{- else -}} + {{ required "An Access Key is required!" .Values.accessKey.password }} +{{- end -}} +{{- end -}} + +{{/* +Return MinIO secretKey +*/}} +{{- define "minio.secretKey" -}} +{{- $secretKey := coalesce .Values.global.minio.secretKey .Values.secretKey.password -}} +{{- if $secretKey }} + {{- $secretKey -}} +{{- else if (not .Values.secretKey.forcePassword) }} + {{- randAlphaNum 40 -}} +{{- else -}} + {{ required "A Secret Key is required!" .Values.secretKey.password }} +{{- end -}} +{{- end -}} + +{{/* +Get the credentials secret. +*/}} +{{- define "minio.secretName" -}} +{{- if .Values.global.minio.existingSecret }} + {{- printf "%s" .Values.global.minio.existingSecret -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" .Values.existingSecret -}} +{{- else -}} + {{- printf "%s" (include "minio.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "minio.createSecret" -}} +{{- if .Values.global.minio.existingSecret }} +{{- else if .Values.existingSecret -}} +{{- else -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "minio.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "minio.validateValues.mode" .) -}} +{{- $messages := append $messages (include "minio.validateValues.replicaCount" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* Validate values of MinIO - must provide a valid mode ("distributed" or "standalone") */}} +{{- define "minio.validateValues.mode" -}} +{{- if and (ne .Values.mode "distributed") (ne .Values.mode "standalone") -}} +minio: mode + Invalid mode selected. Valid values are "distributed" and + "standalone". 
Please set a valid mode (--set mode="xxxx")
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of MinIO - number of replicas must be even, and between 4 and 32 (inclusive) */}}
+{{- define "minio.validateValues.replicaCount" -}}
+{{- $replicaCount := int .Values.statefulset.replicaCount }}
+{{- if and (eq .Values.mode "distributed") (or (eq (mod $replicaCount 2) 1) (lt $replicaCount 4) (gt $replicaCount 32)) -}}
+minio: replicaCount
+ Number of replicas must be even, and between 4 and 32 (inclusive)!
+ Please set a valid number of replicas (--set statefulset.replicaCount=X)
+{{- end -}}
+{{- end -}}
+
+{{/* Check if there are rolling tags in the images */}}
+{{- define "minio.checkRollingTags" -}}
+{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+{{- if and (contains "bitnami/" .Values.clientImage.repository) (not (.Values.clientImage.tag | toString | regexFind "-r\\d+$|sha256:")) }}
+WARNING: Rolling tag detected ({{ .Values.clientImage.repository }}:{{ .Values.clientImage.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
+{{- end }}
+{{- end -}}
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "minio.volumePermissions.image" -}}
+{{- $registryName := .Values.volumePermissions.image.registry -}}
+{{- $repositoryName := .Values.volumePermissions.image.repository -}}
+{{- $tag := .Values.volumePermissions.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option.
+*/}}
+{{- if .Values.global }}
+ {{- if .Values.global.imageRegistry }}
+ {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+ {{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+ {{- end -}}
+{{- else -}}
+ {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the proper Storage Class
+*/}}
+{{- define "minio.storageClass" -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 do not support it, so we need to implement this if-else logic. 
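+For example (a sketch of the rendered output): with global.storageClass=standard this helper
+emits "storageClassName: standard"; with global.storageClass unset and persistence.storageClass="-"
+it emits storageClassName: "" to disable dynamic provisioning.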
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Renders a value that contains template. +Usage: +{{ include "minio.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "minio.tplValue" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} + +{{/* +Returns the proper service account name depending if an explicit service account name is set +in the values file. If the name is not set it will default to either minio.fullname if serviceAccount.create +is true or default otherwise. +*/}} +{{- define "minio.serviceAccountName" -}} + {{- if .Values.serviceAccount.create -}} + {{ default (include "minio.fullname" .) .Values.serviceAccount.name }} + {{- else -}} + {{ default "default" .Values.serviceAccount.name }} + {{- end -}} +{{- end -}} \ No newline at end of file diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml new file mode 100755 index 000000000..23a7232a8 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/deployment-standalone.yaml @@ -0,0 +1,160 @@ +{{- if eq .Values.mode "standalone" }} +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +spec: + {{- if .Values.deployment.updateStrategy }} + strategy: {{ toYaml .Values.deployment.updateStrategy | nindent 4 }} + {{- end }} + selector: + matchLabels: {{- include "minio.matchLabels" . | nindent 6 }} + template: + metadata: + labels: {{- include "minio.labels" . | nindent 8 }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . | sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "minio.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} +{{- include "minio.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.affinity }} + affinity: {{- include "minio.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "minio.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "minio.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "minio.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MINIO_FORCE_NEW_KEYS + value: {{ ternary "yes" "no" .Values.forceNewKeys | quote }} + {{- if .Values.useCredentialsFile }} + - name: MINIO_ACCESS_KEY_FILE + value: "/opt/bitnami/minio/secrets/access-key" + {{- else }} + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: access-key + {{- end }} + {{- if .Values.useCredentialsFile }} + - name: MINIO_SECRET_KEY_FILE + value: "/opt/bitnami/minio/secrets/secret-key" + {{- else }} + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}} + key: secret-key + {{- end }} + {{- if .Values.defaultBuckets }} + - name: MINIO_DEFAULT_BUCKETS + value: {{ .Values.defaultBuckets }} + {{- end }} + - name: MINIO_BROWSER + value: {{ ternary "off" "on" .Values.disableWebUI | quote }} + {{- if .Values.prometheusAuthType }} + - name: MINIO_PROMETHEUS_AUTH_TYPE + value: {{ .Values.prometheusAuthType }} + {{- end }} + {{- if .Values.extraEnv }} + {{- toYaml .Values.extraEnv | nindent 12 }} + {{- end }} + {{ if .Values.command }} + command: {{- toYaml .Values.command | nindent 12 }} + {{- end }} + ports: + - name: minio + containerPort: 9000 + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + httpGet: + path: /minio/health/live + port: minio + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + tcpSocket: + port: minio + {{- end }} + {{- if .Values.resources }} + resources: {{ toYaml .Values.resources | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.useCredentialsFile }} + - name: minio-credentials + mountPath: /opt/bitnami/minio/secrets/ + {{- end }} + - name: "data" + mountPath: {{ .Values.persistence.mountPath }} + volumes: + {{- if .Values.useCredentialsFile }} + - name: minio-credentials + secret: + secretName: {{ include "minio.secretName" . }} + {{- end }} + - name: data + {{- if .Values.persistence.enabled }} + persistentVolumeClaim: + claimName: {{ if .Values.persistence.existingClaim }}{{ .Values.persistence.existingClaim }}{{- else }}{{ include "minio.fullname" . }}{{- end }} + {{- else }} + emptyDir: {} + {{- end }} + {{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/ingress.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/ingress.yaml new file mode 100755 index 000000000..486feb5ac --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/ingress.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.ingress.enabled (not .Values.disableWebUI ) -}} +apiVersion: extensions/v1beta1 +kind: Ingress +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . 
| nindent 4 }} +{{- range $key, $value := .Values.ingress.labels }} + {{ $key }}: {{ $value }} +{{- end }} + annotations: + {{- if .Values.ingress.certManager }} + kubernetes.io/tls-acme: "true" + {{- end }} + {{- range $key, $value := .Values.ingress.annotations }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .name }} + http: + paths: + - path: {{ default "/" .path }} + backend: + serviceName: {{ include "minio.fullname" $ }} + servicePort: minio + {{- end }} + tls: + {{- range .Values.ingress.hosts }} + {{- if .tls }} + - hosts: + {{- if .tlsHosts }} + {{- range $host := .tlsHosts }} + - {{ $host }} + {{- end }} + {{- else }} + - {{ .name }} + {{- end }} + secretName: {{ .tlsSecret }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/networkpolicy.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/networkpolicy.yaml new file mode 100755 index 000000000..cffc5be13 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/networkpolicy.yaml @@ -0,0 +1,23 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: networking.k8s.io/v1 +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +spec: + podSelector: + matchLabels: + app.kubernetes.io/name: {{ include "minio.name" . }} + app.kubernetes.io/instance: {{ .Release.Name }} + ingress: + # Allow inbound connections + - ports: + - port: 9000 + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ include "minio.fullname" . }}-client: "true" + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/pvc-standalone.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/pvc-standalone.yaml new file mode 100755 index 000000000..8e4be7da4 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/pvc-standalone.yaml @@ -0,0 +1,17 @@ +{{- if and .Values.persistence.enabled (not .Values.persistence.existingClaim) (eq .Values.mode "standalone") }} +kind: PersistentVolumeClaim +apiVersion: v1 +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "minio.storageClass" . }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/secrets.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/secrets.yaml new file mode 100755 index 000000000..1a813eeda --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/secrets.yaml @@ -0,0 +1,12 @@ +{{- if (include "minio.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +type: Opaque +data: + access-key: {{ include "minio.accessKey" . | b64enc | quote }} + secret-key: {{ include "minio.secretKey" . 
| b64enc | quote }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/service.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/service.yaml new file mode 100755 index 000000000..7a6c380de --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/service.yaml @@ -0,0 +1,24 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} + {{- if .Values.service.annotations }} + annotations: {{- include "minio.tplValue" ( dict "value" .Values.service.annotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.service.type }} + {{- if and (eq .Values.service.type "LoadBalancer") (not (empty .Values.service.loadBalancerIP)) }} + loadBalancerIP: {{ .Values.service.loadBalancerIP }} + {{- end }} + ports: + - name: minio + port: {{ .Values.service.port }} + targetPort: minio + {{- if and (or (eq .Values.service.type "NodePort") (eq .Values.service.type "LoadBalancer")) (not (empty .Values.service.nodePort)) }} + nodePort: {{ .Values.service.nodePort }} + {{- else if eq .Values.service.type "ClusterIP" }} + nodePort: null + {{- end }} + selector: {{- include "minio.matchLabels" . | nindent 4 }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/serviceaccount.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/serviceaccount.yaml new file mode 100755 index 000000000..76efe62f0 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/serviceaccount.yaml @@ -0,0 +1,10 @@ +{{- if .Values.serviceAccount.create }} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "minio.serviceAccountName" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +secrets: + - name: {{ include "minio.fullname" . }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/statefulset.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/statefulset.yaml new file mode 100755 index 000000000..7add4501f --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/statefulset.yaml @@ -0,0 +1,181 @@ +{{- if eq .Values.mode "distributed" }} +{{- $replicaCount := int .Values.statefulset.replicaCount }} +{{- if and (eq (mod $replicaCount 2) 0) (gt $replicaCount 3) (lt $replicaCount 33) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ include "minio.fullname" . }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" . | nindent 4 }} +spec: + selector: + matchLabels: {{- include "minio.matchLabels" . | nindent 6 }} + serviceName: {{ include "minio.fullname" . }}-headless + replicas: {{ .Values.statefulset.replicaCount }} + podManagementPolicy: {{ .Values.statefulset.podManagementPolicy }} + updateStrategy: + type: {{ .Values.statefulset.updateStrategy }} + {{- if (eq "Recreate" .Values.statefulset.updateStrategy) }} + rollingUpdate: null + {{- end }} + template: + metadata: + labels: {{- include "minio.labels" . | nindent 8 }} + {{- if or .Values.podAnnotations (include "minio.createSecret" .) }} + annotations: + {{- if (include "minio.createSecret" .) }} + checksum/credentials-secret: {{ include (print $.Template.BasePath "/secrets.yaml") . 
| sha256sum }} + {{- end }} + {{- if .Values.podAnnotations }} + {{- include "minio.tplValue" ( dict "value" .Values.podAnnotations "context" $) | nindent 8 }} + {{- end }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: {{ .Values.schedulerName }} + {{- end }} + serviceAccountName: {{ template "minio.serviceAccountName" . }} +{{- include "minio.imagePullSecrets" . | nindent 6 }} + {{- if .Values.affinity }} + affinity: {{- include "minio.tplValue" (dict "value" .Values.affinity "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.nodeSelector }} + nodeSelector: {{- include "minio.tplValue" (dict "value" .Values.nodeSelector "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.tolerations }} + tolerations: {{- include "minio.tplValue" (dict "value" .Values.tolerations "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: + fsGroup: {{ .Values.securityContext.fsGroup }} + {{- end }} + {{- if and .Values.volumePermissions.enabled .Values.persistence.enabled }} + initContainers: + - name: volume-permissions + image: {{ template "minio.volumePermissions.image" . }} + imagePullPolicy: {{ default "" .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + chown -R {{ .Values.securityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + securityContext: + runAsUser: 0 + {{- if .Values.volumePermissions.resources }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + {{- end }} + volumeMounts: + - name: data + mountPath: {{ .Values.persistence.mountPath }} + {{- end }} + containers: + - name: minio + image: {{ include "minio.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.securityContext.enabled }} + securityContext: + runAsUser: {{ .Values.securityContext.runAsUser }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: MINIO_DISTRIBUTED_MODE_ENABLED + value: "yes" + - name: MINIO_SKIP_CLIENT + value: "yes" + - name: MINIO_DISTRIBUTED_NODES + {{- $minioFullname := include "minio.fullname" . }} + {{- $minioHeadlessServiceName := printf "%s-%s" $minioFullname "headless" | trunc 63 }} + {{- $releaseNamespace := .Release.Namespace }} + value: {{range $i, $e := until $replicaCount }}{{ $minioFullname }}-{{ $e }}.{{ $minioHeadlessServiceName }}.{{ $releaseNamespace }}.svc.{{ $.Values.clusterDomain }},{{ end }} + - name: MINIO_FORCE_NEW_KEYS + value: {{ ternary "yes" "no" .Values.forceNewKeys | quote }} + {{- if .Values.useCredentialsFile }} + - name: MINIO_ACCESS_KEY_FILE + value: "/opt/bitnami/minio/secrets/access-key" + {{- else }} + - name: MINIO_ACCESS_KEY + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . }} + key: access-key + {{- end }} + {{- if .Values.useCredentialsFile }} + - name: MINIO_SECRET_KEY_FILE + value: "/opt/bitnami/minio/secrets/secret-key" + {{- else }} + - name: MINIO_SECRET_KEY + valueFrom: + secretKeyRef: + name: {{ include "minio.secretName" . 
}}
+ key: secret-key
+ {{- end }}
+ - name: MINIO_BROWSER
+ value: {{ ternary "off" "on" .Values.disableWebUI | quote }}
+ {{- if .Values.prometheusAuthType }}
+ - name: MINIO_PROMETHEUS_AUTH_TYPE
+ value: {{ .Values.prometheusAuthType }}
+ {{- end }}
+ {{- if .Values.extraEnv }}
+ {{- toYaml .Values.extraEnv | nindent 12 }}
+ {{- end }}
+ ports:
+ - name: minio
+ containerPort: 9000
+ {{- if .Values.livenessProbe.enabled }}
+ livenessProbe:
+ initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.livenessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.livenessProbe.successThreshold }}
+ failureThreshold: {{ .Values.livenessProbe.failureThreshold }}
+ httpGet:
+ path: /minio/health/live
+ port: minio
+ {{- end }}
+ {{- if .Values.readinessProbe.enabled }}
+ readinessProbe:
+ initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }}
+ periodSeconds: {{ .Values.readinessProbe.periodSeconds }}
+ timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }}
+ successThreshold: {{ .Values.readinessProbe.successThreshold }}
+ failureThreshold: {{ .Values.readinessProbe.failureThreshold }}
+ tcpSocket:
+ port: minio
+ {{- end }}
+ {{- if .Values.resources }}
+ resources: {{ toYaml .Values.resources | nindent 12 }}
+ {{- end }}
+ volumeMounts:
+ {{- if .Values.useCredentialsFile }}
+ - name: minio-credentials
+ mountPath: /opt/bitnami/minio/secrets/
+ {{- end }}
+ - name: data
+ mountPath: {{ .Values.persistence.mountPath }}
+ volumes:
+ {{- if .Values.useCredentialsFile }}
+ - name: minio-credentials
+ secret:
+ secretName: {{ include "minio.secretName" . }}
+ {{- end }}
+{{- if not .Values.persistence.enabled }}
+ - name: data
+ emptyDir: {}
+{{- else }}
+ volumeClaimTemplates:
+ - metadata:
+ name: data
+ labels: {{- include "minio.matchLabels" . | nindent 10 }}
+ {{- if .Values.persistence.annotations }}
+ annotations: {{- include "minio.tplValue" ( dict "value" .Values.persistence.annotations "context" $) | nindent 10 }}
+ {{- end }}
+ spec:
+ accessModes: {{ toYaml .Values.persistence.accessModes | nindent 10 }}
+ {{ include "minio.storageClass" . }}
+ resources:
+ requests:
+ storage: {{ .Values.persistence.size | quote }}
+{{- end }}
+{{- end }}
+{{- end }}
diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/svc-headless.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/svc-headless.yaml
new file mode 100755
index 000000000..41a4cf507
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/minio/templates/svc-headless.yaml
@@ -0,0 +1,16 @@
+{{- if eq .Values.mode "distributed" }}
+apiVersion: v1
+kind: Service
+metadata:
+ name: {{ include "minio.fullname" . }}-headless
+ namespace: {{ .Release.Namespace }}
+ labels: {{- include "minio.labels" . | nindent 4 }}
+spec:
+ type: ClusterIP
+ clusterIP: None
+ ports:
+ - name: minio
+ port: {{ .Values.service.port }}
+ targetPort: minio
+ selector: {{- include "minio.matchLabels" . 
| nindent 4 }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/templates/tls-secrets.yaml b/scripts/helm/helmcharts/databases/charts/minio/templates/tls-secrets.yaml new file mode 100755 index 000000000..68b31d324 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/templates/tls-secrets.yaml @@ -0,0 +1,14 @@ +{{- if .Values.ingress.enabled }} +{{- range .Values.ingress.secrets }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ .name }} + namespace: {{ .Release.Namespace }} + labels: {{- include "minio.labels" $ | nindent 4 }} +type: kubernetes.io/tls +data: + tls.crt: {{ .certificate | b64enc }} + tls.key: {{ .key | b64enc }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/minio/values-production.yaml b/scripts/helm/helmcharts/databases/charts/minio/values-production.yaml new file mode 100755 index 000000000..d4d639732 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/values-production.yaml @@ -0,0 +1,389 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + minio: {} +# minio: +# existingSecret: "" +# accessKey: "" +# secretKey: "" +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami MinIO image version +## ref: https://hub.docker.com/r/bitnami/minio/tags/ +## +image: + registry: docker.io + repository: bitnami/minio + tag: 2020.10.9-debian-10-r6 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override minio.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override minio.fullname template +## +# fullnameOverride: + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +## Cluster domain +## +clusterDomain: cluster.local + +## Bitnami MinIO Client image version +## ref: https://hub.docker.com/r/bitnami/minio-client/tags/ +## +clientImage: + registry: docker.io + repository: bitnami/minio-client + tag: 2020.10.3-debian-10-r9 + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. 
+ ## Secrets must be manually created in the namespace.
+ ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+ ##
+ # pullSecrets:
+ # - myRegistryKeySecretName
+ ## Init container resource requests and limits
+ ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+ ##
+ resources:
+ # We usually recommend not to specify default resources and to leave this as a conscious
+ # choice for the user. This also increases chances charts run on environments with little
+ # resources, such as Minikube. If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ limits: {}
+ # cpu: 100m
+ # memory: 128Mi
+ requests: {}
+ # cpu: 100m
+ # memory: 128Mi
+
+## MinIO server mode. Allowed values: standalone or distributed.
+## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide
+##
+mode: distributed
+
+## MinIO deployment parameters
+## Only when mode is 'standalone'
+##
+deployment:
+ ## Set to Recreate if you use a persistent volume that cannot be mounted by more than one pod, to make sure the old pod is destroyed first.
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+ ## Example:
+ ## updateStrategy:
+ ##  type: RollingUpdate
+ ##  rollingUpdate:
+ ##    maxSurge: 25%
+ ##    maxUnavailable: 25%
+ ##
+ updateStrategy:
+ type: Recreate
+
+## MinIO statefulset parameters
+## Only when mode is 'distributed'
+##
+statefulset:
+ ## Update strategy; can be set to RollingUpdate (the default) or OnDelete.
+ ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+ ##
+ updateStrategy: RollingUpdate
+
+ ## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+ ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+ ##
+ podManagementPolicy: Parallel
+
+ ## Number of replicas; it must be even, and between 4 and 32 (inclusive)
+ ##
+ replicaCount: 4
+
+## Use existing secret (ignores accessKey and secretKey passwords)
+##
+# existingSecret:
+
+## Mount MinIO secret as a file instead of passing environment variable
+##
+useCredentialsFile: false
+
+## Force reconfiguring new keys whenever the credentials change
+##
+forceNewKeys: false
+
+## MinIO credentials
+##
+accessKey:
+ ## MinIO Access Key
+ ## ref: https://github.com/bitnami/bitnami-docker-minio/#setting-up-minio-in-distributed-mode
+ ##
+ password:
+ ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+ ## If it is not forced, a random password will be generated.
+ ##
+ forcePassword: false
+secretKey:
+ ## MinIO Secret Key
+ ## ref: https://github.com/bitnami/bitnami-docker-minio/#setting-up-minio-in-distributed-mode
+ ##
+ password:
+ ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+ ## If it is not forced, a random password will be generated.
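+ ## For example (a sketch; the values are placeholders), both keys can be pinned at install time:
+ ##   helm install minio . --set accessKey.password=my-access-key --set secretKey.password=my-secret-key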
+ ## + forcePassword: false + +## Comma, semi-colon or space separated list of buckets to create at initialization (only in standalone mode) +## +# defaultBuckets: "my-bucket, my-second-bucket" + +## Disable MinIO Web UI +## ref: https://github.com/minio/minio/tree/master/docs/config/#browser +## +disableWebUI: true + +## Define custom environment variables to pass to the image here +## +extraEnv: {} + +## Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/path: "/minio/prometheus/metric" + prometheus.io/port: "9000" + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## MinIO containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 256Mi + requests: + cpu: 250m + memory: 256Mi + +## MinIO containers' liveness and readiness probes +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + + ## Enable persistence using an existing PVC (only in standalone mode) + ## + # existingClaim: + + ## Data volume mount path + ## + mountPath: /data + + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + + ## Persistent Volume size + ## + size: 8Gi + + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Persistent Volume Claim annotations + ## + annotations: {} + +## MinIO Service properties +## +service: + ## MinIO Service type + ## + type: ClusterIP + + ## MinIO Service port + ## + port: 9000 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
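+ ## e.g. (a sketch; the valid NodePort range is cluster-dependent, 30000-32767 by default):
+ ## nodePort: 30900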
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + + ## loadBalancerIP for the PrestaShop Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + # loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## MinIO web browser. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + # annotations: + # kubernetes.io/ingress.class: nginx + + ## Ingress additional labels done as key:value pairs + labels: {} + + ## The list of hostnames to be covered with this ingress record. + ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: minio.local + path: / + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## Optionally specify the TLS hosts for the ingress record + ## Useful when the Ingress controller supports www-redirection + ## If not specified, the above host name will be used + # tlsHosts: + # - www.minio.local + # - minio.local + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: minio.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: minio.local-tls + # key: + # certificate: + +## NetworkPolicy parameters +## +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: true + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port MinIO is listening + ## on. When true, MinIO will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: false + +## MinIO supports two authentication modes for Prometheus either jwt or public, by default MinIO runs in jwt mode. +## To allow public access without authentication for prometheus metrics set environment as follows. 
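+## e.g. prometheusAuthType: public lets Prometheus scrape /minio/prometheus/metrics without a
+## bearer token (the endpoint path is an assumption; verify it for your MinIO release).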
+prometheusAuthType: jwt diff --git a/scripts/helm/helmcharts/databases/charts/minio/values.yaml b/scripts/helm/helmcharts/databases/charts/minio/values.yaml new file mode 100755 index 000000000..8aee06beb --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/minio/values.yaml @@ -0,0 +1,391 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + minio: {} +# accessKey: "minios3AccessKeyS3cr3t" +# secretKey: "m1n10s3CretK3yPassw0rd" +# minio: {} +# existingSecret: "" +# accessKey: "" +# secretKey: "" +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami MinIO image version +## ref: https://hub.docker.com/r/bitnami/minio/tags/ +## +image: + registry: docker.io + repository: bitnami/minio + tag: 2020.10.9-debian-10-r6 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + debug: false + +## String to partially override minio.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override minio.fullname template +## +# fullnameOverride: + +## Scheduler name +## https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: stork + +serviceAccount: + # Specifies whether a ServiceAccount should be created + create: true + # The name of the ServiceAccount to use. + # If not set and create is true, a name is generated using the fullname template + name: + +## Cluster domain +## +clusterDomain: cluster.local + +## Bitnami MinIO Client image version +## ref: https://hub.docker.com/r/bitnami/minio-client/tags/ +## +clientImage: + registry: docker.io + repository: bitnami/minio-client + tag: 2020.10.3-debian-10-r9 + +## Init containers parameters: +## volumePermissions: Change the owner and group of the persistent volume mountpoint to runAsUser:fsGroup values from the securityContext section. +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container' resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. 
If you do want to specify resources, uncomment the following
+ # lines, adjust them as necessary, and remove the curly braces after 'resources:'.
+ limits: {}
+ # cpu: 100m
+ # memory: 128Mi
+ requests: {}
+ # cpu: 100m
+ # memory: 128Mi
+
+## MinIO server mode. Allowed values: standalone or distributed.
+## Distributed Minio ref: https://docs.minio.io/docs/distributed-minio-quickstart-guide
+##
+mode: standalone
+
+## MinIO deployment parameters
+## Only when mode is 'standalone'
+##
+deployment:
+ ## Set to Recreate if you use a persistent volume that cannot be mounted by more than one pod, to make sure the old pod is destroyed first.
+ ## ref: https://kubernetes.io/docs/concepts/workloads/controllers/deployment/#strategy
+ ## Example:
+ ## updateStrategy:
+ ##  type: RollingUpdate
+ ##  rollingUpdate:
+ ##    maxSurge: 25%
+ ##    maxUnavailable: 25%
+ ##
+ updateStrategy:
+ type: Recreate
+
+## MinIO statefulset parameters
+## Only when mode is 'distributed'
+##
+statefulset:
+ ## Update strategy; can be set to RollingUpdate (the default) or OnDelete.
+ ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+ ##
+ updateStrategy: RollingUpdate
+
+ ## The StatefulSet controller supports relaxing its ordering guarantees while preserving its uniqueness and identity guarantees. There are two valid pod management policies: OrderedReady and Parallel
+ ## ref: https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#pod-management-policy
+ ##
+ podManagementPolicy: Parallel
+
+ ## Number of replicas; it must be even, and between 4 and 32 (inclusive)
+ ##
+ replicaCount: 4
+
+## Use existing secret (ignores accessKey and secretKey passwords)
+##
+# existingSecret:
+
+## Mount MinIO secret as a file instead of passing environment variable
+##
+useCredentialsFile: false
+
+## Force reconfiguring new keys whenever the credentials change
+##
+forceNewKeys: false
+
+## MinIO credentials
+##
+accessKey:
+ ## MinIO Access Key
+ ## ref: https://github.com/bitnami/bitnami-docker-minio/#setting-up-minio-in-distributed-mode
+ ##
+ password:
+ ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+ ## If it is not forced, a random password will be generated.
+ ##
+ forcePassword: false
+secretKey:
+ ## MinIO Secret Key
+ ## ref: https://github.com/bitnami/bitnami-docker-minio/#setting-up-minio-in-distributed-mode
+ ##
+ password:
+ ## Option to force users to specify a password. That is required for 'helm upgrade' to work properly.
+ ## If it is not forced, a random password will be generated.
+ ## + forcePassword: false + +## Comma, semi-colon or space separated list of buckets to create at initialization (only in standalone mode) +## +# defaultBuckets: "my-bucket, my-second-bucket" + +## Disable MinIO Web UI +## ref: https://github.com/minio/minio/tree/master/docs/config/#browser +## +disableWebUI: false + +## Define custom environment variables to pass to the image here +## +extraEnv: {} + +## Define a custom command for the minio container +command: {} + +## Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + runAsUser: 1001 + +## Pod annotations +## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/annotations/ +## +podAnnotations: {} + +## Affinity for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity +## +affinity: {} + +## Node labels for pod assignment. Evaluated as a template. +## Ref: https://kubernetes.io/docs/user-guide/node-selection/ +## +nodeSelector: {} + +## Tolerations for pod assignment +## Ref: https://kubernetes.io/docs/concepts/configuration/taint-and-toleration/ +## +tolerations: [] + +## MinIO containers' resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + limits: {} + # cpu: 250m + # memory: 256Mi + requests: {} + # cpu: 250m + # memory: 256Mi + +## MinIO containers' liveness and readiness probes +## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-lifecycle/#container-probes +## +livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 +readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + +## Enable persistence using Persistent Volume Claims +## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ +## +persistence: + ## If true, use a Persistent Volume Claim, If false, use emptyDir + ## + enabled: true + + ## Enable persistence using an existing PVC (only in standalone mode) + ## + # existingClaim: + + ## Data volume mount path + ## + mountPath: /data + + ## Persistent Volume Access Mode + ## + accessModes: + - ReadWriteOnce + + ## Persistent Volume size + ## + size: 50Gi + + ## Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + + ## Persistent Volume Claim annotations + ## + annotations: {} + +## MinIO Service properties +## +service: + ## MinIO Service type + ## + type: ClusterIP + + ## MinIO Service port + ## + port: 9000 + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. 
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + + ## loadBalancerIP for the PrestaShop Service (optional, cloud specific) + ## ref: http://kubernetes.io/docs/user-guide/services/#type-loadbalancer + ## + # loadBalancerIP: + +## Configure the ingress resource that allows you to access the +## MinIO web browser. Set up the URL +## ref: http://kubernetes.io/docs/user-guide/ingress/ +## +ingress: + ## Set to true to enable ingress record generation + enabled: false + + ## Set this to true in order to add the corresponding annotations for cert-manager + certManager: false + + ## Ingress annotations done as key:value pairs + ## For a full list of possible ingress annotations, please see + ## ref: https://github.com/kubernetes/ingress-nginx/blob/master/docs/user-guide/nginx-configuration/annotations.md + ## + ## If tls is set to true, annotation ingress.kubernetes.io/secure-backends: "true" will automatically be set + ## If certManager is set to true, annotation kubernetes.io/tls-acme: "true" will automatically be set + # annotations: + # kubernetes.io/ingress.class: nginx + + ## Ingress additional labels done as key:value pairs + labels: {} + + ## The list of hostnames to be covered with this ingress record. + ## Most likely this will be just one host, but in the event more hosts are needed, this is an array + hosts: + - name: minio.local + path: / + + ## Set this to true in order to enable TLS on the ingress record + tls: false + + ## Optionally specify the TLS hosts for the ingress record + ## Useful when the Ingress controller supports www-redirection + ## If not specified, the above host name will be used + # tlsHosts: + # - www.minio.local + # - minio.local + + ## If TLS is set to true, you must declare what secret will store the key/certificate for TLS + tlsSecret: minio.local-tls + + secrets: + ## If you're providing your own certificates, please use this to add the certificates as secrets + ## key and certificate should start with -----BEGIN CERTIFICATE----- or + ## -----BEGIN RSA PRIVATE KEY----- + ## + ## name should line up with a tlsSecret set further up + ## If you're using cert-manager, this is unneeded, as it will create the secret for you if it is not set + ## + ## It is also possible to create and manage the certificates outside of this helm chart + ## Please see README.md for more information + # - name: minio.local-tls + # key: + # certificate: + +## NetworkPolicy parameters +## +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port MinIO is listening + ## on. When true, MinIO will accept connections from any source + ## (with the correct destination port). + ## + allowExternal: true + +## MinIO supports two authentication modes for Prometheus either jwt or public, by default MinIO runs in jwt mode. +## To allow public access without authentication for prometheus metrics set environment as follows. 
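+## When set to "jwt" instead, a scrape token can be generated with the MinIO client, e.g.
+## "mc admin prometheus generate <alias>" (a sketch; assumes mc is configured against this deployment).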
+prometheusAuthType: public diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/.helmignore b/scripts/helm/helmcharts/databases/charts/postgresql/.helmignore new file mode 100755 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*~ +# Various IDEs +.project +.idea/ +*.tmproj diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/Chart.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/Chart.yaml new file mode 100755 index 000000000..3ac2d3605 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/Chart.yaml @@ -0,0 +1,26 @@ +annotations: + category: Database +apiVersion: v1 +appVersion: 11.9.0 +description: Chart for PostgreSQL, an object-relational database management system + (ORDBMS) with an emphasis on extensibility and on standards-compliance. +engine: gotpl +home: https://github.com/bitnami/charts/tree/master/bitnami/postgresql +icon: https://bitnami.com/assets/stacks/postgresql/img/postgresql-stack-110x117.png +keywords: +- postgresql +- postgres +- database +- sql +- replication +- cluster +maintainers: +- email: containers@bitnami.com + name: Bitnami +- email: cedric@desaintmartin.fr + name: desaintmartin +name: postgresql +sources: +- https://github.com/bitnami/bitnami-docker-postgresql +- https://www.postgresql.org/ +version: 9.8.2 diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/README.md b/scripts/helm/helmcharts/databases/charts/postgresql/README.md new file mode 100755 index 000000000..8cdb2ca13 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/README.md @@ -0,0 +1,707 @@ +# PostgreSQL + +[PostgreSQL](https://www.postgresql.org/) is an object-relational database management system (ORDBMS) with an emphasis on extensibility and on standards-compliance. + +For HA, please see [this repo](https://github.com/bitnami/charts/tree/master/bitnami/postgresql-ha) + +## TL;DR + +```console +$ helm repo add bitnami https://charts.bitnami.com/bitnami +$ helm install my-release bitnami/postgresql +``` + +## Introduction + +This chart bootstraps a [PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager. + +Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/). + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 2.12+ or Helm 3.0-beta3+ +- PV provisioner support in the underlying infrastructure + +## Installing the Chart +To install the chart with the release name `my-release`: + +```console +$ helm install my-release bitnami/postgresql +``` + +The command deploys PostgreSQL on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. 
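+
+For example, individual parameters can be overridden with `--set` (a sketch; the release name and values are placeholders):
+
+```console
+$ helm install my-release \
+    --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \
+    bitnami/postgresql
+```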
+
+> **Tip**: List all releases using `helm list`
+
+## Uninstalling the Chart
+
+To uninstall/delete the `my-release` deployment:
+
+```console
+$ helm delete my-release
+```
+
+The command removes all the Kubernetes components except the PVCs associated with the chart and deletes the release.
+
+To delete the PVCs associated with `my-release`:
+
+```console
+$ kubectl delete pvc -l release=my-release
+```
+
+> **Note**: Deleting the PVCs will delete the PostgreSQL data as well. Please be cautious before doing it.
+
+## Parameters
+
+The following table lists the configurable parameters of the PostgreSQL chart and their default values.
+
+| Parameter | Description | Default |
+|-----------|-------------|---------|
+| `global.imageRegistry` | Global Docker Image registry | `nil` |
+| `global.postgresql.postgresqlDatabase` | PostgreSQL database (overrides `postgresqlDatabase`) | `nil` |
+| `global.postgresql.postgresqlUsername` | PostgreSQL username (overrides `postgresqlUsername`) | `nil` |
+| `global.postgresql.existingSecret` | Name of existing secret to use for PostgreSQL passwords (overrides `existingSecret`) | `nil` |
+| `global.postgresql.postgresqlPassword` | PostgreSQL admin password (overrides `postgresqlPassword`) | `nil` |
+| `global.postgresql.servicePort` | PostgreSQL port (overrides `service.port`) | `nil` |
+| `global.postgresql.replicationPassword` | Replication user password (overrides `replication.password`) | `nil` |
+| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
+| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
+| `image.registry` | PostgreSQL Image registry | `docker.io` |
+| `image.repository` | PostgreSQL Image name | `bitnami/postgresql` |
+| `image.tag` | PostgreSQL Image tag | `{TAG_NAME}` |
+| `image.pullPolicy` | PostgreSQL Image pull policy | `IfNotPresent` |
+| `image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `image.debug` | Specify if debug values should be set | `false` |
+| `nameOverride` | String to partially override postgresql.fullname template with a string (will prepend the release name) | `nil` |
+| `fullnameOverride` | String to fully override postgresql.fullname template with a string | `nil` |
+| `volumePermissions.enabled` | Enable init container that changes volume permissions in the data directory (for cases where the default k8s `runAsUser` and `fsGroup` values do not work) | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
+| `volumePermissions.securityContext.*` | Other container security context to be included as-is in the container spec | `{}` |
+| `volumePermissions.securityContext.runAsUser` | User ID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | `0` |
+| `usePasswordFile` | Have the secrets mounted as a file instead of env vars | `false` |
+| `ldap.enabled` | Enable LDAP support | `false` |
+| `ldap.existingSecret` | Name of existing secret to use for LDAP passwords | `nil` |
+| `ldap.url` | LDAP URL beginning in the form `ldap[s]://host[:port]/basedn[?[attribute][?[scope][?[filter]]]]` | `nil` |
+| `ldap.server` | IP address or name of the LDAP server. | `nil` |
+| `ldap.port` | Port number on the LDAP server to connect to | `nil` |
+| `ldap.scheme` | Set to `ldaps` to use LDAPS. | `nil` |
+| `ldap.tls` | Set to `1` to use TLS encryption | `nil` |
+| `ldap.prefix` | String to prepend to the user name when forming the DN to bind | `nil` |
+| `ldap.suffix` | String to append to the user name when forming the DN to bind | `nil` |
+| `ldap.search_attr` | Attribute to match against the user name in the search | `nil` |
+| `ldap.search_filter` | The search filter to use when doing search+bind authentication | `nil` |
+| `ldap.baseDN` | Root DN to begin the search for the user in | `nil` |
+| `ldap.bindDN` | DN of user to bind to LDAP | `nil` |
+| `ldap.bind_password` | Password for the user to bind to LDAP | `nil` |
+| `replication.enabled` | Enable replication | `false` |
+| `replication.user` | Replication user | `repl_user` |
+| `replication.password` | Replication user password | `repl_password` |
+| `replication.slaveReplicas` | Number of slave replicas | `1` |
+| `replication.synchronousCommit` | Set synchronous commit mode. Allowed values: `on`, `remote_apply`, `remote_write`, `local` and `off` | `off` |
+| `replication.numSynchronousReplicas` | Number of replicas that will have synchronous replication. Note: Cannot be greater than `replication.slaveReplicas`. | `0` |
+| `replication.applicationName` | Cluster application name. Useful for advanced replication settings | `my_application` |
+| `existingSecret` | Name of existing secret to use for PostgreSQL passwords. The secret has to contain the keys `postgresql-password` which is the password for `postgresqlUsername` when it is different from `postgres`, `postgresql-postgres-password` which will override `postgresqlPassword`, `postgresql-replication-password` which will override `replication.password` and `postgresql-ldap-password` which will be used to authenticate on LDAP. The value is evaluated as a template. | `nil` |
+| `postgresqlPostgresPassword` | PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`, in which case `postgres` is the admin username). | _random 10 character alphanumeric string_ |
+| `postgresqlUsername` | PostgreSQL user (creates a non-admin user when `postgresqlUsername` is not `postgres`) | `postgres` |
+| `postgresqlPassword` | PostgreSQL user password | _random 10 character alphanumeric string_ |
+| `postgresqlDatabase` | PostgreSQL database | `nil` |
+| `postgresqlDataDir` | PostgreSQL data dir folder | `/bitnami/postgresql` (same value as persistence.mountPath) |
+| `extraEnv` | Any extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `[]` |
+| `extraEnvVarsCM` | Name of a Config Map containing extra environment variables you would like to pass on to the pod. The value is evaluated as a template. | `nil` |
+| `postgresqlInitdbArgs` | PostgreSQL initdb extra arguments | `nil` |
+| `postgresqlInitdbWalDir` | PostgreSQL location for transaction log | `nil` |
+| `postgresqlConfiguration` | Runtime Config Parameters | `nil` |
+| `postgresqlExtendedConf` | Extended Runtime Config Parameters (appended to main or default configuration) | `nil` |
+| `pgHbaConfiguration` | Content of pg_hba.conf | `nil (do not create pg_hba.conf)` |
+| `postgresqlSharedPreloadLibraries` | Shared preload libraries (comma-separated list) | `pgaudit` |
+| `postgresqlMaxConnections` | Maximum total connections | `nil` |
+| `postgresqlPostgresConnectionLimit` | Maximum total connections for the postgres user | `nil` |
+| `postgresqlDbUserConnectionLimit` | Maximum total connections for the non-admin user | `nil` |
+| `postgresqlTcpKeepalivesInterval` | TCP keepalives interval | `nil` |
+| `postgresqlTcpKeepalivesIdle` | TCP keepalives idle | `nil` |
+| `postgresqlTcpKeepalivesCount` | TCP keepalives count | `nil` |
+| `postgresqlStatementTimeout` | Statement timeout | `nil` |
+| `postgresqlPghbaRemoveFilters` | Comma-separated list of patterns to remove from the pg_hba.conf file | `nil` |
+| `customLivenessProbe` | Override default liveness probe | `nil` |
+| `customReadinessProbe` | Override default readiness probe | `nil` |
+| `audit.logHostname` | Add client hostnames to the log file | `false` |
+| `audit.logConnections` | Add client log-in operations to the log file | `false` |
+| `audit.logDisconnections` | Add client log-out operations to the log file | `false` |
+| `audit.pgAuditLog` | Add operations to log using the pgAudit extension | `nil` |
+| `audit.clientMinMessages` | Message log level to share with the user | `nil` |
+| `audit.logLinePrefix` | Template string for the log line prefix | `nil` |
+| `audit.logTimezone` | Timezone for the log timestamps | `nil` |
+| `configurationConfigMap` | ConfigMap with the PostgreSQL configuration files (Note: Overrides `postgresqlConfiguration` and `pgHbaConfiguration`). The value is evaluated as a template. | `nil` |
+| `extendedConfConfigMap` | ConfigMap with the extended PostgreSQL configuration files. The value is evaluated as a template. | `nil` |
+| `initdbScripts` | Dictionary of initdb scripts | `nil` |
+| `initdbUser` | PostgreSQL user to execute the .sql and sql.gz scripts | `nil` |
+| `initdbPassword` | Password for the user specified in `initdbUser` | `nil` |
+| `initdbScriptsConfigMap` | ConfigMap with the initdb scripts (Note: Overrides `initdbScripts`). The value is evaluated as a template. | `nil` |
+| `initdbScriptsSecret` | Secret with initdb scripts that contain sensitive information (Note: can be used with `initdbScriptsConfigMap` or `initdbScripts`). The value is evaluated as a template. | `nil` |
+| `service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.port` | PostgreSQL port | `5432` |
+| `service.nodePort` | Kubernetes Service nodePort | `nil` |
+| `service.annotations` | Annotations for PostgreSQL service | `{}` (evaluated as a template) |
+| `service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
+| `service.loadBalancerSourceRanges` | Addresses that are allowed when svc is LoadBalancer | `[]` (evaluated as a template) |
+| `schedulerName` | Name of the k8s scheduler (other than default) | `nil` |
+| `shmVolume.enabled` | Enable emptyDir volume for /dev/shm for master and slave(s) Pod(s) | `true` |
+| `shmVolume.chmod.enabled` | Run at init chmod 777 of the /dev/shm (ignored if `volumePermissions.enabled` is `false`) | `true` |
+| `persistence.enabled` | Enable persistence using PVC | `true` |
+| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template. | `nil` |
+| `persistence.mountPath` | Path to mount the volume at | `/bitnami/postgresql` |
+| `persistence.subPath` | Subdirectory of the volume to mount at | `""` |
+| `persistence.storageClass` | PVC Storage Class for PostgreSQL volume | `nil` |
+| `persistence.accessModes` | PVC Access Mode for PostgreSQL volume | `[ReadWriteOnce]` |
+| `persistence.size` | PVC Storage Request for PostgreSQL volume | `8Gi` |
+| `persistence.annotations` | Annotations for the PVC | `{}` |
+| `commonAnnotations` | Annotations to be added to all deployed resources (rendered as a template) | `{}` |
+| `master.nodeSelector` | Node labels for pod assignment (postgresql master) | `{}` |
+| `master.affinity` | Affinity labels for pod assignment (postgresql master) | `{}` |
+| `master.tolerations` | Toleration labels for pod assignment (postgresql master) | `[]` |
+| `master.annotations` | Map of annotations to add to the statefulset (postgresql master) | `{}` |
+| `master.labels` | Map of labels to add to the statefulset (postgresql master) | `{}` |
+| `master.podAnnotations` | Map of annotations to add to the pods (postgresql master) | `{}` |
+| `master.podLabels` | Map of labels to add to the pods (postgresql master) | `{}` |
+| `master.priorityClassName` | Priority Class to use for each pod (postgresql master) | `nil` |
+| `master.extraInitContainers` | Additional init containers to add to the pods (postgresql master) | `[]` |
+| `master.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql master) | `[]` |
+| `master.extraVolumes` | Additional volumes to add to the pods (postgresql master) | `[]` |
+| `master.sidecars` | Add additional containers to the pod | `[]` |
+| `master.service.type` | Allows using a different service type for Master | `nil` |
+| `master.service.nodePort` | Allows using a different nodePort for Master | `nil` |
+| `master.service.clusterIP` | Allows using a different clusterIP for Master | `nil` |
+| `masterAsStandBy.enabled` | Whether to enable the current cluster's Master as a standby server of another cluster or not. | `false` |
+| `masterAsStandBy.masterHost` | The Host of the replication Master in the other cluster. | `nil` |
+| `masterAsStandBy.masterPort` | The Port of the replication Master in the other cluster. | `nil` |
+| `slave.nodeSelector` | Node labels for pod assignment (postgresql slave) | `{}` |
+| `slave.affinity` | Affinity labels for pod assignment (postgresql slave) | `{}` |
+| `slave.tolerations` | Toleration labels for pod assignment (postgresql slave) | `[]` |
+| `slave.annotations` | Map of annotations to add to the statefulsets (postgresql slave) | `{}` |
+| `slave.resources` | CPU/Memory resource requests/limits override for slaves. Will fall back to `values.resources` if not defined. | `{}` |
+| `slave.labels` | Map of labels to add to the statefulsets (postgresql slave) | `{}` |
+| `slave.podAnnotations` | Map of annotations to add to the pods (postgresql slave) | `{}` |
+| `slave.podLabels` | Map of labels to add to the pods (postgresql slave) | `{}` |
+| `slave.priorityClassName` | Priority Class to use for each pod (postgresql slave) | `nil` |
+| `slave.extraInitContainers` | Additional init containers to add to the pods (postgresql slave) | `[]` |
+| `slave.extraVolumeMounts` | Additional volume mounts to add to the pods (postgresql slave) | `[]` |
+| `slave.extraVolumes` | Additional volumes to add to the pods (postgresql slave) | `[]` |
+| `slave.sidecars` | Add additional containers to the pod | `[]` |
+| `slave.service.type` | Allows using a different service type for Slave | `nil` |
+| `slave.service.nodePort` | Allows using a different nodePort for Slave | `nil` |
+| `slave.service.clusterIP` | Allows using a different clusterIP for Slave | `nil` |
+| `slave.persistence.enabled` | Whether to enable slave replicas persistence | `true` |
+| `terminationGracePeriodSeconds` | Seconds the pod needs to terminate gracefully | `nil` |
+| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` |
+| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` |
+| `securityContext.enabled` | Enable security context | `true` |
+| `securityContext.fsGroup` | Group ID for the pod | `1001` |
+| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` |
+| `containerSecurityContext.enabled` | Enable container security context | `true` |
+| `containerSecurityContext.runAsUser` | User ID for the container | `1001` |
+| `serviceAccount.enabled` | Enable service account (Note: Service Account will only be automatically created if `serviceAccount.name` is not set) | `false` |
+| `serviceAccount.name` | Name of existing service account | `nil` |
+| `livenessProbe.enabled` | Enable livenessProbe | `true` |
+| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
+| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
+| `networkPolicy.explicitNamespacesSelector` | A Kubernetes LabelSelector to explicitly select namespaces from which ingress traffic could be allowed | `{}` |
+| `livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
+| `livenessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `livenessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
+| `livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `readinessProbe.enabled` | Enable readinessProbe | `true` |
+| `readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
+| `readinessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `readinessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
+| `readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `tls.enabled` | Enable TLS traffic support | `false` |
+| `tls.preferServerCiphers` | Whether to use the server's TLS cipher preferences rather than the client's | `true` |
+| `tls.certificatesSecret` | Name of an existing secret that contains the certificates | `nil` |
+| `tls.certFilename` | Certificate filename | `""` |
+| `tls.certKeyFilename` | Certificate key filename | `""` |
+| `tls.certCAFilename` | CA Certificate filename. If provided, PostgreSQL will authenticate TLS/SSL clients by requesting a certificate from them. | `nil` |
+| `tls.crlFilename` | File containing a Certificate Revocation List | `nil` |
+| `metrics.enabled` | Start a prometheus exporter | `false` |
+| `metrics.service.type` | Kubernetes Service type | `ClusterIP` |
+| `service.clusterIP` | Static clusterIP or None for headless services | `nil` |
+| `metrics.service.annotations` | Additional annotations for metrics exporter pod | `{ prometheus.io/scrape: "true", prometheus.io/port: "9187"}` |
+| `metrics.service.loadBalancerIP` | loadBalancerIP if metrics service type is `LoadBalancer` | `nil` |
+| `metrics.serviceMonitor.enabled` | Set this to `true` to create ServiceMonitor for Prometheus operator | `false` |
+| `metrics.serviceMonitor.additionalLabels` | Additional labels that can be used so ServiceMonitor will be discovered by Prometheus | `{}` |
+| `metrics.serviceMonitor.namespace` | Optional namespace in which to create ServiceMonitor | `nil` |
+| `metrics.serviceMonitor.interval` | Scrape interval. If not set, the Prometheus default scrape interval is used | `nil` |
+| `metrics.serviceMonitor.scrapeTimeout` | Scrape timeout. If not set, the Prometheus default scrape timeout is used | `nil` |
+| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
+| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | the same namespace as postgresql |
+| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` |
+| `metrics.image.registry` | PostgreSQL Exporter Image registry | `docker.io` |
+| `metrics.image.repository` | PostgreSQL Exporter Image name | `bitnami/postgres-exporter` |
+| `metrics.image.tag` | PostgreSQL Exporter Image tag | `{TAG_NAME}` |
+| `metrics.image.pullPolicy` | PostgreSQL Exporter Image pull policy | `IfNotPresent` |
+| `metrics.image.pullSecrets` | Specify Image pull secrets | `nil` (does not add image pull secrets to deployed pods) |
+| `metrics.customMetrics` | Additional custom metrics | `nil` |
+| `metrics.extraEnvVars` | Extra environment variables to add to exporter | `{}` (evaluated as a template) |
+| `metrics.securityContext.*` | Other container security context to be included as-is in the container spec | `{}` |
+| `metrics.securityContext.enabled` | Enable security context for metrics | `false` |
+| `metrics.securityContext.runAsUser` | User ID for the container for metrics | `1001` |
+| `metrics.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated | 30 |
+| `metrics.livenessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `metrics.livenessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `metrics.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
+| `metrics.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `metrics.readinessProbe.enabled` | Enable readinessProbe | `true` |
+| `metrics.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated | 5 |
+| `metrics.readinessProbe.periodSeconds` | How often to perform the probe | 10 |
+| `metrics.readinessProbe.timeoutSeconds` | When the probe times out | 5 |
+| `metrics.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | 6 |
+| `metrics.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed | 1 |
+| `updateStrategy` | Update strategy policy | `{type: "RollingUpdate"}` |
+| `psp.create` | Create Pod Security Policy | `false` |
+| `rbac.create` | Create Role and RoleBinding (required for PSP to work) | `false` |
+| `extraDeploy` | Array of extra objects to deploy with the release (evaluated as a template). | `nil` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```console
+$ helm install my-release \
+    --set postgresqlPassword=secretpassword,postgresqlDatabase=my-database \
+    bitnami/postgresql
+```
+
+The above command sets the PostgreSQL `postgres` account password to `secretpassword`. Additionally, it creates a database named `my-database`.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```console
+$ helm install my-release -f values.yaml bitnami/postgresql
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
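+
+For example, to pin the PostgreSQL image to a fixed tag at install time (the tag below is illustrative; 11.9.0 is this chart's appVersion, and any immutable tag from the Bitnami repository can be used):
+
+```console
+$ helm install my-release bitnami/postgresql --set image.tag=11.9.0
+```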
+
+Bitnami will release a new chart updating its containers if a new version of the main container, significant changes, or critical vulnerabilities exist.
+
+### Production configuration and horizontal scaling
+
+This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
+
+- Enable replication:
+```diff
+- replication.enabled: false
++ replication.enabled: true
+```
+
+- Number of slave replicas:
+```diff
+- replication.slaveReplicas: 1
++ replication.slaveReplicas: 2
+```
+
+- Set synchronous commit mode:
+```diff
+- replication.synchronousCommit: "off"
++ replication.synchronousCommit: "on"
+```
+
+- Number of replicas that will have synchronous replication:
+```diff
+- replication.numSynchronousReplicas: 0
++ replication.numSynchronousReplicas: 1
+```
+
+- Start a prometheus exporter:
+```diff
+- metrics.enabled: false
++ metrics.enabled: true
+```
+
+To horizontally scale this chart, modify the number of nodes in your PostgreSQL deployment, e.g. via the `replication.slaveReplicas` parameter. You can also use the `values-production.yaml` file or modify the parameters shown above.
+
+### Customizing Master and Slave services in a replicated configuration
+
+At the top level, there is a service object which defines the services for both master and slave. For deeper customization, there are service objects for the master and slave types individually. This allows you to override the values in the top-level service object so that the master and slave can be of different service types and have different clusterIPs / nodePorts. Also, in case you want the master and slave to be of type nodePort, you will need to set their nodePorts to different values to prevent a collision. The values that are deeper in the master.service or slave.service objects will take precedence over the top-level service object.
+
+### Change PostgreSQL version
+
+To modify the PostgreSQL version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/postgresql/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters.
+
+### postgresql.conf / pg_hba.conf files as configMap
+
+This Helm chart also supports customizing the whole configuration file.
+
+Add your custom file to "files/postgresql.conf" in your working directory. This file will be mounted as a configMap to the containers and it will be used for configuring the PostgreSQL server.
+
+Alternatively, you can specify PostgreSQL configuration parameters using the `postgresqlConfiguration` parameter as a dict, using camelCase, e.g. {"sharedBuffers": "500MB"}.
+
+In addition to these options, you can also set an external ConfigMap with all the configuration files. This is done by setting the `configurationConfigMap` parameter. Note that this will override the two previous options.
+
+### Allow settings to be loaded from files other than the default `postgresql.conf`
+
+If you don't want to provide the whole PostgreSQL configuration file and only specify certain parameters, you can add your extended `.conf` files to "files/conf.d/" in your working directory.
+Those files will be mounted as a configMap to the containers, adding to/overwriting the default configuration via the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`.
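+
+For example, a drop-in file could be prepared as follows before installing the chart (the filename and the two settings are illustrative):
+
+```console
+$ mkdir -p files/conf.d
+$ cat > files/conf.d/extended.conf <<EOF
+max_connections = 200
+shared_buffers = 512MB
+EOF
+```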
+
+Alternatively, you can also set an external ConfigMap with all the extra configuration files. This is done by setting the `extendedConfConfigMap` parameter. Note that this will override the previous option.
+
+### Initialize a fresh instance
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image allows you to use your custom scripts to initialize a fresh instance. In order to execute the scripts, they must be located inside the chart folder `files/docker-entrypoint-initdb.d` so they can be consumed as a ConfigMap.
+
+Alternatively, you can specify custom scripts using the `initdbScripts` parameter as a dict.
+
+In addition to these options, you can also set an external ConfigMap with all the initialization scripts. This is done by setting the `initdbScriptsConfigMap` parameter. Note that this will override the two previous options. If your initialization scripts contain sensitive information such as credentials or passwords, you can use the `initdbScriptsSecret` parameter.
+
+The allowed extensions are `.sh`, `.sql` and `.sql.gz`.
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of an existing secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+
+For example:
+
+* First, create the secret with the certificate files:
+
+  ```console
+  kubectl create secret generic certificates-tls-secret --from-file=./cert.crt --from-file=./cert.key --from-file=./ca.crt
+  ```
+
+* Then, use the following parameters:
+
+  ```console
+  volumePermissions.enabled=true
+  tls.enabled=true
+  tls.certificatesSecret="certificates-tls-secret"
+  tls.certFilename="cert.crt"
+  tls.certKeyFilename="cert.key"
+  ```
+
+  > Note TLS and VolumePermissions: PostgreSQL requires certain permissions on sensitive files (such as certificate keys) to start up. Due to an ongoing [issue](https://github.com/kubernetes/kubernetes/issues/57923) regarding kubernetes permissions and the use of `containerSecurityContext.runAsUser`, you must enable `volumePermissions` to ensure everything works as expected.
+
+### Sidecars
+
+If you need additional containers to run within the same pod as PostgreSQL (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
+
+```yaml
+# For the PostgreSQL master
+master:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+# For the PostgreSQL replicas
+slave:
+  sidecars:
+    - name: your-image-name
+      image: your-image
+      imagePullPolicy: Always
+      ports:
+        - name: portname
+          containerPort: 1234
+```
+
+### Metrics
+
+The chart can optionally start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9187) is not exposed, and it is expected that the metrics are collected from inside the k8s cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml).
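+
+For example, a minimal way to turn the exporter on, using the parameters documented above (enabling the ServiceMonitor additionally assumes the Prometheus operator is installed in the cluster):
+
+```console
+$ helm install my-release bitnami/postgresql \
+    --set metrics.enabled=true \
+    --set metrics.serviceMonitor.enabled=true
+```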
+
+The exporter allows creating custom metrics from additional SQL queries. See the Chart's `values.yaml` for an example and consult the [exporter's documentation](https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file) for more details.
+
+### Use of global variables
+
+In more complex scenarios, we may have the following tree of dependencies:
+
+```
+                  +--------------+
+                  |              |
+     +------------+   Chart 1    +-----------+
+     |            |              |           |
+     |            +------+-------+           |
+     |                   |                   |
+     v                   v                   v
++----+---------+  +------+--------+  +------+--------+
+|              |  |               |  |               |
+|  PostgreSQL  |  |  Sub-chart 1  |  |  Sub-chart 2  |
+|              |  |               |  |               |
++--------------+  +---------------+  +---------------+
+```
+
+The three charts below depend on the parent chart, Chart 1. However, subcharts 1 and 2 may need to connect to PostgreSQL as well. In order to do so, subcharts 1 and 2 need to know the PostgreSQL credentials, so one option could be to deploy Chart 1 with the following parameters:
+
+```
+postgresql.postgresqlPassword=testtest
+subchart1.postgresql.postgresqlPassword=testtest
+subchart2.postgresql.postgresqlPassword=testtest
+postgresql.postgresqlDatabase=db1
+subchart1.postgresql.postgresqlDatabase=db1
+subchart2.postgresql.postgresqlDatabase=db1
+```
+
+If the number of dependent sub-charts increases, installing the chart with parameters can become increasingly difficult. An alternative would be to set the credentials using global variables as follows:
+
+```
+global.postgresql.postgresqlPassword=testtest
+global.postgresql.postgresqlDatabase=db1
+```
+
+This way, the credentials will be available in all of the subcharts.
+
+## Persistence
+
+The [Bitnami PostgreSQL](https://github.com/bitnami/bitnami-docker-postgresql) image stores the PostgreSQL data and configurations at the `/bitnami/postgresql` path of the container.
+
+Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
+See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
+
+If the volume already contains data, syncing all commits to the standby nodes will fail; for details, see [this code](https://github.com/bitnami/bitnami-docker-postgresql/blob/8725fe1d7d30ebe8d9a16e9175d05f7ad9260c93/9.6/debian-9/rootfs/libpostgresql.sh#L518-L556). If you need to keep that data, convert it to SQL and import it after `helm install` has finished.
+
+## NetworkPolicy
+
+To enable network policy for PostgreSQL, install [a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin), and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+```console
+$ kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+```
+
+With NetworkPolicy enabled, traffic will be limited to just port 5432.
+
+For a more precise policy, set `networkPolicy.allowExternal=false`. This will only allow pods with the generated client label to connect to PostgreSQL.
+This label will be displayed in the output of a successful install.
+
+## Differences between the Bitnami PostgreSQL image and the [Docker Official](https://hub.docker.com/_/postgres) image
+
+- The Docker Official PostgreSQL image does not support replication.
+  Any replication environment variables you pass will be ignored. The only environment variables supported by the Docker Official image are POSTGRES_USER, POSTGRES_DB, POSTGRES_PASSWORD, POSTGRES_INITDB_ARGS, POSTGRES_INITDB_WALDIR and PGDATA. All the remaining environment variables are specific to the Bitnami PostgreSQL image.
+- The Bitnami PostgreSQL image is non-root by default. This requires that you run the pod with `securityContext` and update the permissions of the volume with an `initContainer`. A key benefit of this configuration is that the pod follows security best practices and is prepared to run on Kubernetes distributions with hard security constraints like OpenShift.
+- For OpenShift, one may either define the runAsUser and fsGroup accordingly, or try this more dynamic option: volumePermissions.securityContext.runAsUser="auto",securityContext.enabled=false,containerSecurityContext.enabled=false,shmVolume.chmod.enabled=false
+
+### Deploy chart using Docker Official PostgreSQL Image
+
+From chart version 4.0.0, it is possible to use this chart with the Docker Official PostgreSQL image.
+Besides specifying the new Docker repository and tag, it is important to modify the PostgreSQL data directory and volume mount point. Basically, the PostgreSQL data dir cannot be the mount point directly; it has to be a subdirectory.
+
+```
+image.repository=postgres
+image.tag=10.6
+postgresqlDataDir=/data/pgdata
+persistence.mountPath=/data/
+```
+
+## Upgrade
+
+It's necessary to specify the existing passwords while performing an upgrade to ensure the secrets are not updated with invalid randomly generated passwords. Remember to specify the existing values of the `postgresqlPassword` and `replication.password` parameters when upgrading the chart:
+
+```bash
+$ helm upgrade my-release stable/postgresql \
+    --set postgresqlPassword=[POSTGRESQL_PASSWORD] \
+    --set replication.password=[REPLICATION_PASSWORD]
+```
+
+> Note: you need to substitute the placeholders _[POSTGRESQL_PASSWORD]_, and _[REPLICATION_PASSWORD]_ with the values obtained from instructions in the installation notes.
+
+## 9.0.0
+
+In this version the chart was adapted to follow the Helm label best practices, see [PR 3021](https://github.com/bitnami/charts/pull/3021). This means backward compatibility is not guaranteed when upgrading the chart to this major version.
+
+As a workaround, you can delete the existing statefulset (with the `--cascade=false` flag, pods are not deleted) before upgrading the chart. For example, this can be a valid workflow:
+
+- Deploy an old version (8.X.X):
+```console
+$ helm install postgresql bitnami/postgresql --version 8.10.14
+```
+
+- The old version is up and running:
+```console
+$ helm ls
+NAME        NAMESPACE  REVISION  UPDATED                                  STATUS    CHART               APP VERSION
+postgresql  default    1         2020-08-04 13:39:54.783480286 +0000 UTC  deployed  postgresql-8.10.14  11.8.0
+
+$ kubectl get pods
+NAME                      READY   STATUS    RESTARTS   AGE
+postgresql-postgresql-0   1/1     Running   0          76s
+```
+
+- The upgrade to the latest version (9.X.X) is going to fail:
+```console
+$ helm upgrade postgresql bitnami/postgresql
+Error: UPGRADE FAILED: cannot patch "postgresql-postgresql" with kind StatefulSet: StatefulSet.apps "postgresql-postgresql" is invalid: spec: Forbidden: updates to statefulset spec for fields other than 'replicas', 'template', and 'updateStrategy' are forbidden
+```
+
+- Delete the statefulset:
+```console
+$ kubectl delete statefulsets.apps --cascade=false postgresql-postgresql
+statefulset.apps "postgresql-postgresql" deleted
+```
+
+- Now the upgrade works:
+```console
+$ helm upgrade postgresql bitnami/postgresql
+$ helm ls
+NAME        NAMESPACE  REVISION  UPDATED                                  STATUS    CHART             APP VERSION
+postgresql  default    3         2020-08-04 13:42:08.020385884 +0000 UTC  deployed  postgresql-9.1.2  11.8.0
+```
+
+- We can kill the existing pod, and the new statefulset will create a new one:
+```console
+$ kubectl delete pod postgresql-postgresql-0
+pod "postgresql-postgresql-0" deleted
+
+$ kubectl get pods
+NAME                      READY   STATUS    RESTARTS   AGE
+postgresql-postgresql-0   1/1     Running   0          19s
+```
+
+Please note that without the `--cascade=false` flag, both objects (statefulset and pod) would be removed, and both would be deployed again by the `helm upgrade` command.
+
+## 8.0.0
+
+Prefixes the port names with their protocols to comply with Istio conventions.
+
+If you depend on the port names in your setup, make sure to update them to reflect this change.
+
+## 7.1.0
+
+Adds support for LDAP configuration.
+
+## 7.0.0
+
+Helm performs a lookup for the object based on its group (apps), version (v1), and kind (Deployment). Also known as its GroupVersionKind, or GVK. Changing the GVK is considered a compatibility breaker from Kubernetes' point of view, so you cannot "upgrade" those objects to the new GVK in-place. Earlier versions of Helm 3 did not perform the lookup correctly, which has since been fixed to match the spec.
+
+In https://github.com/helm/charts/pull/17281 the `apiVersion` of the statefulset resources was updated to `apps/v1` in line with the API deprecations, resulting in compatibility breakage.
+
+This major version bump signifies this change.
+
+## 6.5.7
+
+In this version, the chart will use PostgreSQL with the Postgis extension included. The version used with PostgreSQL versions 10, 11 and 12 is Postgis 2.5. It has been compiled with the following dependencies:
+
+- protobuf
+- protobuf-c
+- json-c
+- geos
+- proj
+
+## 5.0.0
+
+In this version, the **chart is using PostgreSQL 11 instead of PostgreSQL 10**. You can find the main differences and notable changes in the following links: [https://www.postgresql.org/about/news/1894/](https://www.postgresql.org/about/news/1894/) and [https://www.postgresql.org/about/featurematrix/](https://www.postgresql.org/about/featurematrix/).
+
+For major releases of PostgreSQL, the internal data storage format is subject to change, thus complicating upgrades; you may see errors like the following in the logs:
+
+```console
+Welcome to the Bitnami postgresql container
+Subscribe to project updates by watching https://github.com/bitnami/bitnami-docker-postgresql
+Submit issues and feature requests at https://github.com/bitnami/bitnami-docker-postgresql/issues
+Send us your feedback at containers@bitnami.com
+
+INFO ==> ** Starting PostgreSQL setup **
+INFO ==> Validating settings in POSTGRESQL_* env vars..
+INFO ==> Initializing PostgreSQL database...
+INFO ==> postgresql.conf file not detected. Generating it...
+INFO ==> pg_hba.conf file not detected. Generating it...
+INFO ==> Deploying PostgreSQL with persisted data...
+INFO ==> Configuring replication parameters
+INFO ==> Loading custom scripts...
+INFO ==> Enabling remote connections
+INFO ==> Stopping PostgreSQL...
+INFO ==> ** PostgreSQL setup finished! **
+
+INFO ==> ** Starting PostgreSQL **
+ [1] FATAL: database files are incompatible with server
+ [1] DETAIL: The data directory was initialized by PostgreSQL version 10, which is not compatible with this version 11.3.
+```
+
+In this case, you should migrate the data from the old chart to the new one following an approach similar to that described in [this section](https://www.postgresql.org/docs/current/upgrading.html#UPGRADING-VIA-PGDUMPALL) of the official documentation. Basically, create a database dump in the old chart, then move and restore it in the new one.
+
+## 4.0.0
+
+This chart will use the Bitnami PostgreSQL container by default, starting from version `10.7.0-r68`. That version moves the initialization logic from node.js to bash. This new version of the chart requires setting the `POSTGRES_PASSWORD` in the slaves as well, in order to properly configure the `pg_hba.conf` file. Users of previous versions of the chart are advised to upgrade immediately.
+
+IMPORTANT: If you do not want to upgrade the chart version, then make sure you use the `10.7.0-r68` version of the container. Otherwise, you will get this error:
+
+```
+The POSTGRESQL_PASSWORD environment variable is empty or not set. Set the environment variable ALLOW_EMPTY_PASSWORD=yes to allow the container to be started with blank passwords. This is recommended only for development
+```
+
+## 3.0.0
+
+This release makes it possible to specify different nodeSelector, affinity and tolerations for master and slave pods.
+It also fixes an issue with the `postgresql.master.fullname` helper template not obeying fullnameOverride.
+
+#### Breaking changes
+
+- `affinity` has been renamed to `master.affinity` and `slave.affinity`.
+- `tolerations` has been renamed to `master.tolerations` and `slave.tolerations`.
+- `nodeSelector` has been renamed to `master.nodeSelector` and `slave.nodeSelector`.
+
+## 2.0.0
+
+In order to upgrade from the `0.X.X` branch to `1.X.X`, you should follow the steps below:
+
+- Obtain the service name (`SERVICE_NAME`) and password (`OLD_PASSWORD`) of the existing postgresql chart.
+  You can find the instructions to obtain the password in the NOTES.txt; the service name can be obtained by running
+
+```console
+$ kubectl get svc
+```
+
+- Install (not upgrade) the new version:
+
+```console
+$ helm repo update
+$ helm install my-release bitnami/postgresql
+```
+
+- Connect to the new pod (you can obtain the name by running `kubectl get pods`):
+
+```console
+$ kubectl exec -it NAME bash
+```
+
+- Once logged in, create a dump file from the previous database using `pg_dump`; for that, connect to the previous postgresql chart:
+
+```console
+$ pg_dump -h SERVICE_NAME -U postgres DATABASE_NAME > /tmp/backup.sql
+```
+
+After running the above command you will be prompted for a password; this is the previous chart's password (`OLD_PASSWORD`).
+This operation could take some time depending on the database size.
+
+- Once you have the backup file, you can restore it with a command like the one below:
+
+```console
+$ psql -U postgres DATABASE_NAME < /tmp/backup.sql
+```
+
+In this case, you are accessing the local postgresql, so the password should be the new one (you can find it in NOTES.txt).
+
+If you want to restore the database and the database schema does not exist, it is necessary to first follow the steps described below:
+
+```console
+$ psql -U postgres
+postgres=# drop database DATABASE_NAME;
+postgres=# create database DATABASE_NAME;
+postgres=# create user USER_NAME;
+postgres=# alter role USER_NAME with password 'BITNAMI_USER_PASSWORD';
+postgres=# grant all privileges on database DATABASE_NAME to USER_NAME;
+postgres=# alter database DATABASE_NAME owner to USER_NAME;
+```
diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/.helmignore b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/.helmignore
new file mode 100755
index 000000000..50af03172
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/Chart.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/Chart.yaml
new file mode 100755
index 000000000..5566cdc21
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/Chart.yaml
@@ -0,0 +1,22 @@
+annotations:
+  category: Infrastructure
+apiVersion: v1
+appVersion: 0.8.1
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- http://www.bitnami.com/
+version: 0.8.1
diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/README.md b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/README.md
new file mode 100755
index 000000000..acdbe7bfa
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/README.md
@@ -0,0 +1,286 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 0.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers that can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 2.12+ or Helm 3.0-beta3+
+
+## Parameters
+
+The following table lists the helpers available in the library, which are scoped in different sections.
+
+### Affinities
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` |
+| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
+| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` |
+
+### Capabilities
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context |
+| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context |
+| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context |
+
+### Errors
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` |
+
+### Images
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. |
+| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+
+### Labels
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.labels.standard` | Return Kubernetes standard labels | `.` Chart context |
+| `common.labels.matchLabels` | Return the labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.names.name` | Expand the name of the chart or use `.Values.nameOverride` | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name. | `.` Chart context |
+| `common.names.chart` | Chart name plus version | `.` Chart context |
+
+### Secrets
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.secrets.name` | Generate the name of the secret. | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key` | Generate secret key. | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+
+### Storage
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.storage.class` | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.tplvalues.render` | Renders a value that contains a template | `dict "value" .Values.path.to.the.Value "context" $`; `value` is the value that should be rendered as a template, and `context` is frequently the chart context `$` or `.` |
+
+### Utils
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.utils.fieldToEnvVar` | Build an environment variable name given a field. | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value. | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path | `dict "key" "path.to.key" "context" $` |
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "context" $`; secret and field are optional. In case they are given, the helper will generate a how-to-get instruction. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $`; the subchart field is optional and could be true or false, depending on where you will use the mariadb chart and the helper. |
+| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $`; the subchart field is optional and could be true or false, depending on where you will use the postgresql chart and the helper. |
+
+### Warnings
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.warnings.rollingTag` | Warning about using a rolling tag. | `ImageRoot`, see [ImageRoot](#imageroot) for the structure. |
+
+## Special input schemas
+
+### ImageRoot
+
+```yaml
+registry:
+  type: string
+  description: Docker registry where the image is located
+  example: docker.io
+
+repository:
+  type: string
+  description: Repository and image name
+  example: bitnami/nginx
+
+tag:
+  type: string
+  description: image tag
+  example: 1.16.1-debian-10-r63
+
+pullPolicy:
+  type: string
+  description: Specify an imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+
+pullSecrets:
+  type: array
+  items:
+    type: string
+  description: Optionally specify an array of imagePullSecrets.
+
+debug:
+  type: boolean
+  description: Set to true if you would like to see extra information in logs
+  example: false
+
+## An instance would be:
+# registry: docker.io
+# repository: bitnami/nginx
+# tag: 1.16.1-debian-10-r63
+# pullPolicy: IfNotPresent
+# debug: false
+```
+
+### Persistence
+
+```yaml
+enabled:
+  type: boolean
+  description: Whether to enable persistence.
+  example: true
+
+storageClass:
+  type: string
+  description: Persistent Volume Storage Class. If set to "-", storageClassName is "", which disables dynamic provisioning.
+  example: "-"
+
+accessMode:
+  type: string
+  description: Access mode for the Persistent Volume Storage.
+  example: ReadWriteOnce
+
+size:
+  type: string
+  description: Size of the Persistent Volume Storage.
+  example: 8Gi
+
+path:
+  type: string
+  description: Path to be persisted.
+  example: /bitnami
+
+## An instance would be:
+# enabled: true
+# storageClass: "-"
+# accessMode: ReadWriteOnce
+# size: 8Gi
+# path: /bitnami
+```
+
+### ExistingSecret
+
+```yaml
+name:
+  type: string
+  description: Name of the existing secret.
+  example: mySecret
+keyMapping:
+  description: Mapping between the expected key name and the name of the key in the existing secret.
+  type: object
+
+## An instance would be:
+# name: mySecret
+# keyMapping:
+#   password: myPasswordKey
+```
+
+#### Example of use
+
+When we store sensitive data for a deployment in a secret, sometimes we want to give users the possibility of using their existing secrets.
+
+```yaml
+# templates/secret.yaml
+---
+apiVersion: v1
+kind: Secret
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels:
+    app: {{ include "common.names.fullname" . }}
+type: Opaque
+data:
+  password: {{ .Values.password | b64enc | quote }}
+
+# templates/dpl.yaml
+---
+...
+      env:
+        - name: PASSWORD
+          valueFrom:
+            secretKeyRef:
+              name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }}
+              key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }}
+...
+
+# values.yaml
+---
+name: mySecret
+keyMapping:
+  password: myPasswordKey
+```
+
+### ValidateValue
+
+#### NOTES.txt
+
+```console
+{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}}
+{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}}
+
+{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }}
+```
+
+If we force those values to be empty, we will see some alerts:
+
+```console
+$ helm install test mychart --set path.to.value00="",path.to.value01=""
+ 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command.
To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Notable changes + +N/A diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_affinities.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_affinities.tpl new file mode 100755 index 000000000..40f575cb6 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . -}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . 
-}}
+  {{- else if eq .type "hard" }}
+    {{- include "common.affinities.pods.hard" . -}}
+  {{- end -}}
+{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_capabilities.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_capabilities.tpl new file mode 100755 index 000000000..143bef2a4 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_capabilities.tpl @@ -0,0 +1,33 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Return the appropriate apiVersion for deployment.
+*/}}
+{{- define "common.capabilities.deployment.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for statefulset.
+*/}}
+{{- define "common.capabilities.statefulset.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "apps/v1beta1" -}}
+{{- else -}}
+{{- print "apps/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for ingress.
+*/}}
+{{- define "common.capabilities.ingress.apiVersion" -}}
+{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}}
+{{- print "extensions/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- end -}}
+{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_errors.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_errors.tpl new file mode 100755 index 000000000..d6d3ec65a --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_errors.tpl @@ -0,0 +1,20 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading with empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
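+
+Note (editorial, derived from the define below): the validation strings are joined together, so the
+release only fails when at least one of them is non-empty and this is an upgrade
+(Release.IsUpgrade); fresh installs are unaffected.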
+*/}} +{{- define "common.errors.upgrade.passwords.empty" -}} + {{- $validationErrors := join "" .validationErrors -}} + {{- if and $validationErrors .context.Release.IsUpgrade -}} + {{- $errorString := "\nPASSWORDS ERROR: you must provide your current passwords when upgrade the release%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_images.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_images.tpl new file mode 100755 index 000000000..aafde9f3b --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_images.tpl @@ -0,0 +1,43 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_labels.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_labels.tpl new file mode 100755 index 000000000..252066c7e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_names.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_names.tpl new file mode 100755 index 000000000..adf2a74f4 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
+*/}}
+{{- define "common.names.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "common.names.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "common.names.fullname" -}}
+{{- if .Values.fullnameOverride -}}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- $name := default .Chart.Name .Values.nameOverride -}}
+{{- if contains $name .Release.Name -}}
+{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
+{{- else -}}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+{{- end -}}
+{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_secrets.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_secrets.tpl new file mode 100755 index 000000000..8eee91d21 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_secrets.tpl @@ -0,0 +1,49 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Generate secret name.
+
+Usage:
+{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }}
+
+Params:
+  - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one.
+info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret
+  - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment.
+  - context - Dict - Required. The context for the template evaluation.
+*/}}
+{{- define "common.secrets.name" -}}
+{{- $name := (include "common.names.fullname" .context) -}}
+
+{{- if .defaultNameSuffix -}}
+{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}}
+{{- end -}}
+
+{{- with .existingSecret -}}
+{{- $name = .name -}}
+{{- end -}}
+
+{{- printf "%s" $name -}}
+{{- end -}}
+
+{{/*
+Generate secret key.
+
+Usage:
+{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }}
+
+Params:
+  - existingSecret - ExistingSecret - Optional. The path to the existing secrets in the values.yaml given by the user
+    to be used instead of the default one.
+info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret
+  - key - String - Required. Name of the key in the secret.
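+
+Illustrative example (editorial; secret and key names follow the ExistingSecret schema in the
+common README, they are not fixed values): with existingSecret.keyMapping set to
+{password: myPasswordKey}, asking for the key "password" yields "myPasswordKey"; without a
+keyMapping, the requested key name "password" is returned unchanged.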
+*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_storage.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_storage.tpl new file mode 100755 index 000000000..60e2a844f --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_tplvalues.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_tplvalues.tpl new file mode 100755 index 000000000..2db166851 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_utils.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_utils.tpl new file mode 100755 index 000000000..74774a3ca --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_utils.tpl @@ -0,0 +1,45 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_validations.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_validations.tpl new file mode 100755 index 000000000..05d1edbaf --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_validations.tpl @@ -0,0 +1,278 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s=$%s' to the command.%s" .valueKey .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} + +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.existingSecret" . 
-}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} + +{{/* +Validate PostgreSQL required passwords are not empty. 
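+
+Note (editorial, derived from the define below): as with the MariaDB variant above, these checks
+are skipped when an existingSecret is provided or when postgresql is not enabled, and the
+replication password is only required when replication is enabled.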
+ +Usage: +{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.postgresql.passwords" -}} + {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}} + {{- $enabled := include "common.postgresql.values.enabled" . -}} + {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}} + {{- $enabledReplication := include "common.postgresql.values.enabled.replication" . -}} + {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}} + + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliar function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_warnings.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_warnings.tpl new file mode 100755 index 000000000..ae10fa41e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/values.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/values.yaml new file mode 100755 index 000000000..9ecdc93f5 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
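+## The value below is only a placeholder so the chart can be linted and installed on its own.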
+exampleValue: common-chart diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/ci/commonAnnotations.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/ci/commonAnnotations.yaml new file mode 100755 index 000000000..f6977823c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/ci/commonAnnotations.yaml @@ -0,0 +1,3 @@
+commonAnnotations:
+  helm.sh/hook: 'pre-install, pre-upgrade'
+  helm.sh/hook-weight: '-1' diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/ci/default-values.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/ci/default-values.yaml new file mode 100755 index 000000000..fc2ba605a --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/ci/default-values.yaml @@ -0,0 +1 @@
+# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/ci/shmvolume-disabled-values.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/ci/shmvolume-disabled-values.yaml new file mode 100755 index 000000000..347d3b40a --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/ci/shmvolume-disabled-values.yaml @@ -0,0 +1,2 @@
+shmVolume:
+  enabled: false diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/files/README.md b/scripts/helm/helmcharts/databases/charts/postgresql/files/README.md new file mode 100755 index 000000000..1813a2fea --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/files/README.md @@ -0,0 +1 @@
+Copy here your postgresql.conf and/or pg_hba.conf files to use them as a config map. diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/files/conf.d/README.md b/scripts/helm/helmcharts/databases/charts/postgresql/files/conf.d/README.md new file mode 100755 index 000000000..184c1875d --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/files/conf.d/README.md @@ -0,0 +1,4 @@
+If you don't want to provide the whole configuration file and only specify certain parameters, you can copy here your extended `.conf` files.
+These files will be injected as config maps and will add to/overwrite the default configuration using the `include_dir` directive, which allows settings to be loaded from files other than the default `postgresql.conf`.
+
+More info in the [bitnami-docker-postgresql README](https://github.com/bitnami/bitnami-docker-postgresql#configuration-file). diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/files/docker-entrypoint-initdb.d/README.md b/scripts/helm/helmcharts/databases/charts/postgresql/files/docker-entrypoint-initdb.d/README.md new file mode 100755 index 000000000..cba38091e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/files/docker-entrypoint-initdb.d/README.md @@ -0,0 +1,3 @@
+You can copy here your custom `.sh`, `.sql` or `.sql.gz` files so they are executed during the first boot of the image.
+
+More info in the [bitnami-docker-postgresql](https://github.com/bitnami/bitnami-docker-postgresql#initializing-a-new-instance) repository.
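+
+As an alternative to dropping files into this directory, scripts can also be supplied inline
+through the chart's `initdbScripts` value, which is rendered by
+`templates/initialization-configmap.yaml`. A minimal sketch (the script name and database are
+made up for illustration):
+
+```yaml
+# values.yaml (illustrative)
+initdbScripts:
+  00-create-db.sql: |
+    -- executed once, during the first boot of the image
+    CREATE DATABASE myapp;
+```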
\ No newline at end of file diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/postgresql.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/postgresql.yaml new file mode 100644 index 000000000..4e5e192d7 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/postgresql.yaml @@ -0,0 +1,185 @@ +apiVersion: v1 +kind: Namespace +metadata: + name: db +--- +# Source: postgresql/templates/secrets.yaml +apiVersion: v1 +kind: Secret +metadata: + name: postgres-postgresql + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm +type: Opaque +data: + postgresql-password: "YXNheWVyUG9zdGdyZXM=" +--- +# Source: postgresql/templates/svc-headless.yaml +apiVersion: v1 +kind: Service +metadata: + name: postgres-postgresql-headless + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: 5432 + targetPort: tcp-postgresql + selector: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: postgres +--- +# Source: postgresql/templates/svc.yaml +apiVersion: v1 +kind: Service +metadata: + name: postgres-postgresql + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm + annotations: +spec: + type: ClusterIP + ports: + - name: tcp-postgresql + port: 5432 + targetPort: tcp-postgresql + selector: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: postgres + role: master +--- +# Source: postgresql/templates/statefulset.yaml +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: postgres-postgresql + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm + annotations: +spec: + serviceName: postgres-postgresql-headless + replicas: 1 + updateStrategy: + type: RollingUpdate + selector: + matchLabels: + app.kubernetes.io/name: postgresql + app.kubernetes.io/instance: postgres + role: master + template: + metadata: + name: postgres-postgresql + labels: + app.kubernetes.io/name: postgresql + helm.sh/chart: postgresql-9.8.2 + app.kubernetes.io/instance: postgres + app.kubernetes.io/managed-by: Helm + role: master + spec: + securityContext: + fsGroup: 1001 + containers: + - name: postgres-postgresql + image: docker.io/bitnami/postgresql:11.9.0-debian-10-r48 + imagePullPolicy: "IfNotPresent" + resources: + requests: + cpu: 250m + memory: 256Mi + securityContext: + runAsUser: 1001 + env: + - name: BITNAMI_DEBUG + value: "false" + - name: POSTGRESQL_PORT_NUMBER + value: "5432" + - name: POSTGRESQL_VOLUME_DIR + value: "/bitnami/postgresql" + - name: PGDATA + value: "/bitnami/postgresql/data" + - name: POSTGRES_USER + value: "postgres" + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: postgres-postgresql + key: postgresql-password + - name: POSTGRESQL_ENABLE_LDAP + value: "no" + - name: POSTGRESQL_ENABLE_TLS + value: "no" + - name: POSTGRESQL_LOG_HOSTNAME + value: "false" + - name: POSTGRESQL_LOG_CONNECTIONS + value: "false" + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: "false" + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: "off" + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: "error" + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: "pgaudit" + ports: 
+ - name: tcp-postgresql + containerPort: 5432 + livenessProbe: + exec: + command: + - /bin/sh + - -c + - exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432 + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + - | + exec pg_isready -U "postgres" -h 127.0.0.1 -p 5432 + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 6 + volumeMounts: + - name: dshm + mountPath: /dev/shm + - name: data + mountPath: /bitnami/postgresql + subPath: + volumes: + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + volumeClaimTemplates: + - metadata: + name: data + spec: + accessModes: + - "ReadWriteOnce" + resources: + requests: + storage: "8Gi" diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/requirements.lock b/scripts/helm/helmcharts/databases/charts/postgresql/requirements.lock new file mode 100755 index 000000000..b0b7b0673 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/requirements.lock @@ -0,0 +1,6 @@ +dependencies: +- name: common + repository: https://charts.bitnami.com/bitnami + version: 0.8.1 +digest: sha256:740783295d301fdd168fafdbaa760de27ab54b0ff36b513589a5a2515072b885 +generated: "2020-10-06T07:04:23.948475694Z" diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/requirements.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/requirements.yaml new file mode 100755 index 000000000..2c28bfe14 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/requirements.yaml @@ -0,0 +1,4 @@ +dependencies: + - name: common + version: 0.x.x + repository: https://charts.bitnami.com/bitnami diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/NOTES.txt b/scripts/helm/helmcharts/databases/charts/postgresql/templates/NOTES.txt new file mode 100755 index 000000000..596e969ce --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/NOTES.txt @@ -0,0 +1,59 @@ +** Please be patient while the chart is being deployed ** + +PostgreSQL can be accessed via port {{ template "postgresql.port" . }} on the following DNS name from within your cluster: + + {{ template "postgresql.fullname" . }}.{{ .Release.Namespace }}.svc.cluster.local - Read/Write connection +{{- if .Values.replication.enabled }} + {{ template "postgresql.fullname" . }}-read.{{ .Release.Namespace }}.svc.cluster.local - Read only connection +{{- end }} + +{{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + +To get the password for "postgres" run: + + export POSTGRES_ADMIN_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-postgres-password}" | base64 --decode) +{{- end }} + +To get the password for "{{ template "postgresql.username" . }}" run: + + export POSTGRES_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "postgresql.secretName" . }} -o jsonpath="{.data.postgresql-password}" | base64 --decode) + +To connect to your database run the following command: + + kubectl run {{ template "postgresql.fullname" . }}-client --rm --tty -i --restart='Never' --namespace {{ .Release.Namespace }} --image {{ template "postgresql.image" . 
}} --env="PGPASSWORD=$POSTGRES_PASSWORD" {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} + --labels="{{ template "postgresql.fullname" . }}-client=true" {{- end }} --command -- psql --host {{ template "postgresql.fullname" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label {{ template "postgresql.fullname" . }}-client=true" will be able to connect to this PostgreSQL cluster. +{{- end }} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "postgresql.fullname" . }}) + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $NODE_IP --port $NODE_PORT -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "LoadBalancer" .Values.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "postgresql.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "postgresql.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host $SERVICE_IP --port {{ template "postgresql.port" . }} -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} + +{{- else if contains "ClusterIP" .Values.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "postgresql.fullname" . }} {{ template "postgresql.port" . }}:{{ template "postgresql.port" . }} & + {{ if (include "postgresql.password" . ) }}PGPASSWORD="$POSTGRES_PASSWORD" {{ end }}psql --host 127.0.0.1 -U {{ .Values.postgresqlUsername }} -d {{- if .Values.postgresqlDatabase }} {{ .Values.postgresqlDatabase }}{{- else }} postgres{{- end }} -p {{ template "postgresql.port" . }} + +{{- end }} + +{{- include "postgresql.validateValues" . -}} + +{{- include "common.warnings.rollingTag" .Values.image -}} + +{{- $passwordValidationErrors := include "common.validations.values.postgresql.passwords" (dict "secret" (include "postgresql.fullname" .) "context" $) -}} + +{{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $passwordValidationErrors) "context" $) -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/_helpers.tpl b/scripts/helm/helmcharts/databases/charts/postgresql/templates/_helpers.tpl new file mode 100755 index 000000000..b6a683ae9 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/_helpers.tpl @@ -0,0 +1,488 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. 
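+For example (editorial, illustrative): with nameOverride unset this simply yields the chart
+name, "postgresql", truncated to 63 characters.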
+*/}} +{{- define "postgresql.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +*/}} +{{- define "postgresql.master.fullname" -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- $fullname := default (printf "%s-%s" .Release.Name $name) .Values.fullnameOverride -}} +{{- if .Values.replication.enabled -}} +{{- printf "%s-%s" $fullname "master" | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s" $fullname | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "postgresql.networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"extensions/v1beta1" +{{- else if semverCompare "^1.7-0" .Capabilities.KubeVersion.GitVersion -}} +"networking.k8s.io/v1" +{{- end -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "postgresql.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL image name +*/}} +{{- define "postgresql.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL postgres user password +*/}} +{{- define "postgresql.postgres.password" -}} +{{- if .Values.global.postgresql.postgresqlPostgresPassword }} + {{- .Values.global.postgresql.postgresqlPostgresPassword -}} +{{- else if .Values.postgresqlPostgresPassword -}} + {{- .Values.postgresqlPostgresPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL password +*/}} +{{- define "postgresql.password" -}} +{{- if .Values.global.postgresql.postgresqlPassword }} + {{- .Values.global.postgresql.postgresqlPassword -}} +{{- else if .Values.postgresqlPassword -}} + {{- .Values.postgresqlPassword -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL replication password +*/}} +{{- define "postgresql.replication.password" -}} +{{- if .Values.global.postgresql.replicationPassword }} + {{- .Values.global.postgresql.replicationPassword -}} +{{- else if .Values.replication.password -}} + {{- .Values.replication.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL username +*/}} +{{- define "postgresql.username" -}} +{{- if .Values.global.postgresql.postgresqlUsername }} + {{- .Values.global.postgresql.postgresqlUsername -}} +{{- else -}} + {{- .Values.postgresqlUsername -}} +{{- end -}} +{{- end -}} + + +{{/* +Return PostgreSQL replication username +*/}} +{{- define "postgresql.replication.username" -}} +{{- if .Values.global.postgresql.replicationUser }} + {{- .Values.global.postgresql.replicationUser -}} +{{- else -}} + {{- .Values.replication.user -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL port +*/}} +{{- define "postgresql.port" -}} +{{- if .Values.global.postgresql.servicePort }} + {{- .Values.global.postgresql.servicePort -}} +{{- else -}} + {{- .Values.service.port -}} +{{- end -}} +{{- end -}} + +{{/* +Return PostgreSQL created database +*/}} +{{- define "postgresql.database" -}} +{{- if .Values.global.postgresql.postgresqlDatabase }} + {{- .Values.global.postgresql.postgresqlDatabase -}} +{{- else if .Values.postgresqlDatabase -}} + {{- .Values.postgresqlDatabase -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name to change the volume permissions +*/}} +{{- define "postgresql.volumePermissions.image" -}} +{{- $registryName := .Values.volumePermissions.image.registry -}} +{{- $repositoryName := .Values.volumePermissions.image.repository -}} +{{- $tag := .Values.volumePermissions.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper PostgreSQL metrics image name +*/}} +{{- define "postgresql.metrics.image" -}} +{{- $registryName := default "docker.io" .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := default "latest" .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Get the password secret. +*/}} +{{- define "postgresql.secretName" -}} +{{- if .Values.global.postgresql.existingSecret }} + {{- printf "%s" (tpl .Values.global.postgresql.existingSecret $) -}} +{{- else if .Values.existingSecret -}} + {{- printf "%s" (tpl .Values.existingSecret $) -}} +{{- else -}} + {{- printf "%s" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if we should use an existingSecret. +*/}} +{{- define "postgresql.useExistingSecret" -}} +{{- if or .Values.global.postgresql.existingSecret .Values.existingSecret -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a secret object should be created +*/}} +{{- define "postgresql.createSecret" -}} +{{- if not (include "postgresql.useExistingSecret" .) -}} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the configuration ConfigMap name. +*/}} +{{- define "postgresql.configurationCM" -}} +{{- if .Values.configurationConfigMap -}} +{{- printf "%s" (tpl .Values.configurationConfigMap $) -}} +{{- else -}} +{{- printf "%s-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the extended configuration ConfigMap name. +*/}} +{{- define "postgresql.extendedConfigurationCM" -}} +{{- if .Values.extendedConfConfigMap -}} +{{- printf "%s" (tpl .Values.extendedConfConfigMap $) -}} +{{- else -}} +{{- printf "%s-extended-configuration" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Return true if a configmap should be mounted with PostgreSQL configuration +*/}} +{{- define "postgresql.mountConfigurationCM" -}} +{{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + {{- true -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts ConfigMap name. +*/}} +{{- define "postgresql.initdbScriptsCM" -}} +{{- if .Values.initdbScriptsConfigMap -}} +{{- printf "%s" (tpl .Values.initdbScriptsConfigMap $) -}} +{{- else -}} +{{- printf "%s-init-scripts" (include "postgresql.fullname" .) -}} +{{- end -}} +{{- end -}} + +{{/* +Get the initialization scripts Secret name. 
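+Note (editorial): unlike the ConfigMap helpers above, there is no generated fallback name here;
+initdbScriptsSecret must be set in values for this to render.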
+*/}} +{{- define "postgresql.initdbScriptsSecret" -}} +{{- printf "%s" (tpl .Values.initdbScriptsSecret $) -}} +{{- end -}} + +{{/* +Get the metrics ConfigMap name. +*/}} +{{- define "postgresql.metricsCM" -}} +{{- printf "%s-metrics" (include "postgresql.fullname" .) -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "postgresql.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* +Get the readiness probe command +*/}} +{{- define "postgresql.readinessProbeCommand" -}} +- | +{{- if (include "postgresql.database" .) }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- else }} + exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . }} +{{- end }} +{{- if contains "bitnami/" .Values.image.repository }} + [ -f /opt/bitnami/postgresql/tmp/.initialized ] || [ -f /bitnami/postgresql/.initialized ] +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class +*/}} +{{- define "postgresql.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. 
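+
+For example (editorial, illustrative values): global.storageClass: "-" renders
+storageClassName: "" (disabling dynamic provisioning), global.storageClass: standard renders
+storageClassName: standard, and if neither global nor persistence sets a class, nothing is
+rendered at all.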
+*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.persistence.storageClass -}} + {{- if (eq "-" .Values.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "postgresql.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "apps/v1beta2" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "postgresql.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "postgresql.validateValues.ldapConfigurationMethod" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.psp" .) -}} +{{- $messages := append $messages (include "postgresql.validateValues.tls" .) -}} +{{- $messages := without $messages "" -}} +{{- $message := join "\n" $messages -}} + +{{- if $message -}} +{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If ldap.url is used then you don't need the other settings for ldap +*/}} +{{- define "postgresql.validateValues.ldapConfigurationMethod" -}} +{{- if and .Values.ldap.enabled (and (not (empty .Values.ldap.url)) (not (empty .Values.ldap.server))) }} +postgresql: ldap.url, ldap.server + You cannot set both `ldap.url` and `ldap.server` at the same time. + Please provide a unique way to configure LDAP. + More info at https://www.postgresql.org/docs/current/auth-ldap.html +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql - If PSP is enabled RBAC should be enabled too +*/}} +{{- define "postgresql.validateValues.psp" -}} +{{- if and .Values.psp.create (not .Values.rbac.create) }} +postgresql: psp.create, rbac.create + RBAC should be enabled if PSP is enabled in order for PSP to work. + More info at https://kubernetes.io/docs/concepts/policy/pod-security-policy/#authorizing-policies +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for podsecuritypolicy. +*/}} +{{- define "podsecuritypolicy.apiVersion" -}} +{{- if semverCompare "<1.10-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "policy/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Validate values of Postgresql TLS - When TLS is enabled, so must be VolumePermissions +*/}} +{{- define "postgresql.validateValues.tls" -}} +{{- if and .Values.tls.enabled (not .Values.volumePermissions.enabled) }} +postgresql: tls.enabled, volumePermissions.enabled + When TLS is enabled you must enable volumePermissions as well to ensure certificates files have + the right permissions. +{{- end -}} +{{- end -}} + +{{/* +Return the path to the cert file. 
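+For example (editorial, illustrative filename): tls.certFilename: server.crt renders
+/opt/bitnami/postgresql/certs/server.crt; rendering fails if the filename is unset while TLS is
+enabled.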
+*/}}
+{{- define "postgresql.tlsCert" -}}
+{{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "postgresql.tlsCertKey" -}}
+{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/postgresql/certs/%s" -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "postgresql.tlsCACert" -}}
+{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.certCAFilename -}}
+{{- end -}}
+
+{{/*
+Return the path to the CRL file.
+*/}}
+{{- define "postgresql.tlsCRL" -}}
+{{- if .Values.tls.crlFilename -}}
+{{- printf "/opt/bitnami/postgresql/certs/%s" .Values.tls.crlFilename -}}
+{{- end -}}
+{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/configmap.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/configmap.yaml new file mode 100755 index 000000000..bc78771d4 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/configmap.yaml @@ -0,0 +1,26 @@
+{{ if and (or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration) (not .Values.configurationConfigMap) }}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "postgresql.fullname" . }}-configuration
+  labels:
+    {{- include "common.labels.standard" . | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+{{- if (.Files.Glob "files/postgresql.conf") }}
+{{ (.Files.Glob "files/postgresql.conf").AsConfig | indent 2 }}
+{{- else if .Values.postgresqlConfiguration }}
+  postgresql.conf: |
+{{- range $key, $value := default dict .Values.postgresqlConfiguration }}
+    {{ $key | snakecase }}={{ $value }}
+{{- end }}
+{{- end }}
+{{- if (.Files.Glob "files/pg_hba.conf") }}
+{{ (.Files.Glob "files/pg_hba.conf").AsConfig | indent 2 }}
+{{- else if .Values.pgHbaConfiguration }}
+  pg_hba.conf: |
+{{ .Values.pgHbaConfiguration | indent 4 }}
+{{- end }}
+{{ end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/extended-config-configmap.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/extended-config-configmap.yaml new file mode 100755 index 000000000..c6793802f --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/extended-config-configmap.yaml @@ -0,0 +1,21 @@
+{{- if and (or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf) (not .Values.extendedConfConfigMap)}}
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "postgresql.fullname" . }}-extended-configuration
+  labels:
+    {{- include "common.labels.standard" . | nindent 4 }}
+  {{- if .Values.commonAnnotations }}
+  annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+  {{- end }}
+data:
+{{- with .Files.Glob "files/conf.d/*.conf" }}
+{{ .AsConfig | indent 2 }}
+{{- end }}
+{{ with .Values.postgresqlExtendedConf }}
+  override.conf: |
+{{- range $key, $value := .
}} + {{ $key | snakecase }}={{ $value }} +{{- end }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/extra-list.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/extra-list.yaml new file mode 100755 index 000000000..b28a03c1c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/extra-list.yaml @@ -0,0 +1,5 @@ +{{- if .Values.extraDeploy }} +apiVersion: v1 +kind: List +items: {{- include "postgresql.tplValue" (dict "value" .Values.extraDeploy "context" $) | nindent 2 }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/initialization-configmap.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/initialization-configmap.yaml new file mode 100755 index 000000000..2652ce732 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/initialization-configmap.yaml @@ -0,0 +1,24 @@ +{{- if and (or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScripts) (not .Values.initdbScriptsConfigMap) }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.fullname" . }}-init-scripts + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.sql.gz" }} +binaryData: +{{- range $path, $bytes := . }} + {{ base $path }}: {{ $.Files.Get $path | b64enc | quote }} +{{- end }} +{{- end }} +data: +{{- with .Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql}" }} +{{ .AsConfig | indent 2 }} +{{- end }} +{{- with .Values.initdbScripts }} +{{ toYaml . | indent 2 }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/metrics-configmap.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/metrics-configmap.yaml new file mode 100755 index 000000000..6216eca84 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/metrics-configmap.yaml @@ -0,0 +1,13 @@ +{{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "postgresql.metricsCM" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +data: + custom-metrics.yaml: {{ toYaml .Values.metrics.customMetrics | quote }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/metrics-svc.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/metrics-svc.yaml new file mode 100755 index 000000000..9181ac89a --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/metrics-svc.yaml @@ -0,0 +1,25 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-metrics + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- toYaml .Values.metrics.service.annotations | nindent 4 }} +spec: + type: {{ .Values.metrics.service.type }} + {{- if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: http-metrics + port: 9187 + targetPort: http-metrics + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: master +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/networkpolicy.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/networkpolicy.yaml new file mode 100755 index 000000000..f2752af77 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/networkpolicy.yaml @@ -0,0 +1,36 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "postgresql.networkPolicy.apiVersion" . }} +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + ingress: + # Allow inbound connections + - ports: + - port: {{ template "postgresql.port" . }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "postgresql.fullname" . }}-client: "true" + {{- if .Values.networkPolicy.explicitNamespacesSelector }} + namespaceSelector: +{{ toYaml .Values.networkPolicy.explicitNamespacesSelector | indent 12 }} + {{- end }} + - podSelector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 14 }} + role: slave + {{- end }} + # Allow prometheus scrapes + - ports: + - port: 9187 +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/podsecuritypolicy.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/podsecuritypolicy.yaml new file mode 100755 index 000000000..fb4c52f20 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/podsecuritypolicy.yaml @@ -0,0 +1,37 @@ +{{- if .Values.psp.create }} +apiVersion: {{ include "podsecuritypolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + privileged: false + volumes: + - 'configMap' + - 'secret' + - 'persistentVolumeClaim' + - 'emptyDir' + - 'projected' + hostNetwork: false + hostIPC: false + hostPID: false + runAsUser: + rule: 'MustRunAsNonRoot' + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + fsGroup: + rule: 'MustRunAs' + ranges: + - min: 1 + max: 65535 + readOnlyRootFilesystem: false +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/prometheusrule.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/prometheusrule.yaml new file mode 100755 index 000000000..0afd8f41f --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/prometheusrule.yaml @@ -0,0 +1,23 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "postgresql.fullname" . }} +{{- with .Values.metrics.prometheusRule.namespace }} + namespace: {{ . }} +{{- end }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.metrics.prometheusRule.additionalLabels }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "postgresql.name" $ }} + rules: {{ tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/role.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/role.yaml new file mode 100755 index 000000000..24148aa6b --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/role.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: Role +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +rules: + {{- if .Values.psp.create }} + - apiGroups: ["extensions"] + resources: ["podsecuritypolicies"] + verbs: ["use"] + resourceNames: + - {{ template "postgresql.fullname" . }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/rolebinding.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/rolebinding.yaml new file mode 100755 index 000000000..a105fb41b --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create }} +kind: RoleBinding +apiVersion: rbac.authorization.k8s.io/v1 +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +roleRef: + kind: Role + name: {{ template "postgresql.fullname" . 
}} + apiGroup: rbac.authorization.k8s.io +subjects: + - kind: ServiceAccount + name: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + namespace: {{ .Release.Namespace }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/secrets.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/secrets.yaml new file mode 100755 index 000000000..8d968864c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/secrets.yaml @@ -0,0 +1,23 @@ +{{- if (include "postgresql.createSecret" .) }} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +type: Opaque +data: + {{- if and .Values.postgresqlPostgresPassword (not (eq .Values.postgresqlUsername "postgres")) }} + postgresql-postgres-password: {{ include "postgresql.postgres.password" . | b64enc | quote }} + {{- end }} + postgresql-password: {{ include "postgresql.password" . | b64enc | quote }} + {{- if .Values.replication.enabled }} + postgresql-replication-password: {{ include "postgresql.replication.password" . | b64enc | quote }} + {{- end }} + {{- if (and .Values.ldap.enabled .Values.ldap.bind_password)}} + postgresql-ldap-password: {{ .Values.ldap.bind_password | b64enc | quote }} + {{- end }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/serviceaccount.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/serviceaccount.yaml new file mode 100755 index 000000000..1e2a1f2a7 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/serviceaccount.yaml @@ -0,0 +1,11 @@ +{{- if and (.Values.serviceAccount.enabled) (not .Values.serviceAccount.name) }} +apiVersion: v1 +kind: ServiceAccount +metadata: + labels: + {{- include "common.labels.standard" . | nindent 4 }} + name: {{ template "postgresql.fullname" . }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/servicemonitor.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/servicemonitor.yaml new file mode 100755 index 000000000..e118002a3 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/servicemonitor.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "postgresql.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- end }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }} + {{- if .Values.metrics.serviceMonitor.additionalLabels }} + {{- toYaml .Values.metrics.serviceMonitor.additionalLabels | nindent 4 }} + {{- end }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + +spec: + endpoints: + - port: http-metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.scrapeTimeout }} + scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }} + {{- end }} + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/statefulset-slaves.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/statefulset-slaves.yaml new file mode 100755 index 000000000..d77142fa3 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/statefulset-slaves.yaml @@ -0,0 +1,403 @@ +{{- if .Values.replication.enabled }} +{{- $slaveResources := coalesce .Values.slave.resources .Values.resources -}} +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: "{{ template "postgresql.fullname" . }}-slave" + labels: + {{- include "common.labels.standard" . | nindent 4 }} +{{- with .Values.slave.labels }} +{{ toYaml . | indent 4 }} +{{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.slave.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: {{ .Values.replication.slaveReplicas }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: slave + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: slave +{{- with .Values.slave.podLabels }} +{{ toYaml . | indent 8 }} +{{- end }} +{{- with .Values.slave.podAnnotations }} + annotations: +{{ toYaml . | indent 8 }} +{{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . | indent 6 }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: +{{ toYaml .Values.slave.nodeSelector | indent 8 }} + {{- end }} + {{- if .Values.slave.affinity }} + affinity: +{{ toYaml .Values.slave.affinity | indent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: +{{ toYaml .Values.slave.tolerations | indent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . 
) .Values.serviceAccount.name}} + {{- end }} + {{- if or .Values.slave.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{ if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.slave.extraInitContainers }} +{{ tpl .Values.slave.extraInitContainers . | indent 8 }} + {{- end }} + {{- end }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . 
}} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if $slaveResources }} + resources: {{- toYaml $slaveResources | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + - name: POSTGRES_REPLICATION_MODE + value: "slave" + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-replication-password + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + - name: POSTGRES_MASTER_HOST + value: {{ template "postgresql.fullname" . }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ include "postgresql.port" . | quote }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.postgresqlMaxConnections }} + - name: POSTGRESQL_MAX_CONNECTIONS + value: {{ .Values.postgresqlMaxConnections | quote }} + {{- end }} + {{- if .Values.postgresqlPostgresConnectionLimit }} + - name: POSTGRESQL_POSTGRES_CONNECTION_LIMIT + value: {{ .Values.postgresqlPostgresConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlDbUserConnectionLimit }} + - name: POSTGRESQL_USERNAME_CONNECTION_LIMIT + value: {{ .Values.postgresqlDbUserConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesInterval }} + - name: POSTGRESQL_TCP_KEEPALIVES_INTERVAL + value: {{ .Values.postgresqlTcpKeepalivesInterval | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesIdle }} + - name: POSTGRESQL_TCP_KEEPALIVES_IDLE + value: {{ .Values.postgresqlTcpKeepalivesIdle | quote }} + {{- end }} + {{- if .Values.postgresqlStatementTimeout }} + - name: POSTGRESQL_STATEMENT_TIMEOUT + value: {{ .Values.postgresqlStatementTimeout | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeealivesCount }} + - name: POSTGRESQL_TCP_KEEPALIVES_COUNT + value: {{ .Values.postgresqlTcpKeealivesCount | quote }} + {{- end }} + {{- if .Values.postgresqlPghbaRemoveFilters }} + - name: POSTGRESQL_PGHBA_REMOVE_FILTERS + value: {{ .Values.postgresqlPghbaRemoveFilters | quote }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "postgresql.tplValue" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "postgresql.tplValue" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{ end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- toYaml .Values.slave.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.slave.sidecars }} +{{- include "common.tplvalues.render" ( dict "value" .Values.slave.sidecars "context" $ ) | nindent 8 }} +{{- end }} + volumes: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . 
}} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} + {{- if or (not .Values.persistence.enabled) (not .Values.slave.persistence.enabled) }} + - name: data + emptyDir: {} + {{- end }} + {{- if .Values.slave.extraVolumes }} + {{- toYaml .Values.slave.extraVolumes | nindent 8 }} + {{- end }} + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} +{{- if and .Values.persistence.enabled .Values.slave.persistence.enabled }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/statefulset.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/statefulset.yaml new file mode 100755 index 000000000..10c1af166 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/statefulset.yaml @@ -0,0 +1,580 @@ +apiVersion: {{ template "postgresql.statefulset.apiVersion" . }} +kind: StatefulSet +metadata: + name: {{ template "postgresql.master.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- with .Values.master.labels }} + {{- toYaml . | nindent 4 }} + {{- end }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- with .Values.master.annotations }} + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + serviceName: {{ template "postgresql.fullname" . }}-headless + replicas: 1 + updateStrategy: + type: {{ .Values.updateStrategy.type }} + {{- if (eq "Recreate" .Values.updateStrategy.type) }} + rollingUpdate: null + {{- end }} + selector: + matchLabels: + {{- include "common.labels.matchLabels" . | nindent 6 }} + role: master + template: + metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . | nindent 8 }} + role: master + {{- with .Values.master.podLabels }} + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.master.podAnnotations }} + annotations: {{- toYaml . | nindent 8 }} + {{- end }} + spec: + {{- if .Values.schedulerName }} + schedulerName: "{{ .Values.schedulerName }}" + {{- end }} +{{- include "postgresql.imagePullSecrets" . 
| indent 6 }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.affinity }} + affinity: {{- toYaml .Values.master.affinity | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.terminationGracePeriodSeconds }} + terminationGracePeriodSeconds: {{ .Values.terminationGracePeriodSeconds }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + {{- if .Values.serviceAccount.enabled }} + serviceAccountName: {{ default (include "postgresql.fullname" . ) .Values.serviceAccount.name }} + {{- end }} + {{- if or .Values.master.extraInitContainers (and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled))) }} + initContainers: + {{- if and .Values.volumePermissions.enabled (or .Values.persistence.enabled (and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled) .Values.tls.enabled) }} + - name: init-chmod-data + image: {{ template "postgresql.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + command: + - /bin/sh + - -cx + - | + {{- if .Values.persistence.enabled }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown `id -u`:`id -G | cut -d " " -f2` {{ .Values.persistence.mountPath }} + {{- else }} + chown {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.persistence.mountPath }} + {{- end }} + mkdir -p {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + chmod 700 {{ .Values.persistence.mountPath }}/data {{- if (include "postgresql.mountConfigurationCM" .) }} {{ .Values.persistence.mountPath }}/conf {{- end }} + find {{ .Values.persistence.mountPath }} -mindepth 1 -maxdepth 1 {{- if not (include "postgresql.mountConfigurationCM" .) }} -not -name "conf" {{- end }} -not -name ".snapshot" -not -name "lost+found" | \ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + xargs chown -R `id -u`:`id -G | cut -d " " -f2` + {{- else }} + xargs chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} + {{- end }} + {{- end }} + {{- if and .Values.shmVolume.enabled .Values.shmVolume.chmod.enabled }} + chmod -R 777 /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + cp /tmp/certs/* /opt/bitnami/postgresql/certs/ + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` /opt/bitnami/postgresql/certs/ + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} /opt/bitnami/postgresql/certs/ + {{- end }} + chmod 600 {{ template "postgresql.tlsCertKey" . 
}} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + volumeMounts: + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + mountPath: /tmp/certs + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + {{- end }} + {{- end }} + {{- if .Values.master.extraInitContainers }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraInitContainers "context" $ ) | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName }} + {{- end }} + containers: + - name: {{ template "postgresql.fullname" . }} + image: {{ template "postgresql.image" . }} + imagePullPolicy: "{{ .Values.image.pullPolicy }}" + {{- if .Values.resources }} + resources: {{- toYaml .Values.resources | nindent 12 }} + {{- end }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + - name: BITNAMI_DEBUG + value: {{ ternary "true" "false" .Values.image.debug | quote }} + - name: POSTGRESQL_PORT_NUMBER + value: "{{ template "postgresql.port" . }}" + - name: POSTGRESQL_VOLUME_DIR + value: "{{ .Values.persistence.mountPath }}" + {{- if .Values.postgresqlInitdbArgs }} + - name: POSTGRES_INITDB_ARGS + value: {{ .Values.postgresqlInitdbArgs | quote }} + {{- end }} + {{- if .Values.postgresqlInitdbWalDir }} + - name: POSTGRES_INITDB_WALDIR + value: {{ .Values.postgresqlInitdbWalDir | quote }} + {{- end }} + {{- if .Values.initdbUser }} + - name: POSTGRESQL_INITSCRIPTS_USERNAME + value: {{ .Values.initdbUser }} + {{- end }} + {{- if .Values.initdbPassword }} + - name: POSTGRESQL_INITSCRIPTS_PASSWORD + value: {{ .Values.initdbPassword }} + {{- end }} + {{- if .Values.persistence.mountPath }} + - name: PGDATA + value: {{ .Values.postgresqlDataDir | quote }} + {{- end }} + {{- if .Values.masterAsStandBy.enabled }} + - name: POSTGRES_MASTER_HOST + value: {{ .Values.masterAsStandBy.masterHost }} + - name: POSTGRES_MASTER_PORT_NUMBER + value: {{ .Values.masterAsStandBy.masterPort | quote }} + {{- end }} + {{- if or .Values.replication.enabled .Values.masterAsStandBy.enabled }} + - name: POSTGRES_REPLICATION_MODE + {{- if .Values.masterAsStandBy.enabled }} + value: "slave" + {{- else }} + value: "master" + {{- end }} + - name: POSTGRES_REPLICATION_USER + value: {{ include "postgresql.replication.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_REPLICATION_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-replication-password" + {{- else }} + - name: POSTGRES_REPLICATION_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . 
}} + key: postgresql-replication-password + {{- end }} + {{- if not (eq .Values.replication.synchronousCommit "off")}} + - name: POSTGRES_SYNCHRONOUS_COMMIT_MODE + value: {{ .Values.replication.synchronousCommit | quote }} + - name: POSTGRES_NUM_SYNCHRONOUS_REPLICAS + value: {{ .Values.replication.numSynchronousReplicas | quote }} + {{- end }} + - name: POSTGRES_CLUSTER_APP_NAME + value: {{ .Values.replication.applicationName }} + {{- end }} + {{- if and (not (eq .Values.postgresqlUsername "postgres")) (or .Values.postgresqlPostgresPassword (include "postgresql.useExistingSecret" .)) }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-postgres-password" + {{- else }} + - name: POSTGRES_POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-postgres-password + {{- end }} + {{- end }} + - name: POSTGRES_USER + value: {{ include "postgresql.username" . | quote }} + {{- if .Values.usePasswordFile }} + - name: POSTGRES_PASSWORD_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: POSTGRES_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + {{- if (include "postgresql.database" .) }} + - name: POSTGRES_DB + value: {{ (include "postgresql.database" .) | quote }} + {{- end }} + {{- if .Values.extraEnv }} + {{- include "common.tplvalues.render" (dict "value" .Values.extraEnv "context" $) | nindent 12 }} + {{- end }} + - name: POSTGRESQL_ENABLE_LDAP + value: {{ ternary "yes" "no" .Values.ldap.enabled | quote }} + {{- if .Values.ldap.enabled }} + - name: POSTGRESQL_LDAP_SERVER + value: {{ .Values.ldap.server }} + - name: POSTGRESQL_LDAP_PORT + value: {{ .Values.ldap.port | quote }} + - name: POSTGRESQL_LDAP_SCHEME + value: {{ .Values.ldap.scheme }} + {{- if .Values.ldap.tls }} + - name: POSTGRESQL_LDAP_TLS + value: "1" + {{- end }} + - name: POSTGRESQL_LDAP_PREFIX + value: {{ .Values.ldap.prefix | quote }} + - name: POSTGRESQL_LDAP_SUFFIX + value: {{ .Values.ldap.suffix | quote }} + - name: POSTGRESQL_LDAP_BASE_DN + value: {{ .Values.ldap.baseDN }} + - name: POSTGRESQL_LDAP_BIND_DN + value: {{ .Values.ldap.bindDN }} + {{- if (not (empty .Values.ldap.bind_password)) }} + - name: POSTGRESQL_LDAP_BIND_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-ldap-password + {{- end}} + - name: POSTGRESQL_LDAP_SEARCH_ATTR + value: {{ .Values.ldap.search_attr }} + - name: POSTGRESQL_LDAP_SEARCH_FILTER + value: {{ .Values.ldap.search_filter }} + - name: POSTGRESQL_LDAP_URL + value: {{ .Values.ldap.url }} + {{- end}} + - name: POSTGRESQL_ENABLE_TLS + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: POSTGRESQL_TLS_PREFER_SERVER_CIPHERS + value: {{ ternary "yes" "no" .Values.tls.preferServerCiphers | quote }} + - name: POSTGRESQL_TLS_CERT_FILE + value: {{ template "postgresql.tlsCert" . }} + - name: POSTGRESQL_TLS_KEY_FILE + value: {{ template "postgresql.tlsCertKey" . }} + {{- if .Values.tls.certCAFilename }} + - name: POSTGRESQL_TLS_CA_FILE + value: {{ template "postgresql.tlsCACert" . }} + {{- end }} + {{- if .Values.tls.crlFilename }} + - name: POSTGRESQL_TLS_CRL_FILE + value: {{ template "postgresql.tlsCRL" . 
}} + {{- end }} + {{- end }} + - name: POSTGRESQL_LOG_HOSTNAME + value: {{ .Values.audit.logHostname | quote }} + - name: POSTGRESQL_LOG_CONNECTIONS + value: {{ .Values.audit.logConnections | quote }} + - name: POSTGRESQL_LOG_DISCONNECTIONS + value: {{ .Values.audit.logDisconnections | quote }} + {{- if .Values.audit.logLinePrefix }} + - name: POSTGRESQL_LOG_LINE_PREFIX + value: {{ .Values.audit.logLinePrefix | quote }} + {{- end }} + {{- if .Values.audit.logTimezone }} + - name: POSTGRESQL_LOG_TIMEZONE + value: {{ .Values.audit.logTimezone | quote }} + {{- end }} + {{- if .Values.audit.pgAuditLog }} + - name: POSTGRESQL_PGAUDIT_LOG + value: {{ .Values.audit.pgAuditLog | quote }} + {{- end }} + - name: POSTGRESQL_PGAUDIT_LOG_CATALOG + value: {{ .Values.audit.pgAuditLogCatalog | quote }} + - name: POSTGRESQL_CLIENT_MIN_MESSAGES + value: {{ .Values.audit.clientMinMessages | quote }} + - name: POSTGRESQL_SHARED_PRELOAD_LIBRARIES + value: {{ .Values.postgresqlSharedPreloadLibraries | quote }} + {{- if .Values.postgresqlMaxConnections }} + - name: POSTGRESQL_MAX_CONNECTIONS + value: {{ .Values.postgresqlMaxConnections | quote }} + {{- end }} + {{- if .Values.postgresqlPostgresConnectionLimit }} + - name: POSTGRESQL_POSTGRES_CONNECTION_LIMIT + value: {{ .Values.postgresqlPostgresConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlDbUserConnectionLimit }} + - name: POSTGRESQL_USERNAME_CONNECTION_LIMIT + value: {{ .Values.postgresqlDbUserConnectionLimit | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesInterval }} + - name: POSTGRESQL_TCP_KEEPALIVES_INTERVAL + value: {{ .Values.postgresqlTcpKeepalivesInterval | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeepalivesIdle }} + - name: POSTGRESQL_TCP_KEEPALIVES_IDLE + value: {{ .Values.postgresqlTcpKeepalivesIdle | quote }} + {{- end }} + {{- if .Values.postgresqlStatementTimeout }} + - name: POSTGRESQL_STATEMENT_TIMEOUT + value: {{ .Values.postgresqlStatementTimeout | quote }} + {{- end }} + {{- if .Values.postgresqlTcpKeealivesCount }} + - name: POSTGRESQL_TCP_KEEPALIVES_COUNT + value: {{ .Values.postgresqlTcpKeealivesCount | quote }} + {{- end }} + {{- if .Values.postgresqlPghbaRemoveFilters }} + - name: POSTGRESQL_PGHBA_REMOVE_FILTERS + value: {{ .Values.postgresqlPghbaRemoveFilters | quote }} + {{- end }} + {{- if .Values.extraEnvVarsCM }} + envFrom: + - configMapRef: + name: {{ tpl .Values.extraEnvVarsCM . }} + {{- end }} + ports: + - name: tcp-postgresql + containerPort: {{ template "postgresql.port" . }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + exec: + command: + - /bin/sh + - -c + {{- if (include "postgresql.database" .) }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} -d "dbname={{ include "postgresql.database" . }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}{{- end }}" -h 127.0.0.1 -p {{ template "postgresql.port" . }} + {{- else }} + - exec pg_isready -U {{ include "postgresql.username" . | quote }} {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} -d "sslcert={{ include "postgresql.tlsCert" . }} sslkey={{ include "postgresql.tlsCertKey" . }}"{{- end }} -h 127.0.0.1 -p {{ template "postgresql.port" . 
}} + {{- end }} + initialDelaySeconds: {{ .Values.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.livenessProbe.failureThreshold }} + {{- else if .Values.customLivenessProbe }} + livenessProbe: {{- include "postgresql.tplValue" (dict "value" .Values.customLivenessProbe "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + exec: + command: + - /bin/sh + - -c + - -e + {{- include "postgresql.readinessProbeCommand" . | nindent 16 }} + initialDelaySeconds: {{ .Values.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.readinessProbe.failureThreshold }} + {{- else if .Values.customReadinessProbe }} + readinessProbe: {{- include "postgresql.tplValue" (dict "value" .Values.customReadinessProbe "context" $) | nindent 12 }} + {{- end }} + volumeMounts: + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + mountPath: /docker-entrypoint-initdb.d/ + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + mountPath: /docker-entrypoint-initdb.d/secret + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + mountPath: /bitnami/postgresql/conf/conf.d/ + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + mountPath: /dev/shm + {{- end }} + {{- if .Values.persistence.enabled }} + - name: data + mountPath: {{ .Values.persistence.mountPath }} + subPath: {{ .Values.persistence.subPath }} + {{- end }} + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap }} + - name: postgresql-config + mountPath: /bitnami/postgresql/conf + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- toYaml .Values.master.extraVolumeMounts | nindent 12 }} + {{- end }} +{{- if .Values.master.sidecars }} +{{- include "common.tplvalues.render" ( dict "value" .Values.master.sidecars "context" $ ) | nindent 8 }} +{{- end }} +{{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "postgresql.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.metrics.securityContext.enabled }} + securityContext: {{- omit .Values.metrics.securityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + env: + {{- $database := required "In order to enable metrics you need to specify a database (.Values.postgresqlDatabase or .Values.global.postgresql.postgresqlDatabase)" (include "postgresql.database" .) 
}} + {{- $sslmode := ternary "require" "disable" .Values.tls.enabled }} + {{- if and .Values.tls.enabled .Values.tls.certCAFilename }} + - name: DATA_SOURCE_NAME + value: {{ printf "host=127.0.0.1 port=%d user=%s sslmode=%s sslcert=%s sslkey=%s" (int (include "postgresql.port" .)) (include "postgresql.username" .) $sslmode (include "postgresql.tlsCert" .) (include "postgresql.tlsCertKey" .) }} + {{- else }} + - name: DATA_SOURCE_URI + value: {{ printf "127.0.0.1:%d/%s?sslmode=%s" (int (include "postgresql.port" .)) $database $sslmode }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: DATA_SOURCE_PASS_FILE + value: "/opt/bitnami/postgresql/secrets/postgresql-password" + {{- else }} + - name: DATA_SOURCE_PASS + valueFrom: + secretKeyRef: + name: {{ template "postgresql.secretName" . }} + key: postgresql-password + {{- end }} + - name: DATA_SOURCE_USER + value: {{ template "postgresql.username" . }} + {{- if .Values.metrics.extraEnvVars }} + {{- include "common.tplvalues.render" (dict "value" .Values.metrics.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if .Values.livenessProbe.enabled }} + livenessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.livenessProbe.failureThreshold }} + {{- end }} + {{- if .Values.readinessProbe.enabled }} + readinessProbe: + httpGet: + path: / + port: http-metrics + initialDelaySeconds: {{ .Values.metrics.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.metrics.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.metrics.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.metrics.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.metrics.readinessProbe.failureThreshold }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: postgresql-password + mountPath: /opt/bitnami/postgresql/secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: postgresql-certificates + mountPath: /opt/bitnami/postgresql/certs + readOnly: true + {{- end }} + {{- if .Values.metrics.customMetrics }} + - name: custom-metrics + mountPath: /conf + readOnly: true + args: ["--extend.query-path", "/conf/custom-metrics.yaml"] + {{- end }} + ports: + - name: http-metrics + containerPort: 9187 + {{- if .Values.metrics.resources }} + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} +{{- end }} + volumes: + {{- if or (.Files.Glob "files/postgresql.conf") (.Files.Glob "files/pg_hba.conf") .Values.postgresqlConfiguration .Values.pgHbaConfiguration .Values.configurationConfigMap}} + - name: postgresql-config + configMap: + name: {{ template "postgresql.configurationCM" . }} + {{- end }} + {{- if or (.Files.Glob "files/conf.d/*.conf") .Values.postgresqlExtendedConf .Values.extendedConfConfigMap }} + - name: postgresql-extended-config + configMap: + name: {{ template "postgresql.extendedConfigurationCM" . }} + {{- end }} + {{- if .Values.usePasswordFile }} + - name: postgresql-password + secret: + secretName: {{ template "postgresql.secretName" . 
}} + {{- end }} + {{- if or (.Files.Glob "files/docker-entrypoint-initdb.d/*.{sh,sql,sql.gz}") .Values.initdbScriptsConfigMap .Values.initdbScripts }} + - name: custom-init-scripts + configMap: + name: {{ template "postgresql.initdbScriptsCM" . }} + {{- end }} + {{- if .Values.initdbScriptsSecret }} + - name: custom-init-scripts-secret + secret: + secretName: {{ template "postgresql.initdbScriptsSecret" . }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: raw-certificates + secret: + secretName: {{ required "A secret containing TLS certificates is required when TLS is enabled" .Values.tls.certificatesSecret }} + - name: postgresql-certificates + emptyDir: {} + {{- end }} + {{- if .Values.master.extraVolumes }} + {{- toYaml .Values.master.extraVolumes | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.customMetrics }} + - name: custom-metrics + configMap: + name: {{ template "postgresql.metricsCM" . }} + {{- end }} + {{- if .Values.shmVolume.enabled }} + - name: dshm + emptyDir: + medium: Memory + sizeLimit: 1Gi + {{- end }} +{{- if and .Values.persistence.enabled .Values.persistence.existingClaim }} + - name: data + persistentVolumeClaim: +{{- with .Values.persistence.existingClaim }} + claimName: {{ tpl . $ }} +{{- end }} +{{- else if not .Values.persistence.enabled }} + - name: data + emptyDir: {} +{{- else if and .Values.persistence.enabled (not .Values.persistence.existingClaim) }} + volumeClaimTemplates: + - metadata: + name: data + {{- with .Values.persistence.annotations }} + annotations: + {{- range $key, $value := . }} + {{ $key }}: {{ $value }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.persistence.size | quote }} + {{ include "postgresql.storageClass" . }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc-headless.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc-headless.yaml new file mode 100755 index 000000000..fb8c838d2 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc-headless.yaml @@ -0,0 +1,18 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-headless + labels: + {{- include "common.labels.standard" . | nindent 4 }} + {{- if .Values.commonAnnotations }} + annotations: {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} +spec: + type: ClusterIP + clusterIP: None + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + selector: + {{- include "common.labels.matchLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc-read.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc-read.yaml new file mode 100755 index 000000000..5ee051c45 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc-read.yaml @@ -0,0 +1,42 @@ +{{- if .Values.replication.enabled }} +{{- $serviceAnnotations := coalesce .Values.slave.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.slave.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.slave.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.slave.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.slave.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.slave.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }}-read + labels: + {{- include "common.labels.standard" . | nindent 4 }} + annotations: + {{- if .Values.commonAnnotations }} + {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }} + {{- end }} + {{- if $serviceAnnotations }} + {{- include "common.tplvalues.render" (dict "value" $serviceAnnotations "context" $) | nindent 4 }} + {{- end }} +spec: + type: {{ $serviceType }} + {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }} + loadBalancerIP: {{ $serviceLoadBalancerIP }} + {{- end }} + {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }} + loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }} + {{- end }} + {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }} + clusterIP: {{ $serviceClusterIP }} + {{- end }} + ports: + - name: tcp-postgresql + port: {{ template "postgresql.port" . }} + targetPort: tcp-postgresql + {{- if $serviceNodePort }} + nodePort: {{ $serviceNodePort }} + {{- end }} + selector: + {{- include "common.labels.matchLabels" . | nindent 4 }} + role: slave +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc.yaml new file mode 100755 index 000000000..3dbfaa12d --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/templates/svc.yaml @@ -0,0 +1,40 @@ +{{- $serviceAnnotations := coalesce .Values.master.service.annotations .Values.service.annotations -}} +{{- $serviceType := coalesce .Values.master.service.type .Values.service.type -}} +{{- $serviceLoadBalancerIP := coalesce .Values.master.service.loadBalancerIP .Values.service.loadBalancerIP -}} +{{- $serviceLoadBalancerSourceRanges := coalesce .Values.master.service.loadBalancerSourceRanges .Values.service.loadBalancerSourceRanges -}} +{{- $serviceClusterIP := coalesce .Values.master.service.clusterIP .Values.service.clusterIP -}} +{{- $serviceNodePort := coalesce .Values.master.service.nodePort .Values.service.nodePort -}} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "postgresql.fullname" . }} + labels: + {{- include "common.labels.standard" . 
| nindent 4 }}
+  annotations:
+    {{- if .Values.commonAnnotations }}
+    {{- include "common.tplvalues.render" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
+    {{- end }}
+    {{- if $serviceAnnotations }}
+    {{- include "common.tplvalues.render" (dict "value" $serviceAnnotations "context" $) | nindent 4 }}
+    {{- end }}
+spec:
+  type: {{ $serviceType }}
+  {{- if and $serviceLoadBalancerIP (eq $serviceType "LoadBalancer") }}
+  loadBalancerIP: {{ $serviceLoadBalancerIP }}
+  {{- end }}
+  {{- if and (eq $serviceType "LoadBalancer") $serviceLoadBalancerSourceRanges }}
+  loadBalancerSourceRanges: {{- include "common.tplvalues.render" (dict "value" $serviceLoadBalancerSourceRanges "context" $) | nindent 4 }}
+  {{- end }}
+  {{- if and (eq $serviceType "ClusterIP") $serviceClusterIP }}
+  clusterIP: {{ $serviceClusterIP }}
+  {{- end }}
+  ports:
+    - name: tcp-postgresql
+      port: {{ template "postgresql.port" . }}
+      targetPort: tcp-postgresql
+      {{- if $serviceNodePort }}
+      nodePort: {{ $serviceNodePort }}
+      {{- end }}
+  selector:
+    {{- include "common.labels.matchLabels" . | nindent 4 }}
+    role: master
diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/values-production.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/values-production.yaml
new file mode 100755
index 000000000..3e144c131
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/postgresql/values-production.yaml
@@ -0,0 +1,711 @@
+## Global Docker image parameters
+## Please note that this will override the image parameters, including dependencies, configured to use the global value
+## Current available global Docker image parameters: imageRegistry and imagePullSecrets
+##
+global:
+  postgresql: {}
+# imageRegistry: myRegistryName
+# imagePullSecrets:
+#   - myRegistryKeySecretName
+# storageClass: myStorageClass
+
+## Bitnami PostgreSQL image version
+## ref: https://hub.docker.com/r/bitnami/postgresql/tags/
+##
+image:
+  registry: docker.io
+  repository: bitnami/postgresql
+  tag: 11.9.0-debian-10-r48
+  ## Specify an imagePullPolicy
+  ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+  ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+  ##
+  pullPolicy: IfNotPresent
+  ## Optionally specify an array of imagePullSecrets.
+  ## Secrets must be manually created in the namespace.
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+  ##
+  # pullSecrets:
+  #   - myRegistryKeySecretName
+
+  ## Set to true if you would like to see extra information on logs
+  ## It turns on BASH and NAMI debugging in minideb
+  ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging
+  ##
+  debug: false
+
+## String to partially override postgresql.fullname template (will maintain the release name)
+##
+# nameOverride:
+
+## String to fully override postgresql.fullname template
+##
+# fullnameOverride:
+
+##
+## Init containers parameters:
+## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup
+##
+volumePermissions:
+  enabled: false
+  image:
+    registry: docker.io
+    repository: bitnami/minideb
+    tag: buster
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: Always
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Init container Security Context
+  ## Note: the chown of the data folder is done to securityContext.runAsUser
+  ## and not the below volumePermissions.securityContext.runAsUser
+  ## When runAsUser is set to special value "auto", init container will try to chown the
+  ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2`
+  ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed).
+  ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with
+  ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false
+  ##
+  securityContext:
+    runAsUser: 0
+
+## Use an alternate scheduler, e.g. "stork".
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+##
+# schedulerName:
+
+## Pod Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+##
+securityContext:
+  enabled: true
+  fsGroup: 1001
+
+## Container Security Context
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+##
+containerSecurityContext:
+  enabled: true
+  runAsUser: 1001
+
+## Pod Service Account
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/
+##
+serviceAccount:
+  enabled: false
+  ## Name of an already existing service account. Setting this value disables the automatic service account creation.
+  # name:
+
+## Pod Security Policy
+## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/
+##
+psp:
+  create: false
+
+## Creates role for ServiceAccount
+## Required for PSP
+##
+rbac:
+  create: false
+
+replication:
+  enabled: true
+  user: repl_user
+  password: repl_password
+  slaveReplicas: 2
+  ## Set synchronous commit mode: on, off, remote_apply, remote_write and local
+  ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL
+  ##
+  synchronousCommit: "on"
+  ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication
+  ## NOTE: It cannot be > slaveReplicas
+  ##
+  numSynchronousReplicas: 1
+  ## Replication Cluster application name. Useful for defining multiple replication policies
+  ##
+  applicationName: my_application
+
+## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`)
+## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!)
+# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: + +## PostgreSQL password using existing secret +## existingSecret: secret +## + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. +## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## Configure current cluster's master server to be the standby server in other cluster. +## This will allow cross cluster replication and provide cross cluster high availability. +## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. +## +masterAsStandBy: + enabled: false + # masterHost: + # masterPort: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." 
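+## A hypothetical illustration (not an upstream default): the chart's templates
+## also glob *.sql and *.sql.gz files under files/docker-entrypoint-initdb.d/,
+## so a plain-SQL first-boot script could be supplied the same way, e.g.
+# initdbScripts:
+#   create_extra_db.sql: |
+#     CREATE DATABASE my_extra_db;  # hypothetical database name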
+
+## Specify the PostgreSQL username and password to execute the initdb scripts
+# initdbUser:
+# initdbPassword:
+
+## ConfigMap with scripts to be run at first boot
+## NOTE: This will override initdbScripts
+# initdbScriptsConfigMap:
+
+## Secret with scripts to be run at first boot (in case it contains sensitive information)
+## NOTE: This can work along initdbScripts or initdbScriptsConfigMap
+# initdbScriptsSecret:
+
+## Optional duration in seconds the pod needs to terminate gracefully.
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+##
+# terminationGracePeriodSeconds: 30
+
+## LDAP configuration
+##
+ldap:
+  enabled: false
+  url: ""
+  server: ""
+  port: ""
+  prefix: ""
+  suffix: ""
+  baseDN: ""
+  bindDN: ""
+  bind_password:
+  search_attr: ""
+  search_filter: ""
+  scheme: ""
+  tls: false
+
+## Audit settings
+## https://github.com/bitnami/bitnami-docker-postgresql#auditing
+##
+audit:
+  ## Log client hostnames
+  ##
+  logHostname: false
+  ## Log connections to the server
+  ##
+  logConnections: false
+  ## Log disconnections
+  ##
+  logDisconnections: false
+  ## Operation to audit using pgAudit (default if not set)
+  ##
+  pgAuditLog: ""
+  ## Log catalog using pgAudit
+  ##
+  pgAuditLogCatalog: "off"
+  ## Log level for clients
+  ##
+  clientMinMessages: error
+  ## Template for log line prefix (default if not set)
+  ##
+  logLinePrefix: ""
+  ## Log timezone
+  ##
+  logTimezone: ""
+
+## Shared preload libraries
+##
+postgresqlSharedPreloadLibraries: "pgaudit"
+
+## Maximum total connections
+##
+postgresqlMaxConnections:
+
+## Maximum connections for the postgres user
+##
+postgresqlPostgresConnectionLimit:
+
+## Maximum connections for the created user
+##
+postgresqlDbUserConnectionLimit:
+
+## TCP keepalives interval
+##
+postgresqlTcpKeepalivesInterval:
+
+## TCP keepalives idle
+##
+postgresqlTcpKeepalivesIdle:
+
+## TCP keepalives count
+##
+postgresqlTcpKeepalivesCount:
+
+## Statement timeout
+##
+postgresqlStatementTimeout:
+
+## Remove pg_hba.conf lines with the following comma-separated patterns
+## (cannot be used with custom pg_hba.conf)
+##
+postgresqlPghbaRemoveFilters:
+
+## PostgreSQL service configuration
+##
+service:
+  ## PostgreSQL service type
+  ##
+  type: ClusterIP
+  # clusterIP: None
+  port: 5432
+
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  # nodePort:
+
+  ## Provide any additional annotations which may be required. Evaluated as a template.
+  ##
+  annotations: {}
+  ## Set the LoadBalancer service type to internal only.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  # loadBalancerIP:
+  ## Load Balancer sources. Evaluated as a template.
+  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ##
+  # loadBalancerSourceRanges:
+  # - 10.10.10.0/24
+
+## Start master and slave(s) pod(s) without limitations on shm memory.
+## By default docker and containerd (and possibly other container runtimes)
+## limit `/dev/shm` to `64M` (see e.g. the
+## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
+## [containerd issue](https://github.com/containerd/containerd/issues/3654),
+## which might not be enough if PostgreSQL uses parallel workers heavily.
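+## For reference, when `shmVolume.enabled` is true, this chart's statefulset
+## template (shown earlier in this diff) mounts the tmpfs volume roughly as:
+##   volumes:
+##     - name: dshm
+##       emptyDir:
+##         medium: Memory
+##         sizeLimit: 1Gi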
+##
+shmVolume:
+  ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
+  ## this limitation.
+  ##
+  enabled: true
+  ## Set to `true` to `chmod 777 /dev/shm` on an initContainer.
+  ## This option is ignored if `volumePermissions.enabled` is `false`
+  ##
+  chmod:
+    enabled: true
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName: <storageClass>
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+## set, choosing the default provisioner. (gp2 on AWS, standard on
+## GKE, AWS & OpenStack)
+##
+persistence:
+  enabled: true
+  ## A manually managed Persistent Volume and Claim
+  ## If defined, PVC must be created manually before volume will be bound
+  ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
+  ##
+  # existingClaim:
+
+  ## The path the volume will be mounted at, useful when using different
+  ## PostgreSQL images.
+  ##
+  mountPath: /bitnami/postgresql
+
+  ## The subdirectory of the volume to mount to, useful in dev environments
+  ## and one PV for multiple services.
+  ##
+  subPath: ""
+
+  # storageClass: "-"
+  accessModes:
+    - ReadWriteOnce
+  size: 8Gi
+  annotations: {}
+
+## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets
+## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies
+##
+updateStrategy:
+  type: RollingUpdate
+
+##
+## PostgreSQL Master parameters
+##
+master:
+  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
+  ##
+  nodeSelector: {}
+  affinity: {}
+  tolerations: []
+  labels: {}
+  annotations: {}
+  podLabels: {}
+  podAnnotations: {}
+  priorityClassName: ""
+  ## Additional PostgreSQL Master Volume mounts
+  ##
+  extraVolumeMounts: []
+  ## Additional PostgreSQL Master Volumes
+  ##
+  extraVolumes: []
+  ## Add sidecars to the pod
+  ##
+  ## For example:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+
+  ## Override the service configuration for master
+  ##
+  service: {}
+  #  type:
+  #  nodePort:
+  #  clusterIP:
+
+##
+## PostgreSQL Slave parameters
+##
+slave:
+  ## Node, affinity, tolerations, and priorityclass settings for pod assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+  ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption
+  ##
+  nodeSelector: {}
+  affinity: {}
+  tolerations: []
+  labels: {}
+  annotations: {}
+  podLabels: {}
+  podAnnotations: {}
+  priorityClassName: ""
+  ## Extra init containers
+  ## Example
+  ##
+  ## extraInitContainers:
+  ##   - name: do-something
+  ##     image: busybox
+  ##     command: ['do', 'something']
+  ##
+  extraInitContainers: []
+  ## Additional PostgreSQL Slave Volume mounts
+  ##
  extraVolumeMounts: []
+  ## Additional PostgreSQL Slave Volumes
+  ##
+  extraVolumes: []
+  ## Add sidecars to the pod
+  ##
+  ## For example:
+  ## sidecars:
+  ##   - name: your-image-name
+  ##     image: your-image
+  ##     imagePullPolicy: Always
+  ##     ports:
+  ##       - name: portname
+  ##         containerPort: 1234
+  ##
+  sidecars: []
+
+  ## Override the service configuration for slave
+  ##
+  service: {}
+  #  type:
+  #  nodePort:
+  #  clusterIP:
+  ## Whether to enable PostgreSQL slave replicas data Persistent
+  ##
+  persistence:
+    enabled: true
+
+  # Override the resource configuration for slave
+  resources: {}
+  # requests:
+  #   memory: 256Mi
+  #   cpu: 250m
+
+## Configure resource requests and limits
+## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+##
+resources:
+  requests:
+    memory: 256Mi
+    cpu: 250m
+
+## Add annotations to all the deployed resources
+##
+commonAnnotations: {}
+
+networkPolicy:
+  ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now.
+  ##
+  enabled: false
+
+  ## The Policy model to apply. When set to false, only pods with the correct
+  ## client label will have network access to the port PostgreSQL is listening
+  ## on. When true, PostgreSQL will accept connections from any source
+  ## (with the correct destination port).
+  ##
+  allowExternal: true
+
+  ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
+  ## and that match other criteria, i.e. the ones that have the correct label, can reach the DB.
+  ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this
+  ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
+  ##
+  ## Example:
+  ## explicitNamespacesSelector:
+  ##   matchLabels:
+  ##     role: frontend
+  ##   matchExpressions:
+  ##     - {key: role, operator: In, values: [frontend]}
+  ##
+  explicitNamespacesSelector: {}
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Custom Liveness probe
+##
+customLivenessProbe: {}
+
+## Custom Readiness probe
+##
+customReadinessProbe: {}
+
+##
+## TLS configuration
+##
+tls:
+  # Enable TLS traffic
+  enabled: false
+  #
+  # Whether to use the server's TLS cipher preferences rather than the client's.
+  preferServerCiphers: true
+  #
+  # Name of the Secret that contains the certificates
+  certificatesSecret: ""
+  #
+  # Certificate filename
+  certFilename: ""
+  #
+  # Certificate Key filename
+  certKeyFilename: ""
+  #
+  # CA Certificate filename
+  # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting a certificate from them
+  # ref: https://www.postgresql.org/docs/9.6/auth-methods.html
+  certCAFilename:
+  #
+  # File containing a Certificate Revocation List
+  crlFilename:
+
+## Configure metrics exporter
+##
+metrics:
+  enabled: true
+  # resources: {}
+  service:
+    type: ClusterIP
+    annotations:
+      prometheus.io/scrape: "true"
+      prometheus.io/port: "9187"
+    loadBalancerIP:
+  serviceMonitor:
+    enabled: false
+    additionalLabels: {}
+    # namespace: monitoring
+    # interval: 30s
+    # scrapeTimeout: 10s
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  ##
+  prometheusRule:
+    enabled: false
+    additionalLabels: {}
+    namespace: ""
+    ## These are just example rules, please adapt them to your needs.
+    ## Make sure to constrain the rules to the current postgresql service.
+    ## rules:
+    ##   - alert: HugeReplicationLag
+    ##     expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
+    ##     for: 1m
+    ##     labels:
+    ##       severity: critical
+    ##     annotations:
+    ##       description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+    ##       summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+    ##
+    rules: []
+
+  image:
+    registry: docker.io
+    repository: bitnami/postgres-exporter
+    tag: 0.8.0-debian-10-r242
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  ## Define additional custom metrics
+  ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file
+  # customMetrics:
+  #   pg_database:
+  #     query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')"
+  #     metrics:
+  #       - name:
+  #           usage: "LABEL"
+  #           description: "Name of the database"
+  #       - size_bytes:
+  #           usage: "GAUGE"
+  #           description: "Size of the database in bytes"
+  ## An array to add extra env vars to configure postgres-exporter
+  ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables
+  ## For example:
+  # extraEnvVars:
+  #   - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS
+  #     value: "true"
+  extraEnvVars: {}
+
+  ## Pod Security Context
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/
+  ##
+  securityContext:
+    enabled: false
+    runAsUser: 1001
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+  ## Configure extra options for liveness and readiness probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 6
+    successThreshold: 1
+
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 5
+    failureThreshold: 6
+    successThreshold: 1
diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/values.schema.json b/scripts/helm/helmcharts/databases/charts/postgresql/values.schema.json
new file mode 100755
index 000000000..7b5e2efc3
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/postgresql/values.schema.json
@@ -0,0 +1,103 @@
+{
+    "$schema": "http://json-schema.org/schema#",
+    "type": "object",
+    "properties": {
+        "postgresqlUsername": {
+            "type": "string",
+            "title": "Admin user",
+            "form": true
+        },
+        "postgresqlPassword": {
+            "type": "string",
+            "title": "Password",
+            "form": true
+        },
+        "persistence": {
+            "type": "object",
+            "properties": {
+                "size": {
+                    "type": "string",
+                    "title": "Persistent Volume Size",
+                    "form": true,
+                    "render": "slider",
+                    "sliderMin": 1,
+                    "sliderMax": 100,
+                    "sliderUnit": "Gi"
+                }
+            }
+        },
+        "resources": {
+            "type": "object",
+            "title": "Required Resources",
+            "description": "Configure resource requests",
+            "form": true,
+            "properties": {
+                "requests": {
+                    "type": "object",
+                    "properties": {
+                        "memory": {
+                            "type": "string",
+                            "form": true,
+                            "render": "slider",
+                            "title": "Memory Request",
+                            "sliderMin": 10,
+                            "sliderMax": 2048,
+                            "sliderUnit": "Mi"
+                        },
+                        "cpu": {
+                            "type": "string",
+                            "form": true,
+                            "render": "slider",
+                            "title": "CPU Request",
+                            "sliderMin": 10,
+                            "sliderMax": 2000,
+                            "sliderUnit": "m"
+                        }
+                    }
+                }
+            }
+        },
+        "replication": {
+            "type": "object",
+            "form": true,
+            "title": "Replication Details",
+            "properties": {
+                "enabled": {
+                    "type": "boolean",
+                    "title": "Enable Replication",
+                    "form": true
+                },
+                "slaveReplicas": {
+                    "type": "integer",
+                    "title": "Slave Replicas",
+                    "form": true,
+                    "hidden": {
+                        "value": false,
+                        "path": "replication/enabled"
+                    }
+                }
+            }
+        },
+        "volumePermissions": {
+            "type": "object",
+            "properties": {
+                "enabled": {
+                    "type": "boolean",
+                    "form": true,
+                    "title": "Enable Init Containers",
"description": "Change the owner of the persist volume mountpoint to RunAsUser:fsGroup" + } + } + }, + "metrics": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Configure metrics exporter", + "form": true + } + } + } + } +} diff --git a/scripts/helm/helmcharts/databases/charts/postgresql/values.yaml b/scripts/helm/helmcharts/databases/charts/postgresql/values.yaml new file mode 100755 index 000000000..c97200904 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/postgresql/values.yaml @@ -0,0 +1,722 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + postgresql: {} +# imageRegistry: myRegistryName +# imagePullSecrets: +# - myRegistryKeySecretName +# storageClass: myStorageClass + +## Bitnami PostgreSQL image version +## ref: https://hub.docker.com/r/bitnami/postgresql/tags/ +## +image: + registry: docker.io + repository: bitnami/postgresql + tag: 11.9.0-debian-10-r48 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Set to true if you would like to see extra information on logs + ## It turns BASH and NAMI debugging in minideb + ## ref: https://github.com/bitnami/minideb-extras/#turn-on-bash-debugging + ## + debug: false + +## String to partially override postgresql.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override postgresql.fullname template +## +# fullnameOverride: + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/minideb + tag: buster + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Init container Security Context + ## Note: the chown of the data folder is done to securityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to special value "auto", init container will try to chwon the + ## data folder to autodetermined user&group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## pod securityContext.enabled=false and shmVolume.chmod.enabled=false + ## + securityContext: + runAsUser: 0 + +## Use an alternate scheduler, e.g. "stork". 
+## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ +## +# schedulerName: + +## Pod Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +securityContext: + enabled: true + fsGroup: 1001 + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Pod Service Account +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-service-account/ +## +serviceAccount: + enabled: false + ## Name of an already existing service account. Setting this value disables the automatic service account creation. + # name: + +## Pod Security Policy +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +psp: + create: false + +## Creates role for ServiceAccount +## Required for PSP +## +rbac: + create: false + +replication: + enabled: false + user: repl_user + password: repl_password + slaveReplicas: 1 + ## Set synchronous commit mode: on, off, remote_apply, remote_write and local + ## ref: https://www.postgresql.org/docs/9.6/runtime-config-wal.html#GUC-WAL-LEVEL + ## + synchronousCommit: 'off' + ## From the number of `slaveReplicas` defined above, set the number of those that will have synchronous replication + ## NOTE: It cannot be > slaveReplicas + ## + numSynchronousReplicas: 0 + ## Replication Cluster application name. Useful for defining multiple replication policies + ## + applicationName: my_application + +## PostgreSQL admin password (used when `postgresqlUsername` is not `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-user-on-first-run (see note!) +# postgresqlPostgresPassword: + +## PostgreSQL user (has superuser privileges if username is `postgres`) +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +postgresqlUsername: postgres + +## PostgreSQL password +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#setting-the-root-password-on-first-run +## +# postgresqlPassword: asayerPostgres + +## PostgreSQL password using existing secret +## existingSecret: secret +## + +## Mount PostgreSQL secret as a file instead of passing environment variable +# usePasswordFile: false + +## Create a database +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md#creating-a-database-on-first-run +## +# postgresqlDatabase: + +## PostgreSQL data dir +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +postgresqlDataDir: /bitnami/postgresql/data + +## An array to add extra environment variables +## For example: +## extraEnv: +## - name: FOO +## value: "bar" +## +# extraEnv: +extraEnv: [] + +## Name of a ConfigMap containing extra env vars +## +# extraEnvVarsCM: + +## Specify extra initdb args +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbArgs: + +## Specify a custom location for the PostgreSQL transaction log +## ref: https://github.com/bitnami/bitnami-docker-postgresql/blob/master/README.md +## +# postgresqlInitdbWalDir: + +## PostgreSQL configuration +## Specify runtime configuration parameters as a dict, using camelCase, e.g. 
+## {"sharedBuffers": "500MB"} +## Alternatively, you can put your postgresql.conf under the files/ directory +## ref: https://www.postgresql.org/docs/current/static/runtime-config.html +## +# postgresqlConfiguration: + +## PostgreSQL extended configuration +## As above, but _appended_ to the main configuration +## Alternatively, you can put your *.conf under the files/conf.d/ directory +## https://github.com/bitnami/bitnami-docker-postgresql#allow-settings-to-be-loaded-from-files-other-than-the-default-postgresqlconf +## +# postgresqlExtendedConf: + +## Configure current cluster's master server to be the standby server in other cluster. +## This will allow cross cluster replication and provide cross cluster high availability. +## You will need to configure pgHbaConfiguration if you want to enable this feature with local cluster replication enabled. +## +masterAsStandBy: + enabled: false + # masterHost: + # masterPort: + +## PostgreSQL client authentication configuration +## Specify content for pg_hba.conf +## Default: do not create pg_hba.conf +## Alternatively, you can put your pg_hba.conf under the files/ directory +# pgHbaConfiguration: |- +# local all all trust +# host all all localhost trust +# host mydatabase mysuser 192.168.0.0/24 md5 + +## ConfigMap with PostgreSQL configuration +## NOTE: This will override postgresqlConfiguration and pgHbaConfiguration +# configurationConfigMap: + +## ConfigMap with PostgreSQL extended configuration +# extendedConfConfigMap: + +## initdb scripts +## Specify dictionary of scripts to be run at first boot +## Alternatively, you can put your scripts under the files/docker-entrypoint-initdb.d directory +## +# initdbScripts: +# my_init_script.sh: | +# #!/bin/sh +# echo "Do something." + +## ConfigMap with scripts to be run at first boot +## NOTE: This will override initdbScripts +# initdbScriptsConfigMap: + +## Secret with scripts to be run at first boot (in case it contains sensitive information) +## NOTE: This can work along initdbScripts or initdbScriptsConfigMap +# initdbScriptsSecret: + +## Specify the PostgreSQL username and password to execute the initdb scripts +# initdbUser: +# initdbPassword: + +## Audit settings +## https://github.com/bitnami/bitnami-docker-postgresql#auditing +## +audit: + ## Log client hostnames + ## + logHostname: false + ## Log connections to the server + ## + logConnections: false + ## Log disconnections + ## + logDisconnections: false + ## Operation to audit using pgAudit (default if not set) + ## + pgAuditLog: "" + ## Log catalog using pgAudit + ## + pgAuditLogCatalog: "off" + ## Log level for clients + ## + clientMinMessages: error + ## Template for log line prefix (default if not set) + ## + logLinePrefix: "" + ## Log timezone + ## + logTimezone: "" + +## Shared preload libraries +## +postgresqlSharedPreloadLibraries: "pgaudit" + +## Maximum total connections +## +postgresqlMaxConnections: + +## Maximum connections for the postgres user +## +postgresqlPostgresConnectionLimit: + +## Maximum connections for the created user +## +postgresqlDbUserConnectionLimit: + +## TCP keepalives interval +## +postgresqlTcpKeepalivesInterval: + +## TCP keepalives idle +## +postgresqlTcpKeepalivesIdle: + +## TCP keepalives count +## +postgresqlTcpKeepalivesCount: + +## Statement timeout +## +postgresqlStatementTimeout: + +## Remove pg_hba.conf lines with the following comma-separated patterns +## (cannot be used with custom pg_hba.conf) +## +postgresqlPghbaRemoveFilters: + +## Optional duration in seconds the pod needs to terminate 
gracefully.
+## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod/#termination-of-pods
+##
+# terminationGracePeriodSeconds: 30
+
+## LDAP configuration
+##
+ldap:
+  enabled: false
+  url: ''
+  server: ''
+  port: ''
+  prefix: ''
+  suffix: ''
+  baseDN: ''
+  bindDN: ''
+  bind_password:
+  search_attr: ''
+  search_filter: ''
+  scheme: ''
+  tls: false
+
+## PostgreSQL service configuration
+##
+service:
+  ## PostgreSQL service type
+  ##
+  type: ClusterIP
+  # clusterIP: None
+  port: 5432
+
+  ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+  ##
+  # nodePort:
+
+  ## Provide any additional annotations which may be required. Evaluated as a template.
+  ##
+  annotations: {}
+  ## Set the LoadBalancer service type to internal only.
+  ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+  ##
+  # loadBalancerIP:
+  ## Load Balancer sources. Evaluated as a template.
+  ## https://kubernetes.io/docs/tasks/access-application-cluster/configure-cloud-provider-firewall/#restrict-access-for-loadbalancer-service
+  ##
+  # loadBalancerSourceRanges:
+  # - 10.10.10.0/24
+
+## Start master and slave(s) pod(s) without limitations on shm memory.
+## By default docker and containerd (and possibly other container runtimes)
+## limit `/dev/shm` to `64M` (see e.g. the
+## [docker issue](https://github.com/docker-library/postgres/issues/416) and the
+## [containerd issue](https://github.com/containerd/containerd/issues/3654),
+## which might not be enough if PostgreSQL uses parallel workers heavily.
+##
+shmVolume:
+  ## Set `shmVolume.enabled` to `true` to mount a new tmpfs volume to remove
+  ## this limitation.
+  ##
+  enabled: true
+  ## Set to `true` to `chmod 777 /dev/shm` on an initContainer.
+  ## This option is ignored if `volumePermissions.enabled` is `false`
+  ##
+  chmod:
+    enabled: true
+
+## PostgreSQL data Persistent Volume Storage Class
+## If defined, storageClassName: <storageClass>
+## If set to "-", storageClassName: "", which disables dynamic provisioning
+## If undefined (the default) or set to null, no storageClassName spec is
+## set, choosing the default provisioner. (gp2 on AWS, standard on
+## GKE, AWS & OpenStack)
+##
+persistence:
+  enabled: true
+  ## A manually managed Persistent Volume and Claim
+  ## If defined, PVC must be created manually before volume will be bound
+  ## The value is evaluated as a template, so, for example, the name can depend on .Release or .Chart
+  ##
+  # existingClaim:
+
+  ## The path the volume will be mounted at, useful when using different
+  ## PostgreSQL images.
+  ##
+  mountPath: /bitnami/postgresql
+
+  ## The subdirectory of the volume to mount to, useful in dev environments
+  ## and one PV for multiple services.
+ ## + subPath: '' + + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + annotations: {} + +## updateStrategy for PostgreSQL StatefulSet and its slaves StatefulSets +## ref: https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#update-strategies +## +updateStrategy: + type: RollingUpdate + +## +## PostgreSQL Master parameters +## +master: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + ## Extra init containers + ## Example + ## + ## extraInitContainers: + ## - name: do-something + ## image: busybox + ## command: ['do', 'something'] + ## + extraInitContainers: [] + + ## Additional PostgreSQL Master Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Master Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for master + ## + service: {} + # type: + # nodePort: + # clusterIP: + +## +## PostgreSQL Slave parameters +## +slave: + ## Node, affinity, tolerations, and priorityclass settings for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#affinity-and-anti-affinity + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## ref: https://kubernetes.io/docs/concepts/configuration/pod-priority-preemption + ## + nodeSelector: {} + affinity: {} + tolerations: [] + labels: {} + annotations: {} + podLabels: {} + podAnnotations: {} + priorityClassName: '' + extraInitContainers: | + # - name: do-something + # image: busybox + # command: ['do', 'something'] + ## Additional PostgreSQL Slave Volume mounts + ## + extraVolumeMounts: [] + ## Additional PostgreSQL Slave Volumes + ## + extraVolumes: [] + ## Add sidecars to the pod + ## + ## For example: + ## sidecars: + ## - name: your-image-name + ## image: your-image + ## imagePullPolicy: Always + ## ports: + ## - name: portname + ## containerPort: 1234 + ## + sidecars: [] + + ## Override the service configuration for slave + ## + service: {} + # type: + # nodePort: + # clusterIP: + + ## Whether to enable PostgreSQL slave replicas data Persistent + ## + persistence: + enabled: true + + # Override the resource configuration for slave + resources: {} + # requests: + # memory: 256Mi + # cpu: 250m + +## Configure resource requests and limits +## ref: http://kubernetes.io/docs/user-guide/compute-resources/ +## +resources: + requests: + memory: 256Mi + cpu: 250m + +## Add annotations to all the deployed resources +## +commonAnnotations: {} + +networkPolicy: + ## Enable creation of NetworkPolicy resources. Only Ingress traffic is filtered for now. + ## + enabled: false + + ## The Policy model to apply. 
When set to false, only pods with the correct
+  ## client label will have network access to the port PostgreSQL is listening
+  ## on. When true, PostgreSQL will accept connections from any source
+  ## (with the correct destination port).
+  ##
+  allowExternal: true
+
+  ## if explicitNamespacesSelector is missing or set to {}, only client Pods that are in the networkPolicy's namespace
+  ## and that match other criteria, i.e. the ones that have the correct label, can reach the DB.
+  ## But sometimes, we want the DB to be accessible to clients from other namespaces, in this case, we can use this
+  ## LabelSelector to select these namespaces, note that the networkPolicy's namespace should also be explicitly added.
+  ##
+  ## Example:
+  ## explicitNamespacesSelector:
+  ##   matchLabels:
+  ##     role: frontend
+  ##   matchExpressions:
+  ##     - {key: role, operator: In, values: [frontend]}
+  ##
+  explicitNamespacesSelector: {}
+
+## Configure extra options for liveness and readiness probes
+## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes)
+##
+livenessProbe:
+  enabled: true
+  initialDelaySeconds: 30
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+readinessProbe:
+  enabled: true
+  initialDelaySeconds: 5
+  periodSeconds: 10
+  timeoutSeconds: 5
+  failureThreshold: 6
+  successThreshold: 1
+
+## Custom Liveness probe
+##
+customLivenessProbe: {}
+
+## Custom Readiness probe
+##
+customReadinessProbe: {}
+
+##
+## TLS configuration
+##
+tls:
+  # Enable TLS traffic
+  enabled: false
+  #
+  # Whether to use the server's TLS cipher preferences rather than the client's.
+  preferServerCiphers: true
+  #
+  # Name of the Secret that contains the certificates
+  certificatesSecret: ''
+  #
+  # Certificate filename
+  certFilename: ''
+  #
+  # Certificate Key filename
+  certKeyFilename: ''
+  #
+  # CA Certificate filename
+  # If provided, PostgreSQL will authenticate TLS/SSL clients by requesting a certificate from them
+  # ref: https://www.postgresql.org/docs/9.6/auth-methods.html
+  certCAFilename:
+  #
+  # File containing a Certificate Revocation List
+  crlFilename:
+
+## Configure metrics exporter
+##
+metrics:
+  enabled: false
+  # resources: {}
+  service:
+    type: ClusterIP
+    annotations:
+      prometheus.io/scrape: 'true'
+      prometheus.io/port: '9187'
+    loadBalancerIP:
+  serviceMonitor:
+    enabled: false
+    additionalLabels: {}
+    # namespace: monitoring
+    # interval: 30s
+    # scrapeTimeout: 10s
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  ##
+  prometheusRule:
+    enabled: false
+    additionalLabels: {}
+    namespace: ''
+    ## These are just example rules, please adapt them to your needs.
+    ## Make sure to constrain the rules to the current postgresql service.
+    ## rules:
+    ##   - alert: HugeReplicationLag
+    ##     expr: pg_replication_lag{service="{{ template "postgresql.fullname" . }}-metrics"} / 3600 > 1
+    ##     for: 1m
+    ##     labels:
+    ##       severity: critical
+    ##     annotations:
+    ##       description: replication for {{ template "postgresql.fullname" . }} PostgreSQL is lagging by {{ "{{ $value }}" }} hour(s).
+    ##       summary: PostgreSQL replication is lagging by {{ "{{ $value }}" }} hour(s).
+ ## + rules: [] + + image: + registry: docker.io + repository: bitnami/postgres-exporter + tag: 0.8.0-debian-10-r242 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + ## Define additional custom metrics + ## ref: https://github.com/wrouesnel/postgres_exporter#adding-new-metrics-via-a-config-file + # customMetrics: + # pg_database: + # query: "SELECT d.datname AS name, CASE WHEN pg_catalog.has_database_privilege(d.datname, 'CONNECT') THEN pg_catalog.pg_database_size(d.datname) ELSE 0 END AS size_bytes FROM pg_catalog.pg_database d where datname not in ('template0', 'template1', 'postgres')" + # metrics: + # - name: + # usage: "LABEL" + # description: "Name of the database" + # - size_bytes: + # usage: "GAUGE" + # description: "Size of the database in bytes" + # + ## An array to add extra env vars to configure postgres-exporter + ## see: https://github.com/wrouesnel/postgres_exporter#environment-variables + ## For example: + # extraEnvVars: + # - name: PG_EXPORTER_DISABLE_DEFAULT_METRICS + # value: "true" + extraEnvVars: {} + + ## Pod Security Context + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ + ## + securityContext: + enabled: false + runAsUser: 1001 + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes) + ## Configure extra options for liveness and readiness probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 5 + failureThreshold: 6 + successThreshold: 1 + +## Array with extra yaml to deploy with the chart. Evaluated as a template +## +extraDeploy: [] diff --git a/scripts/helm/helmcharts/databases/charts/redis/.helmignore b/scripts/helm/helmcharts/databases/charts/redis/.helmignore new file mode 100644 index 000000000..f0c131944 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/.helmignore @@ -0,0 +1,21 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
diff --git a/scripts/helm/helmcharts/databases/charts/redis/Chart.lock b/scripts/helm/helmcharts/databases/charts/redis/Chart.lock
new file mode 100644
index 000000000..69cc5b92f
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/Chart.lock
@@ -0,0 +1,6 @@
+dependencies:
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  version: 1.4.2
+digest: sha256:4e3ec38e0e27e9fc1defb2a13f67a0aa12374bf0b15f06a6c13b1b46df6bffeb
+generated: "2021-04-05T11:40:59.141264592Z"
diff --git a/scripts/helm/helmcharts/databases/charts/redis/Chart.yaml b/scripts/helm/helmcharts/databases/charts/redis/Chart.yaml
new file mode 100644
index 000000000..dd36c43e0
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/Chart.yaml
@@ -0,0 +1,29 @@
+annotations:
+  category: Database
+apiVersion: v2
+appVersion: 6.0.12
+dependencies:
+- name: common
+  repository: https://charts.bitnami.com/bitnami
+  tags:
+  - bitnami-common
+  version: 1.x.x
+description: Open source, advanced key-value store. It is often referred to as a data
+  structure server since keys can contain strings, hashes, lists, sets and sorted
+  sets.
+home: https://github.com/bitnami/charts/tree/master/bitnami/redis
+icon: https://bitnami.com/assets/stacks/redis/img/redis-stack-220x234.png
+keywords:
+- redis
+- keyvalue
+- database
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+- email: cedric@desaintmartin.fr
+  name: desaintmartin
+name: redis
+sources:
+- https://github.com/bitnami/bitnami-docker-redis
+- http://redis.io/
+version: 12.10.1
diff --git a/scripts/helm/helmcharts/databases/charts/redis/README.md b/scripts/helm/helmcharts/databases/charts/redis/README.md
new file mode 100644
index 000000000..de0a04e18
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/README.md
@@ -0,0 +1,738 @@
+# RedisTM Chart packaged by Bitnami
+
+[RedisTM](http://redis.io/) is an advanced key-value cache and store. It is often referred to as a data structure server since keys can contain strings, hashes, lists, sets, sorted sets, bitmaps and hyperloglogs.
+
+Disclaimer: REDIS® is a registered trademark of Redis Labs Ltd. Any rights therein are reserved to Redis Labs Ltd. Any use by Bitnami is for referential purposes only and does not indicate any sponsorship, endorsement, or affiliation between Redis Labs Ltd. and Bitnami.
+
+## TL;DR
+
+```bash
+$ helm repo add bitnami https://charts.bitnami.com/bitnami
+$ helm install my-release bitnami/redis
+```
+
+## Introduction
+
+This chart bootstraps a [RedisTM](https://github.com/bitnami/bitnami-docker-redis) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This chart has been tested to work with NGINX Ingress, cert-manager, fluentd and Prometheus on top of the [BKPR](https://kubeprod.io/).
+
+### Choose between RedisTM Helm Chart and RedisTM Cluster Helm Chart
+
+You can choose either of the two RedisTM Helm charts for deploying a RedisTM cluster.
+While [RedisTM Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis) will deploy a master-slave cluster using RedisTM Sentinel, the [RedisTM Cluster Helm Chart](https://github.com/bitnami/charts/tree/master/bitnami/redis-cluster) will deploy a RedisTM Cluster topology with sharding. +The main features of each chart are the following: + +| RedisTM | RedisTM Cluster | +|--------------------------------------------------------|------------------------------------------------------------------------| +| Supports multiple databases | Supports only one database. Better if you have a big dataset | +| Single write point (single master) | Multiple write points (multiple masters) | +| ![RedisTM Topology](img/redis-topology.png) | ![RedisTM Cluster Topology](img/redis-cluster-topology.png) | + +## Prerequisites + +- Kubernetes 1.12+ +- Helm 3.1.0 +- PV provisioner support in the underlying infrastructure + +## Installing the Chart + +To install the chart with the release name `my-release`: + +```bash +$ helm install my-release bitnami/redis +``` + +The command deploys RedisTM on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation. + +> **Tip**: List all releases using `helm list` + +## Uninstalling the Chart + +To uninstall/delete the `my-release` deployment: + +```bash +$ helm delete my-release +``` + +The command removes all the Kubernetes components associated with the chart and deletes the release. + +## Parameters + +The following table lists the configurable parameters of the RedisTM chart and their default values. + +| Parameter | Description | Default | +|:------------------------------------------------------|:----------------------------------------------------------------------------------------------------------------------------------------------------|:--------------------------------------------------------| +| `global.imageRegistry` | Global Docker image registry | `nil` | +| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) | +| `global.storageClass` | Global storage class for dynamic provisioning | `nil` | +| `global.redis.password` | RedisTM password (overrides `password`) | `nil` | +| `image.registry` | RedisTM Image registry | `docker.io` | +| `image.repository` | RedisTM Image name | `bitnami/redis` | +| `image.tag` | RedisTM Image tag | `{TAG_NAME}` | +| `image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `nameOverride` | String to partially override redis.fullname template with a string (will prepend the release name) | `nil` | +| `fullnameOverride` | String to fully override redis.fullname template with a string | `nil` | +| `cluster.enabled` | Use master-slave topology | `true` | +| `cluster.slaveCount` | Number of slaves | `2` | +| `existingSecret` | Name of existing secret object (for password authentication) | `nil` | +| `existingSecretPasswordKey` | Name of key containing password to be retrieved from the existing secret | `nil` | +| `usePassword` | Use password | `true` | +| `usePasswordFile` | Mount passwords as files instead of environment variables | `false` | +| `password` | RedisTM password (ignored if existingSecret set) | Randomly generated | +| `configmap` | Additional common RedisTM node configuration (this value is evaluated as a template) | See values.yaml | +| 
`clusterDomain` | Kubernetes DNS Domain name to use | `cluster.local` | +| `extraVolumes` | Array of extra volumes to be added to master & slave nodes (evaluated as a template) | `[]` | +| `extraVolumeMounts` | Array of extra volume mounts to be added to master & slave nodes (evaluated as a template) | `[]` | +| `networkPolicy.enabled` | Enable NetworkPolicy | `false` | +| `networkPolicy.allowExternal` | Don't require client label for connections | `true` | +| `networkPolicy.ingressNSMatchLabels` | Allow connections from other namespaces | `{}` | +| `networkPolicy.ingressNSPodMatchLabels` | For other namespaces match by pod labels and namespace labels | `{}` | +| `securityContext.*` | Other pod security context to be included as-is in the pod spec | `{}` | +| `securityContext.enabled` | Enable security context (both redis master and slave pods) | `true` | +| `securityContext.fsGroup` | Group ID for the container (both redis master and slave pods) | `1001` | +| `containerSecurityContext.*` | Other container security context to be included as-is in the container spec | `{}` | +| `containerSecurityContext.enabled` | Enable security context (both redis master and slave containers) | `true` | +| `containerSecurityContext.runAsUser` | User ID for the container (both redis master and slave containers) | `1001` | +| `serviceAccount.create` | Specifies whether a ServiceAccount should be created | `false` | +| `serviceAccount.name` | The name of the ServiceAccount to create | Generated using the fullname template | +| `serviceAccount.annotations` | Specifies annotations to add to ServiceAccount. | `nil` | +| `rbac.create` | Specifies whether RBAC resources should be created | `false` | +| `rbac.role.rules` | Rules to create | `[]` | +| `metrics.enabled` | Start a side-car prometheus exporter | `false` | +| `metrics.image.registry` | RedisTM exporter image registry | `docker.io` | +| `metrics.image.repository` | RedisTM exporter image name | `bitnami/redis-exporter` | +| `metrics.image.tag` | RedisTM exporter image tag | `{TAG_NAME}` | +| `metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` | +| `metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` | +| `metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/oliver006/redis_exporter#flags) | {} | +| `metrics.podLabels` | Additional labels for Metrics exporter pod | {} | +| `metrics.podAnnotations` | Additional annotations for Metrics exporter pod | {} | +| `metrics.resources` | Exporter resource requests/limit | Memory: `256Mi`, CPU: `100m` | +| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` | +| `metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` | +| `metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` | +| `metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` | +| `metrics.serviceMonitor.relabelings` | ServiceMonitor relabelings. Value is evaluated as a template | `[]` | +| `metrics.serviceMonitor.metricRelabelings` | ServiceMonitor metricRelabelings. 
Value is evaluated as a template | `[]` |
+| `metrics.service.type` | Kubernetes Service type (redis metrics) | `ClusterIP` |
+| `metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` |
+| `metrics.service.annotations` | Annotations for the services to monitor (redis master and redis slave service) | {} |
+| `metrics.service.labels` | Additional labels for the metrics service | {} |
+| `metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` |
+| `metrics.priorityClassName` | Metrics exporter pod priorityClassName | `nil` |
+| `metrics.prometheusRule.enabled` | Set this to true to create prometheusRules for Prometheus operator | `false` |
+| `metrics.prometheusRule.additionalLabels` | Additional labels that can be used so prometheusRules will be discovered by Prometheus | `{}` |
+| `metrics.prometheusRule.namespace` | namespace where prometheusRules resource should be created | Same namespace as redis |
+| `metrics.prometheusRule.rules` | [rules](https://prometheus.io/docs/prometheus/latest/configuration/alerting_rules/) to be created, check values for an example. | `[]` |
+| `metrics.redisTargetHost` | Way to specify an alternative redis hostname, e.g. if you set a local endpoint in hostAliases to match a specific redis server certificate CN/SAN | `localhost` |
+| `persistence.existingClaim` | Provide an existing PersistentVolumeClaim | `nil` |
+| `master.persistence.enabled` | Use a PVC to persist data (master node) | `true` |
+| `master.hostAliases` | Add deployment host aliases | `[]` |
+| `master.persistence.path` | Path to mount the volume at, to use other images | `/data` |
+| `master.persistence.subPath` | Subdirectory of the volume to mount at | `""` |
+| `master.persistence.storageClass` | Storage class of backing PVC | `generic` |
+| `master.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
+| `master.persistence.size` | Size of data volume | `8Gi` |
+| `master.persistence.matchLabels` | matchLabels persistent volume selector | `{}` |
+| `master.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` |
+| `master.persistence.volumes` | Additional volumes without creating PVC | `{}` |
+| `master.statefulset.labels` | Additional labels for redis master StatefulSet | `{}` |
+| `master.statefulset.annotations` | Additional annotations for redis master StatefulSet | `{}` |
+| `master.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete |
+| `master.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` |
+| `master.statefulset.volumeClaimTemplates.labels` | Additional labels for redis master StatefulSet volumeClaimTemplates | `{}` |
+| `master.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis master StatefulSet volumeClaimTemplates | `{}` |
+| `master.podLabels` | Additional labels for RedisTM master pod | {} |
+| `master.podAnnotations` | Additional annotations for RedisTM master pod | {} |
+| `master.extraEnvVars` | Additional Environment Variables passed to the pod of the master's stateful set | `[]` |
+| `master.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the master's stateful set | `[]` |
+| `master.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the master's stateful set | `[]` |
+| `master.extraVolumes` | Array of extra volumes to be added to master pod (evaluated as a
template) | `[]` |
+| `master.extraVolumeMounts` | Array of extra volume mounts to be added to master pod (evaluated as a template) | `[]` |
+| `podDisruptionBudget.enabled` | Pod Disruption Budget toggle | `false` |
+| `podDisruptionBudget.minAvailable` | Minimum available pods | `1` |
+| `podDisruptionBudget.maxUnavailable` | Maximum unavailable | `nil` |
+| `redisPort` | RedisTM port (in both master and slaves) | `6379` |
+| `tls.enabled` | Enable TLS support for replication traffic | `false` |
+| `tls.authClients` | Require clients to authenticate or not | `true` |
+| `tls.certificatesSecret` | Name of the secret that contains the certificates | `nil` |
+| `tls.certFilename` | Certificate filename | `nil` |
+| `tls.certKeyFilename` | Certificate key filename | `nil` |
+| `tls.certCAFilename` | CA Certificate filename | `nil` |
+| `tls.dhParamsFilename` | DH params (in order to support DH based ciphers) | `nil` |
+| `master.command` | RedisTM master entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` |
+| `master.preExecCmds` | Text to insert into the startup script immediately prior to `master.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` |
+| `master.configmap` | Additional RedisTM configuration for the master nodes (this value is evaluated as a template) | `nil` |
+| `master.disableCommands` | Array of RedisTM commands to disable (master) | `["FLUSHDB", "FLUSHALL"]` |
+| `master.extraFlags` | RedisTM master additional command line flags | [] |
+| `master.nodeSelector` | RedisTM master Node labels for pod assignment | {"beta.kubernetes.io/arch": "amd64"} |
+| `master.tolerations` | Toleration labels for RedisTM master pod assignment | [] |
+| `master.affinity` | Affinity settings for RedisTM master pod assignment | {} |
+| `master.schedulerName` | Name of an alternate scheduler | `nil` |
+| `master.service.type` | Kubernetes Service type (redis master) | `ClusterIP` |
+| `master.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` |
+| `master.service.port` | Kubernetes Service port (redis master) | `6379` |
+| `master.service.nodePort` | Kubernetes Service nodePort (redis master) | `nil` |
+| `master.service.annotations` | annotations for redis master service | {} |
+| `master.service.labels` | Additional labels for redis master service | {} |
+| `master.service.loadBalancerIP` | loadBalancerIP if redis master service type is `LoadBalancer` | `nil` |
+| `master.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if redis master service type is `LoadBalancer` | `nil` |
+| `master.resources` | RedisTM master CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `100m` |
+| `master.livenessProbe.enabled` | Turn on and off liveness probe (redis master pod) | `true` |
+| `master.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis master pod) | `5` |
+| `master.livenessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` |
+| `master.livenessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `5` |
+| `master.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` |
+| `master.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded.
| `5` |
+| `master.readinessProbe.enabled` | Turn on and off readiness probe (redis master pod) | `true` |
+| `master.readinessProbe.initialDelaySeconds` | Delay before readiness probe is initiated (redis master pod) | `5` |
+| `master.readinessProbe.periodSeconds` | How often to perform the probe (redis master pod) | `5` |
+| `master.readinessProbe.timeoutSeconds` | When the probe times out (redis master pod) | `1` |
+| `master.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis master pod) | `1` |
+| `master.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` |
+| `master.shareProcessNamespace` | RedisTM Master pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. | `false` |
+| `master.priorityClassName` | RedisTM Master pod priorityClassName | `nil` |
+| `volumePermissions.enabled` | Enable init container that changes volume permissions in the registry (for cases where the default k8s `runAsUser` and `fsUser` values do not work) | `false` |
+| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
+| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/bitnami-shell` |
+| `volumePermissions.image.tag` | Init container volume-permissions image tag | `"10"` |
+| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
+| `volumePermissions.resources` | Init container volume-permissions CPU/Memory resource requests/limits | {} |
+| `volumePermissions.securityContext.*` | Security context of the init container | `{}` |
+| `volumePermissions.securityContext.runAsUser` | UserID for the init container (when facing issues in OpenShift or uid unknown, try value "auto") | 0 |
+| `slave.hostAliases` | Add deployment host aliases | `[]` |
+| `slave.service.type` | Kubernetes Service type (redis slave) | `ClusterIP` |
+| `slave.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` |
+| `slave.service.nodePort` | Kubernetes Service nodePort (redis slave) | `nil` |
+| `slave.service.annotations` | annotations for redis slave service | {} |
+| `slave.service.labels` | Additional labels for redis slave service | {} |
+| `slave.service.port` | Kubernetes Service port (redis slave) | `6379` |
+| `slave.service.loadBalancerIP` | LoadBalancerIP if RedisTM slave service type is `LoadBalancer` | `nil` |
+| `slave.service.loadBalancerSourceRanges` | loadBalancerSourceRanges if RedisTM slave service type is `LoadBalancer` | `nil` |
+| `slave.command` | RedisTM slave entrypoint string. The command `redis-server` is executed if this is not provided. Note this is prepended with `exec` | `/run.sh` |
+| `slave.preExecCmds` | Text to insert into the startup script immediately prior to `slave.command`.
Use this if you need to run other ad-hoc commands as part of startup | `nil` | +| `slave.configmap` | Additional RedisTM configuration for the slave nodes (this value is evaluated as a template) | `nil` | +| `slave.disableCommands` | Array of RedisTM commands to disable (slave) | `[FLUSHDB, FLUSHALL]` | +| `slave.extraFlags` | RedisTM slave additional command line flags | `[]` | +| `slave.livenessProbe.enabled` | Turn on and off liveness probe (redis slave pod) | `true` | +| `slave.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis slave pod) | `5` | +| `slave.livenessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.livenessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `5` | +| `slave.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `slave.readinessProbe.enabled` | Turn on and off slave.readiness probe (redis slave pod) | `true` | +| `slave.readinessProbe.initialDelaySeconds` | Delay before slave.readiness probe is initiated (redis slave pod) | `5` | +| `slave.readinessProbe.periodSeconds` | How often to perform the probe (redis slave pod) | `5` | +| `slave.readinessProbe.timeoutSeconds` | When the probe times out (redis slave pod) | `1` | +| `slave.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis slave pod) | `1` | +| `slave.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. (redis slave pod) | `5` | +| `slave.shareProcessNamespace` | RedisTM slave pod `shareProcessNamespace` option. Enables /pause reap zombie PIDs. 
| `false` |
+| `slave.persistence.enabled` | Use a PVC to persist data (slave node) | `true` |
+| `slave.persistence.path` | Path to mount the volume at, to use other images | `/data` |
+| `slave.persistence.subPath` | Subdirectory of the volume to mount at | `""` |
+| `slave.persistence.storageClass` | Storage class of backing PVC | `generic` |
+| `slave.persistence.accessModes` | Persistent Volume Access Modes | `[ReadWriteOnce]` |
+| `slave.persistence.size` | Size of data volume | `8Gi` |
+| `slave.persistence.matchLabels` | matchLabels persistent volume selector | `{}` |
+| `slave.persistence.matchExpressions` | matchExpressions persistent volume selector | `{}` |
+| `slave.statefulset.labels` | Additional labels for redis slave StatefulSet | `{}` |
+| `slave.statefulset.annotations` | Additional annotations for redis slave StatefulSet | `{}` |
+| `slave.statefulset.updateStrategy` | Update strategy for StatefulSet | onDelete |
+| `slave.statefulset.rollingUpdatePartition` | Partition update strategy | `nil` |
+| `slave.statefulset.volumeClaimTemplates.labels` | Additional labels for redis slave StatefulSet volumeClaimTemplates | `{}` |
+| `slave.statefulset.volumeClaimTemplates.annotations` | Additional annotations for redis slave StatefulSet volumeClaimTemplates | `{}` |
+| `slave.extraEnvVars` | Additional Environment Variables passed to the pod of the slave's stateful set | `[]` |
+| `slave.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the slave's stateful set | `[]` |
+| `slave.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the slave's stateful set | `[]` |
+| `slave.extraVolumes` | Array of extra volumes to be added to slave pod (evaluated as a template) | `[]` |
+| `slave.extraVolumeMounts` | Array of extra volume mounts to be added to slave pod (evaluated as a template) | `[]` |
+| `slave.podLabels` | Additional labels for RedisTM slave pod | `master.podLabels` |
+| `slave.podAnnotations` | Additional annotations for RedisTM slave pod | `master.podAnnotations` |
+| `slave.schedulerName` | Name of an alternate scheduler | `nil` |
+| `slave.resources` | RedisTM slave CPU/Memory resource requests/limits | `{}` |
+| `slave.affinity` | Enable node/pod affinity for slaves | {} |
+| `slave.tolerations` | Toleration labels for RedisTM slave pod assignment | [] |
+| `slave.spreadConstraints` | [Topology Spread Constraints](https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/) for RedisTM slave pod | {} |
+| `slave.priorityClassName` | RedisTM Slave pod priorityClassName | `nil` |
+| `sentinel.enabled` | Enable sentinel containers | `false` |
+| `sentinel.usePassword` | Use password for sentinel containers | `true` |
+| `sentinel.masterSet` | Name of the sentinel master set | `mymaster` |
+| `sentinel.initialCheckTimeout` | Timeout for querying the redis sentinel service for the active sentinel list | `5` |
+| `sentinel.quorum` | Quorum for electing a new master | `2` |
+| `sentinel.downAfterMilliseconds` | Timeout for detecting a RedisTM node is down | `60000` |
+| `sentinel.failoverTimeout` | Timeout for performing an election failover | `18000` |
+| `sentinel.parallelSyncs` | Number of parallel syncs in the cluster | `1` |
+| `sentinel.port` | RedisTM Sentinel port | `26379` |
+| `sentinel.cleanDelaySeconds` | Delay seconds before issuing the cleaning in the next node | `5` |
+| `sentinel.configmap` | Additional RedisTM configuration for the sentinel nodes (this value is
evaluated as a template) | `nil` | +| `sentinel.staticID` | Enable static IDs for sentinel replicas (If disabled IDs will be randomly generated on startup) | `false` | +| `sentinel.service.type` | Kubernetes Service type (redis sentinel) | `ClusterIP` | +| `sentinel.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` | +| `sentinel.service.nodePort` | Kubernetes Service nodePort (redis sentinel) | `nil` | +| `sentinel.service.annotations` | annotations for redis sentinel service | {} | +| `sentinel.service.labels` | Additional labels for redis sentinel service | {} | +| `sentinel.service.redisPort` | Kubernetes Service port for RedisTM read only operations | `6379` | +| `sentinel.service.sentinelPort` | Kubernetes Service port for RedisTM sentinel | `26379` | +| `sentinel.service.redisNodePort` | Kubernetes Service node port for RedisTM read only operations | `` | +| `sentinel.service.sentinelNodePort` | Kubernetes Service node port for RedisTM sentinel | `` | +| `sentinel.service.loadBalancerIP` | LoadBalancerIP if RedisTM sentinel service type is `LoadBalancer` | `nil` | +| `sentinel.livenessProbe.enabled` | Turn on and off liveness probe (redis sentinel pod) | `true` | +| `sentinel.livenessProbe.initialDelaySeconds` | Delay before liveness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.livenessProbe.periodSeconds` | How often to perform the probe (redis sentinel container) | `5` | +| `sentinel.livenessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `5` | +| `sentinel.livenessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.livenessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. | `5` | +| `sentinel.readinessProbe.enabled` | Turn on and off sentinel.readiness probe (redis sentinel pod) | `true` | +| `sentinel.readinessProbe.initialDelaySeconds` | Delay before sentinel.readiness probe is initiated (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.periodSeconds` | How often to perform the probe (redis sentinel pod) | `5` | +| `sentinel.readinessProbe.timeoutSeconds` | When the probe times out (redis sentinel container) | `1` | +| `sentinel.readinessProbe.successThreshold` | Minimum consecutive successes for the probe to be considered successful after having failed (redis sentinel container) | `1` | +| `sentinel.readinessProbe.failureThreshold` | Minimum consecutive failures for the probe to be considered failed after having succeeded. 
(redis sentinel container) | `5` |
+| `sentinel.resources` | RedisTM sentinel CPU/Memory resource requests/limits | `{}` |
+| `sentinel.image.registry` | RedisTM Sentinel Image registry | `docker.io` |
+| `sentinel.image.repository` | RedisTM Sentinel Image name | `bitnami/redis-sentinel` |
+| `sentinel.image.tag` | RedisTM Sentinel Image tag | `{TAG_NAME}` |
+| `sentinel.image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `sentinel.image.pullSecrets` | Specify docker-registry secret names as an array | `nil` |
+| `sentinel.extraEnvVars` | Additional Environment Variables passed to the pod of the sentinel node stateful set | `[]` |
+| `sentinel.extraEnvVarCMs` | Additional Environment Variables ConfigMap passed to the pod of the sentinel node stateful set | `[]` |
+| `sentinel.extraEnvVarsSecret` | Additional Environment Variables Secret passed to the sentinel node statefulset | `[]` |
+| `sentinel.extraVolumes` | Array of extra volumes to be added to sentinel node (evaluated as a template) | `[]` |
+| `sentinel.extraVolumeMounts` | Array of extra volume mounts to be added to sentinel node (evaluated as a template) | `[]` |
+| `sentinel.preExecCmds` | Text to insert into the startup script immediately prior to `sentinel.command`. Use this if you need to run other ad-hoc commands as part of startup | `nil` |
+| `sentinel.metrics.enabled` | Start a side-car prometheus sentinel exporter | `false` |
+| `sentinel.metrics.image.registry` | Redis Sentinel exporter image registry | `docker.io` |
+| `sentinel.metrics.image.repository` | Redis Sentinel exporter image name | `bitnami/redis-sentinel-exporter` |
+| `sentinel.metrics.image.tag` | Redis Sentinel exporter image tag | `{TAG_NAME}` |
+| `sentinel.metrics.image.pullPolicy` | Image pull policy | `IfNotPresent` |
+| `sentinel.metrics.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` |
+| `sentinel.metrics.extraArgs` | Extra arguments for the binary; possible values [here](https://github.com/leominov/redis_sentinel_exporter#configuration) | `{}` |
+| `sentinel.metrics.resources` | Exporter resource requests/limit | `{}` |
+| `sentinel.metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `sentinel.metrics.enabled` to be `true`) | `false` |
+| `sentinel.metrics.serviceMonitor.namespace` | Optional namespace which Prometheus is running in | `nil` |
+| `sentinel.metrics.serviceMonitor.interval` | How frequently to scrape metrics (use by default, falling back to Prometheus' default) | `nil` |
+| `sentinel.metrics.serviceMonitor.selector` | Default to kube-prometheus install (CoreOS recommended), but should be set according to Prometheus install | `{ prometheus: kube-prometheus }` |
+| `sentinel.metrics.service.type` | Kubernetes Service type (redis sentinel metrics) | `ClusterIP` |
+| `sentinel.metrics.service.port` | Kubernetes service port (redis sentinel metrics) | `9355` |
+| `sentinel.metrics.service.externalTrafficPolicy` | External traffic policy (when service type is LoadBalancer) | `Cluster` |
+| `sentinel.metrics.service.annotations` | Annotations for the services to monitor (redis sentinel metrics service) | {} |
+| `sentinel.metrics.service.labels` | Additional labels for the Sentinel metrics service | {} |
+| `sentinel.metrics.service.loadBalancerIP` | loadBalancerIP if redis metrics service type is `LoadBalancer` | `nil` |
+| `sentinel.metrics.priorityClassName` | Sentinel metrics exporter pod priorityClassName | `nil` |
+| `sysctlImage.enabled` | Enable an init container to modify Kernel settings | `false` |
+| `sysctlImage.command` | sysctlImage command to execute | [] |
+| `sysctlImage.registry` | sysctlImage Init container registry | `docker.io` |
+| `sysctlImage.repository` | sysctlImage Init container name | `bitnami/bitnami-shell` |
+| `sysctlImage.tag` | sysctlImage Init container tag | `"10"` |
+| `sysctlImage.pullPolicy` | sysctlImage Init container pull policy | `Always` |
+| `sysctlImage.mountHostSys` | Mount the host `/sys` folder to `/host-sys` | `false` |
+| `sysctlImage.resources` | sysctlImage Init container CPU/Memory resource requests/limits | {} |
+| `podSecurityPolicy.create` | Specifies whether a PodSecurityPolicy should be created | `false` |
+
+Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
+
+```bash
+$ helm install my-release \
+  --set password=secretpassword \
+  bitnami/redis
+```
+
+The above command sets the RedisTM server password to `secretpassword`.
+
+> NOTE: Once this chart is deployed, it is not possible to change the application's access credentials, such as usernames or passwords, using Helm. To change these application credentials after deployment, delete any persistent volumes (PVs) used by the chart and re-deploy it, or use the application's built-in administrative tools if available.
+
+Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
+
+```bash
+$ helm install my-release -f values.yaml bitnami/redis
+```
+
+> **Tip**: You can use the default [values.yaml](values.yaml)
+
+> **Note for minikube users**: Current versions of minikube (v0.24.1 at the time of writing) provision `hostPath` persistent volumes that are only writable by root. Using chart defaults causes pod failure for the RedisTM pod as it attempts to write to the `/bitnami` directory. Consider installing RedisTM with `--set persistence.enabled=false`. See minikube issue [1990](https://github.com/kubernetes/minikube/issues/1990) for more information.
+
+## Configuration and installation details
+
+### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
+
+It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
+
+Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
+
+### Change RedisTM version
+
+To modify the RedisTM version used in this chart you can specify a [valid image tag](https://hub.docker.com/r/bitnami/redis/tags/) using the `image.tag` parameter. For example, `image.tag=X.Y.Z`. This approach is also applicable to other images like exporters.
+
+### Cluster topologies
+
+#### Default: Master-Slave
+
+When installing the chart with `cluster.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master node allowed) and a RedisTM slave StatefulSet. The slaves will be read-replicas of the master. Two services will be exposed:
+
+ - RedisTM Master service: Points to the master, where read-write operations can be performed.
+ - RedisTM Slave service: Points to the slaves, where only read operations are allowed.
+
+In case the master crashes, the slaves will wait until the master node is respawned again by the Kubernetes Controller Manager.
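+
+As a quick illustration, assuming a release named `my-release` and the chart's usual naming conventions (a `my-release-redis` secret and `my-release-redis-master`/`my-release-redis-slave` services; adjust to your release), reads and writes can be exercised from a disposable `redis-cli` pod:
+
+```bash
+# Fetch the generated password from the chart's secret
+export REDIS_PASSWORD=$(kubectl get secret my-release-redis -o jsonpath="{.data.redis-password}" | base64 --decode)
+
+# Write through the master (read-write) service
+kubectl run redis-client --rm -it --image=docker.io/bitnami/redis -- \
+  redis-cli -h my-release-redis-master -a "$REDIS_PASSWORD" SET greeting hello
+
+# Read back through the slave (read-only) service
+kubectl run redis-client --rm -it --image=docker.io/bitnami/redis -- \
+  redis-cli -h my-release-redis-slave -a "$REDIS_PASSWORD" GET greeting
+```
+
+Write commands sent to the slave service are rejected by the read-replicas with a `READONLY` error.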
+
+#### Master-Slave with Sentinel
+
+When installing the chart with `cluster.enabled=true` and `sentinel.enabled=true`, it will deploy a RedisTM master StatefulSet (only one master allowed) and a RedisTM slave StatefulSet. In this case, the pods will contain an extra container with RedisTM Sentinel. This container will form a cluster of RedisTM Sentinel nodes, which will promote a new master in case the actual one fails. In addition to this, only one service is exposed:
+
+ - RedisTM service: Exposes port 6379 for RedisTM read-only operations and port 26379 for accessing RedisTM Sentinel.
+
+For read-only operations, access the service using port 6379. For write operations, it's necessary to access the RedisTM Sentinel cluster and query the current master using the command below (using redis-cli or similar):
+
+```
+SENTINEL get-master-addr-by-name <name of your MasterSet>
+```
+
+This command will return the address of the current master, which can be accessed from inside the cluster.
+
+In case the current master crashes, the Sentinel containers will elect a new master node.
+
+### Using password file
+
+To use a password file for RedisTM you need to create a secret containing the password.
+
+> *NOTE*: It is important that the file containing the password is called `redis-password`.
+
+And then deploy the Helm Chart using the secret name as a parameter:
+
+```console
+usePassword=true
+usePasswordFile=true
+existingSecret=redis-password-file
+sentinel.enabled=true
+metrics.enabled=true
+```
+
+### Securing traffic using TLS
+
+TLS support can be enabled in the chart by specifying the `tls.` parameters while creating a release. The following parameters should be configured to properly enable the TLS support in the chart:
+
+- `tls.enabled`: Enable TLS support. Defaults to `false`
+- `tls.certificatesSecret`: Name of the secret that contains the certificates. No defaults.
+- `tls.certFilename`: Certificate filename. No defaults.
+- `tls.certKeyFilename`: Certificate key filename. No defaults.
+- `tls.certCAFilename`: CA Certificate filename. No defaults.
+
+For example:
+
+First, create the secret with the certificates files:
+
+```console
+kubectl create secret generic certificates-tls-secret --from-file=./cert.pem --from-file=./cert.key --from-file=./ca.pem
+```
+
+Then, use the following parameters:
+
+```console
+tls.enabled="true"
+tls.certificatesSecret="certificates-tls-secret"
+tls.certFilename="cert.pem"
+tls.certKeyFilename="cert.key"
+tls.certCAFilename="ca.pem"
+```
+
+### Metrics
+
+The chart optionally can start a metrics exporter for [prometheus](https://prometheus.io). The metrics endpoint (port 9121) is exposed in the service. Metrics can be scraped from within the cluster using something similar to what is described in the [example Prometheus scrape configuration](https://github.com/prometheus/prometheus/blob/master/documentation/examples/prometheus-kubernetes.yml). If metrics are to be scraped from outside the cluster, the Kubernetes API proxy can be utilized to access the endpoint.
+
+If you have enabled TLS by specifying `tls.enabled=true` you also need to pass the TLS options to the metrics exporter. You can do that via `metrics.extraArgs`. You can find the metrics exporter CLI flags for TLS [here](https://github.com/oliver006/redis_exporter#command-line-flags). For example, you can either specify `metrics.extraArgs.skip-tls-verification=true` to skip TLS verification, or provide the following values under `metrics.extraArgs` for TLS client authentication:
+
+```console
+tls-client-key-file
+tls-client-cert-file
+tls-ca-cert-file
+```
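+
+Putting the TLS and metrics pieces together, a release could be installed like this (a sketch that reuses the `certificates-tls-secret` created above):
+
+```bash
+helm install my-release bitnami/redis \
+  --set tls.enabled=true \
+  --set tls.certificatesSecret=certificates-tls-secret \
+  --set tls.certFilename=cert.pem \
+  --set tls.certKeyFilename=cert.key \
+  --set tls.certCAFilename=ca.pem \
+  --set metrics.enabled=true \
+  --set metrics.extraArgs.skip-tls-verification=true
+```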
+
+### Host Kernel Settings
+
+RedisTM may require some changes in the kernel of the host machine to work as expected, in particular increasing the `somaxconn` value and disabling transparent huge pages.
+To do so, you can set up a privileged initContainer with the `sysctlImage` config values, for example:
+
+```
+sysctlImage:
+  enabled: true
+  mountHostSys: true
+  command:
+    - /bin/sh
+    - -c
+    - |-
+      sysctl -w net.core.somaxconn=10000
+      echo never > /host-sys/kernel/mm/transparent_hugepage/enabled
+```
+
+Alternatively, for Kubernetes 1.12+ you can set `securityContext.sysctls` which will configure sysctls for master and slave pods. Example:
+
+```yaml
+securityContext:
+  sysctls:
+  - name: net.core.somaxconn
+    value: "10000"
+```
+
+Note that this will not disable transparent huge pages.
+
+## Persistence
+
+By default, the chart mounts a [Persistent Volume](http://kubernetes.io/docs/user-guide/persistent-volumes/) at the `/data` path. The volume is created using dynamic volume provisioning. If a Persistent Volume Claim already exists, specify it during installation.
+
+### Existing PersistentVolumeClaim
+
+1. Create the PersistentVolume
+2. Create the PersistentVolumeClaim
+3. Install the chart
+
+```bash
+$ helm install my-release --set persistence.existingClaim=PVC_NAME bitnami/redis
+```
+
+## Backup and restore
+
+### Backup
+
+To perform a backup you will need to connect to one of the nodes and execute:
+
+```bash
+$ kubectl exec -it my-redis-master-0 bash
+
+$ redis-cli
+127.0.0.1:6379> auth your_current_redis_password
+OK
+127.0.0.1:6379> save
+OK
+```
+
+Then you will need to get the created dump file from the redis node:
+
+```bash
+$ kubectl cp my-redis-master-0:/data/dump.rdb dump.rdb -c redis
+```
+
+### Restore
+
+To restore in a new cluster, you will need to change a parameter in the redis.conf file and then upload the `dump.rdb` to the volume.
+
+Follow these steps:
+
+- First you will need to set the parameter `appendonly` to `no` in the `values.yaml`; if it is already `no` you can skip this step.
+
+```yaml
+configmap: |-
+  # Disable AOF https://redis.io/topics/persistence#append-only-file
+  appendonly no
+  # Disable RDB persistence. The dump.rdb file will be restored manually.
+  save ""
+```
+
+- Start the new cluster to create the PVCs.
+
+For example:
+
+```bash
+helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3
+```
+
+- Now that the PVCs were created, stop the release and copy the `dump.rdb` onto the persisted data by using a helper pod.
+
+```
+$ helm delete new-redis
+
+$ kubectl run --generator=run-pod/v1 -i --rm --tty volpod --overrides='
+{
+    "apiVersion": "v1",
+    "kind": "Pod",
+    "metadata": {
+        "name": "redisvolpod"
+    },
+    "spec": {
+        "containers": [{
+            "command": [
+                "tail",
+                "-f",
+                "/dev/null"
+            ],
+            "image": "bitnami/minideb",
+            "name": "mycontainer",
+            "volumeMounts": [{
+                "mountPath": "/mnt",
+                "name": "redisdata"
+            }]
+        }],
+        "restartPolicy": "Never",
+        "volumes": [{
+            "name": "redisdata",
+            "persistentVolumeClaim": {
+                "claimName": "redis-data-new-redis-master-0"
+            }
+        }]
+    }
+}' --image="bitnami/minideb"
+
+$ kubectl cp dump.rdb redisvolpod:/mnt/dump.rdb
+$ kubectl delete pod redisvolpod
+```
+
+- Start the cluster again:
+
+```
+helm install new-redis -f values.yaml . --set cluster.enabled=true --set cluster.slaveCount=3
+```
+
+## NetworkPolicy
+
+To enable network policy for RedisTM, install
+[a networking plugin that implements the Kubernetes NetworkPolicy spec](https://kubernetes.io/docs/tasks/administer-cluster/declare-network-policy#before-you-begin),
+and set `networkPolicy.enabled` to `true`.
+
+For Kubernetes v1.5 & v1.6, you must also turn on NetworkPolicy by setting
+the DefaultDeny namespace annotation. Note: this will enforce policy for _all_ pods in the namespace:
+
+    kubectl annotate namespace default "net.beta.kubernetes.io/network-policy={\"ingress\":{\"isolation\":\"DefaultDeny\"}}"
+
+With NetworkPolicy enabled, only pods with the generated client label will be
+able to connect to RedisTM. This label will be displayed in the output
+after a successful install.
+
+With `networkPolicy.ingressNSMatchLabels` pods from other namespaces can connect to redis. Set `networkPolicy.ingressNSPodMatchLabels` to match pod labels in the matched namespace. For example, for a namespace labeled `redis=external` and pods in that namespace labeled `redis-client=true` the fields should be set:
+
+```
+networkPolicy:
+  enabled: true
+  ingressNSMatchLabels:
+    redis: external
+  ingressNSPodMatchLabels:
+    redis-client: true
+```
+
+## Troubleshooting
+
+Find more information about how to deal with common errors related to Bitnami's Helm charts in [this troubleshooting guide](https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues).
+
+## Upgrading an existing Release to a new major version
+
+A major chart version change (like v1.2.3 -> v2.0.0) indicates that there is an
+incompatible breaking change needing manual actions.
+
+### To 11.0.0
+
+When using sentinel, a new statefulset called `-node` was introduced. This will break upgrading from a previous version where the statefulsets are called master and slave. Hence the PVC will not match the new naming and won't be reused. If you want to keep your data, you will need to perform a backup and then restore the data in this new version.
+
+### To 10.0.0
+
+For releases with `usePassword: true`, the value `sentinel.usePassword` controls whether the password authentication also applies to the sentinel port. This defaults to `true` for a secure configuration, however it is possible to disable it to account for the following cases:
+
+- Using a version of redis-sentinel prior to `5.0.1` where the authentication feature was introduced.
+- Where redis clients need to be updated to support sentinel authentication.
+
+If using a master/slave topology, or with `usePassword: false`, no action is required.
+
+### To 8.0.18
+
+For releases with `metrics.enabled: true` the default tag for the exporter image is now `v1.x.x`.
This introduces many changes, including metric names. You'll want to use [this dashboard](https://github.com/oliver006/redis_exporter/blob/master/contrib/grafana_prometheus_redis_dashboard.json) now. Please see the [redis_exporter github page](https://github.com/oliver006/redis_exporter#upgrading-from-0x-to-1x) for more details.
+
+### To 7.0.0
+
+This version causes a change in the RedisTM Master StatefulSet definition, so the command `helm upgrade` would not work out of the box. As an alternative, one of the following could be done:
+
+- Recommended: Create a clone of the RedisTM Master PVC (for example, using projects like [this one](https://github.com/edseymour/pvc-transfer)). Then launch a fresh release reusing this cloned PVC.
+
+  ```
+  helm install my-release bitnami/redis --set persistence.existingClaim=<cloned-pvc-name>
+  ```
+
+- Alternative (not recommended, do at your own risk): `helm delete --purge` does not remove the PVC assigned to the RedisTM Master StatefulSet. As a consequence, the following commands can be run to upgrade the release
+
+  ```
+  helm delete --purge <release-name>
+  helm install <release-name> bitnami/redis
+  ```
+
+Previous versions of the chart were not using persistence in the slaves, so this upgrade would add it to them. Another important change is that no values are inherited from master to slaves. For example, in 6.0.0 `slaves.readinessProbe.periodSeconds`, if empty, would be set to `master.readinessProbe.periodSeconds`. This approach lacked transparency and was difficult to maintain. From now on, all the slave parameters must be configured just as it is done with the masters.
+
+Some values have changed as well:
+
+- `master.port` and `slave.port` have been changed to `redisPort` (same value for both master and slaves)
+- `master.securityContext` and `slave.securityContext` have been changed to `securityContext` (same values for both master and slaves)
+
+By default, the upgrade will not change the cluster topology. In case you want to use RedisTM Sentinel, you must explicitly set `sentinel.enabled` to `true`.
+
+### To 6.0.0
+
+Previous versions of the chart were using an init-container to change the permissions of the volumes. This was done in case the `securityContext` directive in the template was not enough for that (for example, with cephFS). In this new version of the chart, this container is disabled by default (which should not affect most of the deployments). If your installation still requires that init container, execute `helm upgrade` with the `--set volumePermissions.enabled=true` flag.
+
+### To 5.0.0
+
+The default image in this release may be switched out for any image containing the `redis-server`
+and `redis-cli` binaries. If `redis-server` is not the default image ENTRYPOINT, `master.command`
+must be specified.
+
+#### Breaking changes
+
+- `master.args` and `slave.args` are removed. Use `master.command` or `slave.command` instead in order to override the image entrypoint, or `master.extraFlags` to pass additional flags to `redis-server`.
+- `disableCommands` is now interpreted as an array of strings instead of a string of comma separated values.
+- `master.persistence.path` now defaults to `/data`.
+
+### To 4.0.0
+
+This version removes the `chart` label from the `spec.selector.matchLabels`
+which is immutable since `StatefulSet apps/v1beta2`. It has been inadvertently
+added, causing any subsequent upgrade to fail. See https://github.com/helm/charts/issues/7726.
+
+It also fixes https://github.com/helm/charts/issues/7726 where a deployment `extensions/v1beta1` can not be upgraded if `spec.selector` is not explicitly set.
+
+Finally, it fixes https://github.com/helm/charts/issues/7803 by removing mutable labels in `spec.VolumeClaimTemplate.metadata.labels` so that it is upgradable.
+
+In order to upgrade, delete the RedisTM StatefulSet before upgrading:
+
+```bash
+kubectl delete statefulsets.apps --cascade=false my-release-redis-master
+```
+
+And edit the RedisTM slave (and metrics if enabled) deployment:
+
+```bash
+kubectl patch deployments my-release-redis-slave --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+kubectl patch deployments my-release-redis-metrics --type=json -p='[{"op": "remove", "path": "/spec/selector/matchLabels/chart"}]'
+```
+
+## Upgrading
+
+### To 12.0.0
+
+[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project). This major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL.
+
+**What changes were introduced in this major version?**
+
+- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field.
+- The different fields present in the *Chart.yaml* file have been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts
+
+**Considerations when upgrading to this version**
+
+- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues
+- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore
+- If you installed the previous version with Helm v2 and want to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3
+
+**Useful links**
+
+- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/
+- https://helm.sh/docs/topics/v2_v3_migration/
+- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/
+
+### To 11.0.0
+
+When deployed with sentinel enabled, only a group of nodes is deployed and the master/slave role is handled within the group. To avoid breaking the compatibility, the settings for these nodes are given through the `slave.xxxx` parameters in `values.yaml`
+
+### To 9.0.0
+
+The metrics exporter has been changed from a separate deployment to a sidecar container, due to the latest changes in the RedisTM exporter code. Check the [official page](https://github.com/oliver006/redis_exporter/) for more information. The metrics container image was changed from oliver006/redis_exporter to bitnami/redis-exporter (Bitnami's maintained package of oliver006/redis_exporter).
+
+### To 7.0.0
+
+In order to improve the performance in case of slave failure, we added persistence to the read-only slaves. That means that we moved from Deployment to StatefulSets. This should not affect upgrades from previous versions of the chart, as the deployments did not contain any persistence at all.
+
+This version also allows enabling RedisTM Sentinel containers inside of the RedisTM Pods (feature disabled by default). In case the master crashes, a new RedisTM node will be elected as master. In order to query the current master (no redis master service is exposed), you need to query the Sentinel cluster first. Find more information [in this section](#master-slave-with-sentinel).
diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/.helmignore b/scripts/helm/helmcharts/databases/charts/redis/charts/common/.helmignore
new file mode 100644
index 000000000..50af03172
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/.helmignore
@@ -0,0 +1,22 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/Chart.yaml b/scripts/helm/helmcharts/databases/charts/redis/charts/common/Chart.yaml
new file mode 100644
index 000000000..bcc3808d0
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/Chart.yaml
@@ -0,0 +1,23 @@
+annotations:
+  category: Infrastructure
+apiVersion: v2
+appVersion: 1.4.2
+description: A Library Helm Chart for grouping common logic between bitnami charts.
+  This chart is not deployable by itself.
+home: https://github.com/bitnami/charts/tree/master/bitnami/common
+icon: https://bitnami.com/downloads/logos/bitnami-mark.png
+keywords:
+- common
+- helper
+- template
+- function
+- bitnami
+maintainers:
+- email: containers@bitnami.com
+  name: Bitnami
+name: common
+sources:
+- https://github.com/bitnami/charts
+- http://www.bitnami.com/
+type: library
+version: 1.4.2
diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/README.md b/scripts/helm/helmcharts/databases/charts/redis/charts/common/README.md
new file mode 100644
index 000000000..559788e39
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/README.md
@@ -0,0 +1,322 @@
+# Bitnami Common Library Chart
+
+A [Helm Library Chart](https://helm.sh/docs/topics/library_charts/#helm) for grouping common logic between bitnami charts.
+
+## TL;DR
+
+```yaml
+dependencies:
+  - name: common
+    version: 0.x.x
+    repository: https://charts.bitnami.com/bitnami
+```
+
+```bash
+$ helm dependency update
+```
+
+```yaml
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ include "common.names.fullname" . }}
+data:
+  myvalue: "Hello World"
+```
+
+## Introduction
+
+This chart provides common template helpers which can be used to develop new charts using the [Helm](https://helm.sh) package manager.
+
+Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
+
+## Prerequisites
+
+- Kubernetes 1.12+
+- Helm 3.1.0
+
+## Parameters
+
+The following table lists the helpers available in the library, which are scoped in different sections.
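+
+As a quick orientation, chart templates consume these helpers through `include`. A minimal sketch using the names and labels helpers documented below:
+
+```yaml
+# templates/service.yaml
+apiVersion: v1
+kind: Service
+metadata:
+  name: {{ include "common.names.fullname" . }}
+  labels: {{- include "common.labels.standard" . | nindent 4 }}
+spec:
+  selector: {{- include "common.labels.matchLabels" . | nindent 4 }}
+  ports:
+    - port: 6379
+```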
+ +### Affinities + +| Helper identifier | Description | Expected Input | +|-------------------------------|------------------------------------------------------|------------------------------------------------| +| `common.affinities.node.soft` | Return a soft nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.node.hard` | Return a hard nodeAffinity definition | `dict "key" "FOO" "values" (list "BAR" "BAZ")` | +| `common.affinities.pod.soft` | Return a soft podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | +| `common.affinities.pod.hard` | Return a hard podAffinity/podAntiAffinity definition | `dict "component" "FOO" "context" $` | + +### Capabilities + +| Helper identifier | Description | Expected Input | +|----------------------------------------------|------------------------------------------------------------------------------------------------|-------------------| +| `common.capabilities.kubeVersion` | Return the target Kubernetes version (using client default if .Values.kubeVersion is not set). | `.` Chart context | +| `common.capabilities.deployment.apiVersion` | Return the appropriate apiVersion for deployment. | `.` Chart context | +| `common.capabilities.statefulset.apiVersion` | Return the appropriate apiVersion for statefulset. | `.` Chart context | +| `common.capabilities.ingress.apiVersion` | Return the appropriate apiVersion for ingress. | `.` Chart context | +| `common.capabilities.rbac.apiVersion` | Return the appropriate apiVersion for RBAC resources. | `.` Chart context | +| `common.capabilities.crd.apiVersion` | Return the appropriate apiVersion for CRDs. | `.` Chart context | +| `common.capabilities.supportsHelmVersion` | Returns true if the used Helm version is 3.3+ | `.` Chart context | + +### Errors + +| Helper identifier | Description | Expected Input | +|-----------------------------------------|------------------------------------------------------------------------------------------------------------------------------------------------------------------------|-------------------------------------------------------------------------------------| +| `common.errors.upgrade.passwords.empty` | It will ensure required passwords are given when we are upgrading a chart. If `validationErrors` is not empty it will throw an error and will stop the upgrade action. | `dict "validationErrors" (list $validationError00 $validationError01) "context" $` | + +### Images + +| Helper identifier | Description | Expected Input | +|-----------------------------|------------------------------------------------------|---------------------------------------------------------------------------------------------------------| +| `common.images.image` | Return the proper and full image name | `dict "imageRoot" .Values.path.to.the.image "global" $`, see [ImageRoot](#imageroot) for the structure. 
|
+| `common.images.pullSecrets` | Return the proper Docker Image Registry Secret Names | `dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global` |
+
+### Ingress
+
+| Helper identifier        | Description                                                           | Expected Input |
+|--------------------------|-----------------------------------------------------------------------|----------------|
+| `common.ingress.backend` | Generate a proper Ingress backend entry depending on the API version | `dict "serviceName" "foo" "servicePort" "bar"`, see the [Ingress deprecation notice](https://kubernetes.io/blog/2019/07/18/api-deprecations-in-1-16/) for the syntax differences |
+
+### Labels
+
+| Helper identifier           | Description                                                         | Expected Input    |
+|-----------------------------|---------------------------------------------------------------------|-------------------|
+| `common.labels.standard`    | Return Kubernetes standard labels                                   | `.` Chart context |
+| `common.labels.matchLabels` | Return labels to use on `deploy.spec.selector.matchLabels` and `svc.spec.selector` | `.` Chart context |
+
+### Names
+
+| Helper identifier       | Description                                                 | Expected Input    |
+|-------------------------|-------------------------------------------------------------|-------------------|
+| `common.names.name`     | Expand the name of the chart or use `.Values.nameOverride`  | `.` Chart context |
+| `common.names.fullname` | Create a default fully qualified app name.                  | `.` Chart context |
+| `common.names.chart`    | Chart name plus version                                     | `.` Chart context |
+
+### Secrets
+
+| Helper identifier         | Description                                                   | Expected Input |
+|---------------------------|---------------------------------------------------------------|----------------|
+| `common.secrets.name`     | Generate the name of the secret.                              | `dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.secrets.key`      | Generate secret key.                                          | `dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName"` see [ExistingSecret](#existingsecret) for the structure. |
+| `common.passwords.manage` | Generate secret password or retrieve one if already created.  | `dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $`, the length, strong and chartName fields are optional. |
+| `common.secrets.exists`   | Returns whether a previously generated secret already exists. | `dict "secret" "secret-name" "context" $` |
+
+### Storage
+
+| Helper identifier       | Description                     | Expected Input |
+|-------------------------|---------------------------------|----------------|
+| `common.storage.class`  | Return the proper Storage Class | `dict "persistence" .Values.path.to.the.persistence "global" $`, see [Persistence](#persistence) for the structure. |
+
+### TplValues
+
+| Helper identifier         | Description                               | Expected Input |
+|---------------------------|-------------------------------------------|----------------|
+| `common.tplvalues.render` | Renders a value that contains a template  | `dict "value" .Values.path.to.the.Value "context" $`, where value is the value that should be rendered as a template and context is usually the chart context `$` or `.` |
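+
+For instance, `common.tplvalues.render` lets a chart accept either literal values or template strings in `values.yaml` (a sketch; the `podAnnotations` value is hypothetical):
+
+```yaml
+# values.yaml
+podAnnotations:
+  backup/scheduled-at: '{{ now | date "2006-01-02" }}'
+
+# templates/deployment.yaml (fragment)
+  annotations: {{- include "common.tplvalues.render" (dict "value" .Values.podAnnotations "context" $) | nindent 4 }}
+```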
+
+### Utils
+
+| Helper identifier              | Description                                                                                             | Expected Input |
+|--------------------------------|---------------------------------------------------------------------------------------------------------|----------------|
+| `common.utils.fieldToEnvVar`   | Build an environment variable name given a field.                                                        | `dict "field" "my-password"` |
+| `common.utils.secret.getvalue` | Print instructions to get a secret value.                                                                | `dict "secret" "secret-name" "field" "secret-value-field" "context" $` |
+| `common.utils.getValueFromKey` | Gets a value from the `.Values` object given its key path                                                | `dict "key" "path.to.key" "context" $` |
+| `common.utils.getKeyFromList`  | Returns the first `.Values` key with a defined value, or the first key of the list if none are defined   | `dict "keys" (list "path.to.key1" "path.to.key2") "context" $` |
+
+### Validations
+
+| Helper identifier | Description | Expected Input |
+|-------------------|-------------|----------------|
+| `common.validations.values.single.empty` | Validate that a value is not empty. | `dict "valueKey" "path.to.value" "secret" "secret.name" "field" "my-password" "subchart" "subchart" "context" $` secret, field and subchart are optional. In case they are given, the helper will generate instructions on how to get the value. See [ValidateValue](#validatevalue) |
+| `common.validations.values.multiple.empty` | Validate that multiple values are not empty. It returns a shared error for all the values. | `dict "required" (list $validateValueConf00 $validateValueConf01) "context" $`. See [ValidateValue](#validatevalue) |
+| `common.validations.values.mariadb.passwords` | This helper will ensure the required passwords for MariaDB are not empty. It returns a shared error for all the values. | `dict "secret" "mariadb-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on whether the mariadb chart is used as a subchart. |
+| `common.validations.values.postgresql.passwords` | This helper will ensure the required passwords for PostgreSQL are not empty. It returns a shared error for all the values. | `dict "secret" "postgresql-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on whether the postgresql chart is used as a subchart. |
+| `common.validations.values.redis.passwords` | This helper will ensure the required passwords for RedisTM are not empty. It returns a shared error for all the values. | `dict "secret" "redis-secret" "subchart" "true" "context" $` the subchart field is optional and can be true or false, depending on whether the redis chart is used as a subchart. |
| `dict "secret" "redis-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use redis chart and the helper. | +| `common.validations.values.cassandra.passwords` | This helper will ensure required password for Cassandra are not empty. It returns a shared error for all the values. | `dict "secret" "cassandra-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use cassandra chart and the helper. | +| `common.validations.values.mongodb.passwords` | This helper will ensure required password for MongoDB® are not empty. It returns a shared error for all the values. | `dict "secret" "mongodb-secret" "subchart" "true" "context" $` subchart field is optional and could be true or false it depends on where you will use mongodb chart and the helper. | + +### Warnings + +| Helper identifier | Description | Expected Input | +|------------------------------|----------------------------------|------------------------------------------------------------| +| `common.warnings.rollingTag` | Warning about using rolling tag. | `ImageRoot` see [ImageRoot](#imageroot) for the structure. | + +## Special input schemas + +### ImageRoot + +```yaml +registry: + type: string + description: Docker registry where the image is located + example: docker.io + +repository: + type: string + description: Repository and image name + example: bitnami/nginx + +tag: + type: string + description: image tag + example: 1.16.1-debian-10-r63 + +pullPolicy: + type: string + description: Specify a imagePullPolicy. Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + +pullSecrets: + type: array + items: + type: string + description: Optionally specify an array of imagePullSecrets. + +debug: + type: boolean + description: Set to true if you would like to see extra information on logs + example: false + +## An instance would be: +# registry: docker.io +# repository: bitnami/nginx +# tag: 1.16.1-debian-10-r63 +# pullPolicy: IfNotPresent +# debug: false +``` + +### Persistence + +```yaml +enabled: + type: boolean + description: Whether enable persistence. + example: true + +storageClass: + type: string + description: Ghost data Persistent Volume Storage Class, If set to "-", storageClassName: "" which disables dynamic provisioning. + example: "-" + +accessMode: + type: string + description: Access mode for the Persistent Volume Storage. + example: ReadWriteOnce + +size: + type: string + description: Size the Persistent Volume Storage. + example: 8Gi + +path: + type: string + description: Path to be persisted. + example: /bitnami + +## An instance would be: +# enabled: true +# storageClass: "-" +# accessMode: ReadWriteOnce +# size: 8Gi +# path: /bitnami +``` + +### ExistingSecret + +```yaml +name: + type: string + description: Name of the existing secret. + example: mySecret +keyMapping: + description: Mapping between the expected key name and the name of the key in the existing secret. + type: object + +## An instance would be: +# name: mySecret +# keyMapping: +# password: myPasswordKey +``` + +#### Example of use + +When we store sensitive data for a deployment in a secret, some times we want to give to users the possibility of using theirs existing secrets. + +```yaml +# templates/secret.yaml +--- +apiVersion: v1 +kind: Secret +metadata: + name: {{ include "common.names.fullname" . }} + labels: + app: {{ include "common.names.fullname" . 
}} +type: Opaque +data: + password: {{ .Values.password | b64enc | quote }} + +# templates/dpl.yaml +--- +... + env: + - name: PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "common.secrets.name" (dict "existingSecret" .Values.existingSecret "context" $) }} + key: {{ include "common.secrets.key" (dict "existingSecret" .Values.existingSecret "key" "password") }} +... + +# values.yaml +--- +name: mySecret +keyMapping: + password: myPasswordKey +``` + +### ValidateValue + +#### NOTES.txt + +```console +{{- $validateValueConf00 := (dict "valueKey" "path.to.value00" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value01" "secret" "secretName" "field" "password-01") -}} + +{{ include "common.validations.values.multiple.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} +``` + +If we force those values to be empty we will see some alerts + +```console +$ helm install test mychart --set path.to.value00="",path.to.value01="" + 'path.to.value00' must not be empty, please add '--set path.to.value00=$PASSWORD_00' to the command. To get the current value: + + export PASSWORD_00=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-00}" | base64 --decode) + + 'path.to.value01' must not be empty, please add '--set path.to.value01=$PASSWORD_01' to the command. To get the current value: + + export PASSWORD_01=$(kubectl get secret --namespace default secretName -o jsonpath="{.data.password-01}" | base64 --decode) +``` + +## Upgrading + +### To 1.0.0 + +[On November 13, 2020, Helm v2 support was formally finished](https://github.com/helm/charts#status-of-the-project), this major version is the result of the required changes applied to the Helm Chart to be able to incorporate the different features added in Helm v3 and to be consistent with the Helm project itself regarding the Helm v2 EOL. + +**What changes were introduced in this major version?** + +- Previous versions of this Helm Chart use `apiVersion: v1` (installable by both Helm 2 and 3), this Helm Chart was updated to `apiVersion: v2` (installable by Helm 3 only). [Here](https://helm.sh/docs/topics/charts/#the-apiversion-field) you can find more information about the `apiVersion` field. +- Use `type: library`. [Here](https://v3.helm.sh/docs/faq/#library-chart-support) you can find more information. 
+- The different fields present in the *Chart.yaml* file has been ordered alphabetically in a homogeneous way for all the Bitnami Helm Charts + +**Considerations when upgrading to this version** + +- If you want to upgrade to this version from a previous one installed with Helm v3, you shouldn't face any issues +- If you want to upgrade to this version using Helm v2, this scenario is not supported as this version doesn't support Helm v2 anymore +- If you installed the previous version with Helm v2 and wants to upgrade to this version with Helm v3, please refer to the [official Helm documentation](https://helm.sh/docs/topics/v2_v3_migration/#migration-use-cases) about migrating from Helm v2 to v3 + +**Useful links** + +- https://docs.bitnami.com/tutorials/resolve-helm2-helm3-post-migration-issues/ +- https://helm.sh/docs/topics/v2_v3_migration/ +- https://helm.sh/blog/migrate-from-helm-v2-to-helm-v3/ diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_affinities.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_affinities.tpl new file mode 100644 index 000000000..493a6dc7e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_affinities.tpl @@ -0,0 +1,94 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return a soft nodeAffinity definition +{{ include "common.affinities.nodes.soft" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.soft" -}} +preferredDuringSchedulingIgnoredDuringExecution: + - preference: + matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} + weight: 1 +{{- end -}} + +{{/* +Return a hard nodeAffinity definition +{{ include "common.affinities.nodes.hard" (dict "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes.hard" -}} +requiredDuringSchedulingIgnoredDuringExecution: + nodeSelectorTerms: + - matchExpressions: + - key: {{ .key }} + operator: In + values: + {{- range .values }} + - {{ . }} + {{- end }} +{{- end -}} + +{{/* +Return a nodeAffinity definition +{{ include "common.affinities.nodes" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.nodes" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.nodes.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.nodes.hard" . 
-}} + {{- end -}} +{{- end -}} + +{{/* +Return a soft podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.soft" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.soft" -}} +{{- $component := default "" .component -}} +preferredDuringSchedulingIgnoredDuringExecution: + - podAffinityTerm: + labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 10 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname + weight: 1 +{{- end -}} + +{{/* +Return a hard podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods.hard" (dict "component" "FOO" "context" $) -}} +*/}} +{{- define "common.affinities.pods.hard" -}} +{{- $component := default "" .component -}} +requiredDuringSchedulingIgnoredDuringExecution: + - labelSelector: + matchLabels: {{- (include "common.labels.matchLabels" .context) | nindent 8 }} + {{- if not (empty $component) }} + {{ printf "app.kubernetes.io/component: %s" $component }} + {{- end }} + namespaces: + - {{ .context.Release.Namespace | quote }} + topologyKey: kubernetes.io/hostname +{{- end -}} + +{{/* +Return a podAffinity/podAntiAffinity definition +{{ include "common.affinities.pods" (dict "type" "soft" "key" "FOO" "values" (list "BAR" "BAZ")) -}} +*/}} +{{- define "common.affinities.pods" -}} + {{- if eq .type "soft" }} + {{- include "common.affinities.pods.soft" . -}} + {{- else if eq .type "hard" }} + {{- include "common.affinities.pods.hard" . -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_capabilities.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_capabilities.tpl new file mode 100644 index 000000000..4dde56a38 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_capabilities.tpl @@ -0,0 +1,95 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Return the target Kubernetes version +*/}} +{{- define "common.capabilities.kubeVersion" -}} +{{- if .Values.global }} + {{- if .Values.global.kubeVersion }} + {{- .Values.global.kubeVersion -}} + {{- else }} + {{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} + {{- end -}} +{{- else }} +{{- default .Capabilities.KubeVersion.Version .Values.kubeVersion -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for deployment. +*/}} +{{- define "common.capabilities.deployment.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for statefulset. +*/}} +{{- define "common.capabilities.statefulset.apiVersion" -}} +{{- if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "apps/v1beta1" -}} +{{- else -}} +{{- print "apps/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for ingress. +*/}} +{{- define "common.capabilities.ingress.apiVersion" -}} +{{- if .Values.ingress -}} +{{- if .Values.ingress.apiVersion -}} +{{- .Values.ingress.apiVersion -}} +{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}} +{{- print "extensions/v1beta1" -}} +{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) 
-}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end }}
+{{- else if semverCompare "<1.14-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "extensions/v1beta1" -}}
+{{- else if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "networking.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "networking.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for RBAC resources.
+*/}}
+{{- define "common.capabilities.rbac.apiVersion" -}}
+{{- if semverCompare "<1.17-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "rbac.authorization.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "rbac.authorization.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the appropriate apiVersion for CRDs.
+*/}}
+{{- define "common.capabilities.crd.apiVersion" -}}
+{{- if semverCompare "<1.19-0" (include "common.capabilities.kubeVersion" .) -}}
+{{- print "apiextensions.k8s.io/v1beta1" -}}
+{{- else -}}
+{{- print "apiextensions.k8s.io/v1" -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Returns true if the used Helm version is 3.3+.
+A way to check the used Helm version was not introduced until version 3.3.0 with .Capabilities.HelmVersion, which contains an additional "{}}" structure.
+This check is introduced as a regexMatch instead of {{ if .Capabilities.HelmVersion }} because checking for the key HelmVersion in <3.3 results in an "interface not found" error.
+**To be removed when the catalog's minimum Helm version is 3.3**
+*/}}
+{{- define "common.capabilities.supportsHelmVersion" -}}
+{{- if regexMatch "{(v[0-9])*[^}]*}}$" (.Capabilities | toString ) }}
+  {{- true -}}
+{{- end -}}
+{{- end -}}
diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_errors.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_errors.tpl
new file mode 100644
index 000000000..a79cc2e32
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_errors.tpl
@@ -0,0 +1,23 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Throw an error when upgrading using empty password values that must not be empty.
+
+Usage:
+{{- $validationError00 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password00" "secret" "secretName" "field" "password-00") -}}
+{{- $validationError01 := include "common.validations.values.single.empty" (dict "valueKey" "path.to.password01" "secret" "secretName" "field" "password-01") -}}
+{{ include "common.errors.upgrade.passwords.empty" (dict "validationErrors" (list $validationError00 $validationError01) "context" $) }}
+
+Required password params:
+  - validationErrors - String - Required. List of validation strings to be returned; if it is empty it won't throw an error.
+  - context - Context - Required. Parent context.
+*/}}
+{{- define "common.errors.upgrade.passwords.empty" -}}
+  {{- $validationErrors := join "" .validationErrors -}}
+  {{- if and $validationErrors .context.Release.IsUpgrade -}}
+    {{- $errorString := "\nPASSWORDS ERROR: You must provide your current passwords when upgrading the release." -}}
+    {{- $errorString = print $errorString "\n                 Note that even after reinstallation, old credentials may be needed as they may be kept in persistent volume claims."
-}} + {{- $errorString = print $errorString "\n Further information can be obtained at https://docs.bitnami.com/general/how-to/troubleshoot-helm-chart-issues/#credential-errors-while-upgrading-chart-releases" -}} + {{- $errorString = print $errorString "\n%s" -}} + {{- printf $errorString $validationErrors | fail -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_images.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_images.tpl new file mode 100644 index 000000000..60f04fd6e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_images.tpl @@ -0,0 +1,47 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper image name +{{ include "common.images.image" ( dict "imageRoot" .Values.path.to.the.image "global" $) }} +*/}} +{{- define "common.images.image" -}} +{{- $registryName := .imageRoot.registry -}} +{{- $repositoryName := .imageRoot.repository -}} +{{- $tag := .imageRoot.tag | toString -}} +{{- if .global }} + {{- if .global.imageRegistry }} + {{- $registryName = .global.imageRegistry -}} + {{- end -}} +{{- end -}} +{{- if $registryName }} +{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- else -}} +{{- printf "%s:%s" $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +{{ include "common.images.pullSecrets" ( dict "images" (list .Values.path.to.the.image1, .Values.path.to.the.image2) "global" .Values.global) }} +*/}} +{{- define "common.images.pullSecrets" -}} + {{- $pullSecrets := list }} + + {{- if .global }} + {{- range .global.imagePullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- range .images -}} + {{- range .pullSecrets -}} + {{- $pullSecrets = append $pullSecrets . -}} + {{- end -}} + {{- end -}} + + {{- if (not (empty $pullSecrets)) }} +imagePullSecrets: + {{- range $pullSecrets }} + - name: {{ . }} + {{- end }} + {{- end }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_ingress.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_ingress.tpl new file mode 100644 index 000000000..622ef50e3 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_ingress.tpl @@ -0,0 +1,42 @@ +{{/* vim: set filetype=mustache: */}} + +{{/* +Generate backend entry that is compatible with all Kubernetes API versions. + +Usage: +{{ include "common.ingress.backend" (dict "serviceName" "backendName" "servicePort" "backendPort" "context" $) }} + +Params: + - serviceName - String. Name of an existing service backend + - servicePort - String/Int. Port name (or number) of the service. It will be translated to different yaml depending if it is a string or an integer. + - context - Dict - Required. The context for the template evaluation. 
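+
+Illustrative example (the service name "my-svc" and port 8080 are hypothetical), derived from the branches below: on "extensions/v1beta1" and "networking.k8s.io/v1beta1" the include renders
+  serviceName: my-svc
+  servicePort: 8080
+while on "networking.k8s.io/v1" it renders
+  service:
+    name: my-svc
+    port:
+      number: 8080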
+*/}} +{{- define "common.ingress.backend" -}} +{{- $apiVersion := (include "common.capabilities.ingress.apiVersion" .context) -}} +{{- if or (eq $apiVersion "extensions/v1beta1") (eq $apiVersion "networking.k8s.io/v1beta1") -}} +serviceName: {{ .serviceName }} +servicePort: {{ .servicePort }} +{{- else -}} +service: + name: {{ .serviceName }} + port: + {{- if typeIs "string" .servicePort }} + name: {{ .servicePort }} + {{- else if typeIs "int" .servicePort }} + number: {{ .servicePort }} + {{- end }} +{{- end -}} +{{- end -}} + +{{/* +Print "true" if the API pathType field is supported +Usage: +{{ include "common.ingress.supportsPathType" . }} +*/}} +{{- define "common.ingress.supportsPathType" -}} +{{- if (semverCompare "<1.18-0" (include "common.capabilities.kubeVersion" .)) -}} +{{- print "false" -}} +{{- else -}} +{{- print "true" -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_labels.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_labels.tpl new file mode 100644 index 000000000..252066c7e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_labels.tpl @@ -0,0 +1,18 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Kubernetes standard labels +*/}} +{{- define "common.labels.standard" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +helm.sh/chart: {{ include "common.names.chart" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end -}} + +{{/* +Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector +*/}} +{{- define "common.labels.matchLabels" -}} +app.kubernetes.io/name: {{ include "common.names.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_names.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_names.tpl new file mode 100644 index 000000000..adf2a74f4 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_names.tpl @@ -0,0 +1,32 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "common.names.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "common.names.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. 
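+
+Illustrative examples (hypothetical names, following the rules above):
+  .Values.fullnameOverride "custom"        -> "custom"
+  release "myapp" with chart "redis"       -> "myapp-redis"
+  release "redis-prod" with chart "redis"  -> "redis-prod" (release name already contains the chart name)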
+*/}} +{{- define "common.names.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_secrets.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_secrets.tpl new file mode 100644 index 000000000..60b84a701 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_secrets.tpl @@ -0,0 +1,129 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Generate secret name. + +Usage: +{{ include "common.secrets.name" (dict "existingSecret" .Values.path.to.the.existingSecret "defaultNameSuffix" "mySuffix" "context" $) }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - defaultNameSuffix - String - Optional. It is used only if we have several secrets in the same deployment. + - context - Dict - Required. The context for the template evaluation. +*/}} +{{- define "common.secrets.name" -}} +{{- $name := (include "common.names.fullname" .context) -}} + +{{- if .defaultNameSuffix -}} +{{- $name = printf "%s-%s" $name .defaultNameSuffix | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{- with .existingSecret -}} +{{- if not (typeIs "string" .) -}} +{{- with .name -}} +{{- $name = . -}} +{{- end -}} +{{- else -}} +{{- $name = . -}} +{{- end -}} +{{- end -}} + +{{- printf "%s" $name -}} +{{- end -}} + +{{/* +Generate secret key. + +Usage: +{{ include "common.secrets.key" (dict "existingSecret" .Values.path.to.the.existingSecret "key" "keyName") }} + +Params: + - existingSecret - ExistingSecret/String - Optional. The path to the existing secrets in the values.yaml given by the user + to be used instead of the default one. Allows for it to be of type String (just the secret name) for backwards compatibility. + +info: https://github.com/bitnami/charts/tree/master/bitnami/common#existingsecret + - key - String - Required. Name of the key in the secret. +*/}} +{{- define "common.secrets.key" -}} +{{- $key := .key -}} + +{{- if .existingSecret -}} + {{- if not (typeIs "string" .existingSecret) -}} + {{- if .existingSecret.keyMapping -}} + {{- $key = index .existingSecret.keyMapping $.key -}} + {{- end -}} + {{- end }} +{{- end -}} + +{{- printf "%s" $key -}} +{{- end -}} + +{{/* +Generate secret password or retrieve one if already created. + +Usage: +{{ include "common.secrets.passwords.manage" (dict "secret" "secret-name" "key" "keyName" "providedValues" (list "path.to.password1" "path.to.password2") "length" 10 "strong" false "chartName" "chartName" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - key - String - Required - Name of the key in the secret. + - providedValues - List - Required - The path to the validating value in the values.yaml, e.g: "mysql.password". Will pick first parameter with a defined value. 
+ - length - int - Optional - Length of the generated random password. + - strong - Boolean - Optional - Whether to add symbols to the generated random password. + - chartName - String - Optional - Name of the chart used when said chart is deployed as a subchart. + - context - Context - Required - Parent context. +*/}} +{{- define "common.secrets.passwords.manage" -}} + +{{- $password := "" }} +{{- $subchart := "" }} +{{- $chartName := default "" .chartName }} +{{- $passwordLength := default 10 .length }} +{{- $providedPasswordKey := include "common.utils.getKeyFromList" (dict "keys" .providedValues "context" $.context) }} +{{- $providedPasswordValue := include "common.utils.getValueFromKey" (dict "key" $providedPasswordKey "context" $.context) }} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- if index $secret.data .key }} + {{- $password = index $secret.data .key }} + {{- end -}} +{{- else if $providedPasswordValue }} + {{- $password = $providedPasswordValue | toString | b64enc | quote }} +{{- else }} + + {{- if .context.Values.enabled }} + {{- $subchart = $chartName }} + {{- end -}} + + {{- $requiredPassword := dict "valueKey" $providedPasswordKey "secret" .secret "field" .key "subchart" $subchart "context" $.context -}} + {{- $requiredPasswordError := include "common.validations.values.single.empty" $requiredPassword -}} + {{- $passwordValidationErrors := list $requiredPasswordError -}} + {{- include "common.errors.upgrade.passwords.empty" (dict "validationErrors" $passwordValidationErrors "context" $.context) -}} + + {{- if .strong }} + {{- $subStr := list (lower (randAlpha 1)) (randNumeric 1) (upper (randAlpha 1)) | join "_" }} + {{- $password = randAscii $passwordLength }} + {{- $password = regexReplaceAllLiteral "\\W" $password "@" | substr 5 $passwordLength }} + {{- $password = printf "%s%s" $subStr $password | toString | shuffle | b64enc | quote }} + {{- else }} + {{- $password = randAlphaNum $passwordLength | b64enc | quote }} + {{- end }} +{{- end -}} +{{- printf "%s" $password -}} +{{- end -}} + +{{/* +Returns whether a previous generated secret already exists + +Usage: +{{ include "common.secrets.exists" (dict "secret" "secret-name" "context" $) }} + +Params: + - secret - String - Required - Name of the 'Secret' resource where the password is stored. + - context - Context - Required - Parent context. 
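+
+Illustrative example (the secret name "my-secret" is hypothetical); the helper prints "true" or nothing,
+so it can gate rendering a fresh Secret:
+  {{- if not (include "common.secrets.exists" (dict "secret" "my-secret" "context" $)) }}
+  # ... render a new Secret manifest here ...
+  {{- end }}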
+*/}} +{{- define "common.secrets.exists" -}} +{{- $secret := (lookup "v1" "Secret" $.context.Release.Namespace .secret) }} +{{- if $secret }} + {{- true -}} +{{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_storage.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_storage.tpl new file mode 100644 index 000000000..60e2a844f --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_storage.tpl @@ -0,0 +1,23 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Return the proper Storage Class +{{ include "common.storage.class" ( dict "persistence" .Values.path.to.the.persistence "global" $) }} +*/}} +{{- define "common.storage.class" -}} + +{{- $storageClass := .persistence.storageClass -}} +{{- if .global -}} + {{- if .global.storageClass -}} + {{- $storageClass = .global.storageClass -}} + {{- end -}} +{{- end -}} + +{{- if $storageClass -}} + {{- if (eq "-" $storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" $storageClass -}} + {{- end -}} +{{- end -}} + +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_tplvalues.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_tplvalues.tpl new file mode 100644 index 000000000..2db166851 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_tplvalues.tpl @@ -0,0 +1,13 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Renders a value that contains template. +Usage: +{{ include "common.tplvalues.render" ( dict "value" .Values.path.to.the.Value "context" $) }} +*/}} +{{- define "common.tplvalues.render" -}} + {{- if typeIs "string" .value }} + {{- tpl .value .context }} + {{- else }} + {{- tpl (.value | toYaml) .context }} + {{- end }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_utils.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_utils.tpl new file mode 100644 index 000000000..ea083a249 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_utils.tpl @@ -0,0 +1,62 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Print instructions to get a secret value. +Usage: +{{ include "common.utils.secret.getvalue" (dict "secret" "secret-name" "field" "secret-value-field" "context" $) }} +*/}} +{{- define "common.utils.secret.getvalue" -}} +{{- $varname := include "common.utils.fieldToEnvVar" . -}} +export {{ $varname }}=$(kubectl get secret --namespace {{ .context.Release.Namespace | quote }} {{ .secret }} -o jsonpath="{.data.{{ .field }}}" | base64 --decode) +{{- end -}} + +{{/* +Build env var name given a field +Usage: +{{ include "common.utils.fieldToEnvVar" dict "field" "my-password" }} +*/}} +{{- define "common.utils.fieldToEnvVar" -}} + {{- $fieldNameSplit := splitList "-" .field -}} + {{- $upperCaseFieldNameSplit := list -}} + + {{- range $fieldNameSplit -}} + {{- $upperCaseFieldNameSplit = append $upperCaseFieldNameSplit ( upper . ) -}} + {{- end -}} + + {{ join "_" $upperCaseFieldNameSplit }} +{{- end -}} + +{{/* +Gets a value from .Values given +Usage: +{{ include "common.utils.getValueFromKey" (dict "key" "path.to.key" "context" $) }} +*/}} +{{- define "common.utils.getValueFromKey" -}} +{{- $splitKey := splitList "." 
.key -}} +{{- $value := "" -}} +{{- $latestObj := $.context.Values -}} +{{- range $splitKey -}} + {{- if not $latestObj -}} + {{- printf "please review the entire path of '%s' exists in values" $.key | fail -}} + {{- end -}} + {{- $value = ( index $latestObj . ) -}} + {{- $latestObj = $value -}} +{{- end -}} +{{- printf "%v" (default "" $value) -}} +{{- end -}} + +{{/* +Returns first .Values key with a defined value or first of the list if all non-defined +Usage: +{{ include "common.utils.getKeyFromList" (dict "keys" (list "path.to.key1" "path.to.key2") "context" $) }} +*/}} +{{- define "common.utils.getKeyFromList" -}} +{{- $key := first .keys -}} +{{- $reverseKeys := reverse .keys }} +{{- range $reverseKeys }} + {{- $value := include "common.utils.getValueFromKey" (dict "key" . "context" $.context ) }} + {{- if $value -}} + {{- $key = . }} + {{- end -}} +{{- end -}} +{{- printf "%s" $key -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_warnings.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_warnings.tpl new file mode 100644 index 000000000..ae10fa41e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/_warnings.tpl @@ -0,0 +1,14 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Warning about using rolling tag. +Usage: +{{ include "common.warnings.rollingTag" .Values.path.to.the.imageRoot }} +*/}} +{{- define "common.warnings.rollingTag" -}} + +{{- if and (contains "bitnami/" .repository) (not (.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .repository }}:{{ .tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} + +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_cassandra.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_cassandra.tpl new file mode 100644 index 000000000..8679ddffb --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_cassandra.tpl @@ -0,0 +1,72 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Cassandra required passwords are not empty. + +Usage: +{{ include "common.validations.values.cassandra.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where Cassandra values are stored, e.g: "cassandra-passwords-secret" + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.cassandra.passwords" -}} + {{- $existingSecret := include "common.cassandra.values.existingSecret" . -}} + {{- $enabled := include "common.cassandra.values.enabled" . -}} + {{- $dbUserPrefix := include "common.cassandra.values.key.dbUser" . 
-}} + {{- $valueKeyPassword := printf "%s.password" $dbUserPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "cassandra-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.cassandra.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.cassandra.dbUser.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.dbUser.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled cassandra. + +Usage: +{{ include "common.cassandra.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.cassandra.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.cassandra.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key dbUser + +Usage: +{{ include "common.cassandra.values.key.dbUser" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Cassandra is used as subchart or not. Default: false +*/}} +{{- define "common.cassandra.values.key.dbUser" -}} + {{- if .subchart -}} + cassandra.dbUser + {{- else -}} + dbUser + {{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_mariadb.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_mariadb.tpl new file mode 100644 index 000000000..bb5ed7253 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_mariadb.tpl @@ -0,0 +1,103 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MariaDB required passwords are not empty. + +Usage: +{{ include "common.validations.values.mariadb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MariaDB values are stored, e.g: "mysql-passwords-secret" + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mariadb.passwords" -}} + {{- $existingSecret := include "common.mariadb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mariadb.values.enabled" . -}} + {{- $architecture := include "common.mariadb.values.architecture" . -}} + {{- $authPrefix := include "common.mariadb.values.key.auth" . 
-}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicationPassword := printf "%s.replicationPassword" $authPrefix -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mariadb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- if not (empty $valueUsername) -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mariadb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replication") -}} + {{- $requiredReplicationPassword := dict "valueKey" $valueKeyReplicationPassword "secret" .secret "field" "mariadb-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mariadb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.auth.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.auth.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.auth.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled mariadb. + +Usage: +{{ include "common.mariadb.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.mariadb.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.mariadb.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for architecture + +Usage: +{{ include "common.mariadb.values.architecture" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. Default: false +*/}} +{{- define "common.mariadb.values.architecture" -}} + {{- if .subchart -}} + {{- .context.Values.mariadb.architecture -}} + {{- else -}} + {{- .context.Values.architecture -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key auth + +Usage: +{{ include "common.mariadb.values.key.auth" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MariaDB is used as subchart or not. 
Default: false +*/}} +{{- define "common.mariadb.values.key.auth" -}} + {{- if .subchart -}} + mariadb.auth + {{- else -}} + auth + {{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_mongodb.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_mongodb.tpl new file mode 100644 index 000000000..7d5ecbccb --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_mongodb.tpl @@ -0,0 +1,108 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate MongoDB(R) required passwords are not empty. + +Usage: +{{ include "common.validations.values.mongodb.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where MongoDB(R) values are stored, e.g: "mongodb-passwords-secret" + - subchart - Boolean - Optional. Whether MongoDB(R) is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.mongodb.passwords" -}} + {{- $existingSecret := include "common.mongodb.values.auth.existingSecret" . -}} + {{- $enabled := include "common.mongodb.values.enabled" . -}} + {{- $authPrefix := include "common.mongodb.values.key.auth" . -}} + {{- $architecture := include "common.mongodb.values.architecture" . -}} + {{- $valueKeyRootPassword := printf "%s.rootPassword" $authPrefix -}} + {{- $valueKeyUsername := printf "%s.username" $authPrefix -}} + {{- $valueKeyDatabase := printf "%s.database" $authPrefix -}} + {{- $valueKeyPassword := printf "%s.password" $authPrefix -}} + {{- $valueKeyReplicaSetKey := printf "%s.replicaSetKey" $authPrefix -}} + {{- $valueKeyAuthEnabled := printf "%s.enabled" $authPrefix -}} + + {{- $authEnabled := include "common.utils.getValueFromKey" (dict "key" $valueKeyAuthEnabled "context" .context) -}} + + {{- if and (not $existingSecret) (eq $enabled "true") (eq $authEnabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $requiredRootPassword := dict "valueKey" $valueKeyRootPassword "secret" .secret "field" "mongodb-root-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRootPassword -}} + + {{- $valueUsername := include "common.utils.getValueFromKey" (dict "key" $valueKeyUsername "context" .context) }} + {{- $valueDatabase := include "common.utils.getValueFromKey" (dict "key" $valueKeyDatabase "context" .context) }} + {{- if and $valueUsername $valueDatabase -}} + {{- $requiredPassword := dict "valueKey" $valueKeyPassword "secret" .secret "field" "mongodb-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPassword -}} + {{- end -}} + + {{- if (eq $architecture "replicaset") -}} + {{- $requiredReplicaSetKey := dict "valueKey" $valueKeyReplicaSetKey "secret" .secret "field" "mongodb-replica-set-key" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredReplicaSetKey -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.mongodb.values.auth.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether MongoDb is used as subchart or not. 
Default: false
+*/}}
+{{- define "common.mongodb.values.auth.existingSecret" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.auth.existingSecret | quote -}}
+  {{- else -}}
+    {{- .context.Values.auth.existingSecret | quote -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for enabled mongodb.
+
+Usage:
+{{ include "common.mongodb.values.enabled" (dict "context" $) }}
+*/}}
+{{- define "common.mongodb.values.enabled" -}}
+  {{- if .subchart -}}
+    {{- printf "%v" .context.Values.mongodb.enabled -}}
+  {{- else -}}
+    {{- printf "%v" (not .context.Values.enabled) -}}
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for the key auth
+
+Usage:
+{{ include "common.mongodb.values.key.auth" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB(R) is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.key.auth" -}}
+  {{- if .subchart -}}
+    mongodb.auth
+  {{- else -}}
+    auth
+  {{- end -}}
+{{- end -}}
+
+{{/*
+Auxiliary function to get the right value for architecture
+
+Usage:
+{{ include "common.mongodb.values.architecture" (dict "subchart" "true" "context" $) }}
+Params:
+  - subchart - Boolean - Optional. Whether MongoDB(R) is used as subchart or not. Default: false
+*/}}
+{{- define "common.mongodb.values.architecture" -}}
+  {{- if .subchart -}}
+    {{- .context.Values.mongodb.architecture -}}
+  {{- else -}}
+    {{- .context.Values.architecture -}}
+  {{- end -}}
+{{- end -}}
diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_postgresql.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_postgresql.tpl
new file mode 100644
index 000000000..992bcd390
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_postgresql.tpl
@@ -0,0 +1,131 @@
+{{/* vim: set filetype=mustache: */}}
+{{/*
+Validate PostgreSQL required passwords are not empty.
+
+Usage:
+{{ include "common.validations.values.postgresql.passwords" (dict "secret" "secretName" "subchart" false "context" $) }}
+Params:
+  - secret - String - Required. Name of the secret where postgresql values are stored, e.g: "postgresql-passwords-secret"
+  - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false
+*/}}
+{{- define "common.validations.values.postgresql.passwords" -}}
+  {{- $existingSecret := include "common.postgresql.values.existingSecret" . -}}
+  {{- $enabled := include "common.postgresql.values.enabled" . -}}
+  {{- $valueKeyPostgresqlPassword := include "common.postgresql.values.key.postgressPassword" . -}}
+  {{- $valueKeyPostgresqlReplicationEnabled := include "common.postgresql.values.key.replicationPassword" . -}}
+
+  {{- if and (not $existingSecret) (eq $enabled "true") -}}
+    {{- $requiredPasswords := list -}}
+
+    {{- $requiredPostgresqlPassword := dict "valueKey" $valueKeyPostgresqlPassword "secret" .secret "field" "postgresql-password" -}}
+    {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlPassword -}}
+
+    {{- $enabledReplication := include "common.postgresql.values.enabled.replication" .
-}} + {{- if (eq $enabledReplication "true") -}} + {{- $requiredPostgresqlReplicationPassword := dict "valueKey" $valueKeyPostgresqlReplicationEnabled "secret" .secret "field" "postgresql-replication-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredPostgresqlReplicationPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to decide whether evaluate global values. + +Usage: +{{ include "common.postgresql.values.use.global" (dict "key" "key-of-global" "context" $) }} +Params: + - key - String - Required. Field to be evaluated within global, e.g: "existingSecret" +*/}} +{{- define "common.postgresql.values.use.global" -}} + {{- if .context.Values.global -}} + {{- if .context.Values.global.postgresql -}} + {{- index .context.Values.global.postgresql .key | quote -}} + {{- end -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.postgresql.values.existingSecret" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.existingSecret" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "existingSecret" "context" .context) -}} + + {{- if .subchart -}} + {{- default (.context.Values.postgresql.existingSecret | quote) $globalValue -}} + {{- else -}} + {{- default (.context.Values.existingSecret | quote) $globalValue -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled postgresql. + +Usage: +{{ include "common.postgresql.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.postgresql.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key postgressPassword. + +Usage: +{{ include "common.postgresql.values.key.postgressPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.key.postgressPassword" -}} + {{- $globalValue := include "common.postgresql.values.use.global" (dict "key" "postgresqlUsername" "context" .context) -}} + + {{- if not $globalValue -}} + {{- if .subchart -}} + postgresql.postgresqlPassword + {{- else -}} + postgresqlPassword + {{- end -}} + {{- else -}} + global.postgresql.postgresqlPassword + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled.replication. + +Usage: +{{ include "common.postgresql.values.enabled.replication" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. Default: false +*/}} +{{- define "common.postgresql.values.enabled.replication" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.postgresql.replication.enabled -}} + {{- else -}} + {{- printf "%v" .context.Values.replication.enabled -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for the key replication.password. + +Usage: +{{ include "common.postgresql.values.key.replicationPassword" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether postgresql is used as subchart or not. 
Default: false +*/}} +{{- define "common.postgresql.values.key.replicationPassword" -}} + {{- if .subchart -}} + postgresql.replication.password + {{- else -}} + replication.password + {{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_redis.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_redis.tpl new file mode 100644 index 000000000..3e2a47c03 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_redis.tpl @@ -0,0 +1,72 @@ + +{{/* vim: set filetype=mustache: */}} +{{/* +Validate Redis(TM) required passwords are not empty. + +Usage: +{{ include "common.validations.values.redis.passwords" (dict "secret" "secretName" "subchart" false "context" $) }} +Params: + - secret - String - Required. Name of the secret where redis values are stored, e.g: "redis-passwords-secret" + - subchart - Boolean - Optional. Whether redis is used as subchart or not. Default: false +*/}} +{{- define "common.validations.values.redis.passwords" -}} + {{- $existingSecret := include "common.redis.values.existingSecret" . -}} + {{- $enabled := include "common.redis.values.enabled" . -}} + {{- $valueKeyPrefix := include "common.redis.values.keys.prefix" . -}} + {{- $valueKeyRedisPassword := printf "%s%s" $valueKeyPrefix "password" -}} + {{- $valueKeyRedisUsePassword := printf "%s%s" $valueKeyPrefix "usePassword" -}} + + {{- if and (not $existingSecret) (eq $enabled "true") -}} + {{- $requiredPasswords := list -}} + + {{- $usePassword := include "common.utils.getValueFromKey" (dict "key" $valueKeyRedisUsePassword "context" .context) -}} + {{- if eq $usePassword "true" -}} + {{- $requiredRedisPassword := dict "valueKey" $valueKeyRedisPassword "secret" .secret "field" "redis-password" -}} + {{- $requiredPasswords = append $requiredPasswords $requiredRedisPassword -}} + {{- end -}} + + {{- include "common.validations.values.multiple.empty" (dict "required" $requiredPasswords "context" .context) -}} + {{- end -}} +{{- end -}} + +{{/* +Redis Auxiliary function to get the right value for existingSecret. + +Usage: +{{ include "common.redis.values.existingSecret" (dict "context" $) }} +Params: + - subchart - Boolean - Optional. Whether Redis(TM) is used as subchart or not. Default: false +*/}} +{{- define "common.redis.values.existingSecret" -}} + {{- if .subchart -}} + {{- .context.Values.redis.existingSecret | quote -}} + {{- else -}} + {{- .context.Values.existingSecret | quote -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right value for enabled redis. + +Usage: +{{ include "common.redis.values.enabled" (dict "context" $) }} +*/}} +{{- define "common.redis.values.enabled" -}} + {{- if .subchart -}} + {{- printf "%v" .context.Values.redis.enabled -}} + {{- else -}} + {{- printf "%v" (not .context.Values.enabled) -}} + {{- end -}} +{{- end -}} + +{{/* +Auxiliary function to get the right prefix path for the values + +Usage: +{{ include "common.redis.values.key.prefix" (dict "subchart" "true" "context" $) }} +Params: + - subchart - Boolean - Optional. Whether redis is used as subchart or not. 
Default: false +*/}} +{{- define "common.redis.values.keys.prefix" -}} + {{- if .subchart -}}redis.{{- else -}}{{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_validations.tpl b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_validations.tpl new file mode 100644 index 000000000..9a814cf40 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/templates/validations/_validations.tpl @@ -0,0 +1,46 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Validate values must not be empty. + +Usage: +{{- $validateValueConf00 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-00") -}} +{{- $validateValueConf01 := (dict "valueKey" "path.to.value" "secret" "secretName" "field" "password-01") -}} +{{ include "common.validations.values.empty" (dict "required" (list $validateValueConf00 $validateValueConf01) "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" +*/}} +{{- define "common.validations.values.multiple.empty" -}} + {{- range .required -}} + {{- include "common.validations.values.single.empty" (dict "valueKey" .valueKey "secret" .secret "field" .field "context" $.context) -}} + {{- end -}} +{{- end -}} + +{{/* +Validate a value must not be empty. + +Usage: +{{ include "common.validations.value.empty" (dict "valueKey" "mariadb.password" "secret" "secretName" "field" "my-password" "subchart" "subchart" "context" $) }} + +Validate value params: + - valueKey - String - Required. The path to the validating value in the values.yaml, e.g: "mysql.password" + - secret - String - Optional. Name of the secret where the validating value is generated/stored, e.g: "mysql-passwords-secret" + - field - String - Optional. Name of the field in the secret data, e.g: "mysql-password" + - subchart - String - Optional - Name of the subchart that the validated password is part of. +*/}} +{{- define "common.validations.values.single.empty" -}} + {{- $value := include "common.utils.getValueFromKey" (dict "key" .valueKey "context" .context) }} + {{- $subchart := ternary "" (printf "%s." .subchart) (empty .subchart) }} + + {{- if not $value -}} + {{- $varname := "my-value" -}} + {{- $getCurrentValue := "" -}} + {{- if and .secret .field -}} + {{- $varname = include "common.utils.fieldToEnvVar" . -}} + {{- $getCurrentValue = printf " To get the current value:\n\n %s\n" (include "common.utils.secret.getvalue" .) -}} + {{- end -}} + {{- printf "\n '%s' must not be empty, please add '--set %s%s=$%s' to the command.%s" .valueKey $subchart .valueKey $varname $getCurrentValue -}} + {{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/charts/common/values.yaml b/scripts/helm/helmcharts/databases/charts/redis/charts/common/values.yaml new file mode 100644 index 000000000..9ecdc93f5 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/charts/common/values.yaml @@ -0,0 +1,3 @@ +## bitnami/common +## It is required by CI/CD tools and processes. 
+exampleValue: common-chart diff --git a/scripts/helm/helmcharts/databases/charts/redis/ci/default-values.yaml b/scripts/helm/helmcharts/databases/charts/redis/ci/default-values.yaml new file mode 100644 index 000000000..fc2ba605a --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/ci/default-values.yaml @@ -0,0 +1 @@ +# Leave this file empty to ensure that CI runs builds against the default configuration in values.yaml. diff --git a/scripts/helm/helmcharts/databases/charts/redis/ci/extra-flags-values.yaml b/scripts/helm/helmcharts/databases/charts/redis/ci/extra-flags-values.yaml new file mode 100644 index 000000000..71132f76e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/ci/extra-flags-values.yaml @@ -0,0 +1,11 @@ +master: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +slave: + extraFlags: + - --maxmemory-policy allkeys-lru + persistence: + enabled: false +usePassword: false diff --git a/scripts/helm/helmcharts/databases/charts/redis/ci/production-sentinel-values.yaml b/scripts/helm/helmcharts/databases/charts/redis/ci/production-sentinel-values.yaml new file mode 100644 index 000000000..009a3718a --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/ci/production-sentinel-values.yaml @@ -0,0 +1,682 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 5.0.9-debian-10-r0 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +cluster: + enabled: true + slaveCount: 3 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and
+## create one redis service with ports to the sentinel and the redis instances
+sentinel:
+  enabled: true
+  ## Require password authentication on the sentinel itself
+  ## ref: https://redis.io/topics/sentinel
+  usePassword: true
+  ## Bitnami Redis(TM) Sentinel image version
+  ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/
+  ##
+  image:
+    registry: docker.io
+    repository: bitnami/redis-sentinel
+    ## Bitnami Redis(TM) image tag
+    ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links
+    ##
+    tag: 5.0.9-debian-10-r0
+    ## Specify an imagePullPolicy
+    ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent'
+    ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images
+    ##
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+  masterSet: mymaster
+  initialCheckTimeout: 5
+  quorum: 2
+  downAfterMilliseconds: 60000
+  failoverTimeout: 18000
+  parallelSyncs: 1
+  port: 26379
+  ## Additional Redis(TM) configuration for the sentinel nodes
+  ## ref: https://redis.io/topics/config
+  ##
+  configmap:
+  ## Enable or disable static sentinel IDs for each replica
+  ## If disabled each sentinel will generate a random id at startup
+  ## If enabled, each replica will have a constant ID on each start-up
+  ##
+  staticID: false
+  ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 5
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 5
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 5
+  customLivenessProbe: {}
+  customReadinessProbe: {}
+  ## Redis(TM) Sentinel resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  # resources:
+  #   requests:
+  #     memory: 256Mi
+  #     cpu: 100m
+  ## Redis(TM) Sentinel Service properties
+  service:
+    ## Redis(TM) Sentinel Service type
+    type: ClusterIP
+    sentinelPort: 26379
+    redisPort: 6379
+
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # sentinelNodePort:
+    # redisNodePort:
+
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    labels: {}
+    loadBalancerIP:
+
+## Specifies the Kubernetes Cluster's Domain Name.
+##
+clusterDomain: cluster.local
+
+networkPolicy:
+  ## Specifies whether a NetworkPolicy should be created
+  ##
+  enabled: true
+
+  ## The Policy model to apply. When set to false, only pods with the correct
+  ## client label will have network access to the port Redis(TM) is listening
+  ## on. When true, Redis(TM) will accept connections from any source
+  ## (with the correct destination port).
+  ##
+  # allowExternal: true
+
+  ## Allow connections from other namespaces.
+
+serviceAccount:
+  ## Specifies whether a ServiceAccount should be created
+  ##
+  create: false
+  ## The name of the ServiceAccount to use.
+  ## If not set and create is true, a name is generated using the fullname template
+  name:
+
+rbac:
+  ## Specifies whether RBAC resources should be created
+  ##
+  create: false
+
+  role:
+    ## Rules to create. It follows the role specification
+    # rules:
+    #   - apiGroups:
+    #       - extensions
+    #     resources:
+    #       - podsecuritypolicies
+    #     verbs:
+    #       - use
+    #     resourceNames:
+    #       - gce.unprivileged
+    rules: []
+
+## Redis(TM) pod Security Context
+securityContext:
+  enabled: true
+  fsGroup: 1001
+  runAsUser: 1001
+  ## sysctl settings for master and slave pods
+  ##
+  ## Uncomment the setting below to increase the net.core.somaxconn value
+  ##
+  # sysctls:
+  #   - name: net.core.somaxconn
+  #     value: "10000"
+
+## Use password authentication
+usePassword: true
+## Redis(TM) password (both master and slave)
+## Defaults to a random 10-character alphanumeric string if not set and usePassword is true
+## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run
+##
+password:
+## Use existing secret (ignores previous password)
+# existingSecret:
+## Password key to be retrieved from Redis(TM) secret
+##
+# existingSecretPasswordKey:
+
+## Mount secrets as files instead of environment variables
+usePasswordFile: false
+
+## Persist data to a persistent volume (Redis(TM) Master)
+persistence:
+  ## A manually managed Persistent Volume and Claim
+  ## Requires persistence.enabled: true
+  ## If defined, PVC must be created manually before volume will be bound
+  existingClaim:
+
+# Redis(TM) port
+redisPort: 6379
+
+##
+## Redis(TM) Master parameters
+##
+master:
+  ## Redis(TM) command arguments
+  ##
+  ## Can be used to specify command line arguments, for example:
+  ##
+  command: "/run.sh"
+  ## Additional Redis(TM) configuration for the master nodes
+  ## ref: https://redis.io/topics/config
+  ##
+  configmap:
+  ## Redis(TM) additional command line flags
+  ##
+  ## Can be used to specify command line flags, for example:
+  ##
+  ## extraFlags:
+  ##  - "--maxmemory-policy volatile-ttl"
+  ##  - "--repl-backlog-size 1024mb"
+  extraFlags: []
+  ## List of Redis(TM) commands to disable
+  ##
+  ## Can be used to disable Redis(TM) commands for security reasons.
+  ## Commands will be completely disabled by renaming each to an empty string.
+  ## ref: https://redis.io/topics/security#disabling-of-specific-commands
+  ##
+  disableCommands:
+    - FLUSHDB
+    - FLUSHALL
+
+  ## Redis(TM) Master additional pod labels and annotations
+  ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/
+  podLabels: {}
+  podAnnotations: {}
+
+  ## Redis(TM) Master resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  # resources:
+  #   requests:
+  #     memory: 256Mi
+  #     cpu: 100m
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
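+
+  ## For example (illustrative), a values override could size the master and tune
+  ## its memory policy via extraFlags (the flag values here are placeholders):
+  ##
+  ##   master:
+  ##     resources:
+  ##       requests:
+  ##         memory: 256Mi
+  ##         cpu: 100m
+  ##     extraFlags:
+  ##       - "--maxmemory 200mb"
+  ##       - "--maxmemory-policy allkeys-lru"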
+
+  ## Configure extra options for Redis(TM) Master liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 5
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 5
+    timeoutSeconds: 1
+    successThreshold: 1
+    failureThreshold: 5
+
+  ## Configure custom probes for other images, such as
+  ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7
+  ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false
+  ##
+  # customLivenessProbe:
+  #  tcpSocket:
+  #    port: 6379
+  #  initialDelaySeconds: 10
+  #  periodSeconds: 5
+  # customReadinessProbe:
+  #  initialDelaySeconds: 30
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  exec:
+  #    command:
+  #    - "container-entrypoint"
+  #    - "bash"
+  #    - "-c"
+  #    - "redis-cli set liveness-probe \"`date`\" | grep OK"
+  customLivenessProbe: {}
+  customReadinessProbe: {}
+
+  ## Redis(TM) Master Node selectors and tolerations for pod assignment
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector
+  ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature
+  ##
+  # nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
+  # tolerations: []
+  ## Redis(TM) Master pod/node affinity/anti-affinity
+  ##
+  affinity: {}
+
+  ## Redis(TM) Master Service properties
+  service:
+    ## Redis(TM) Master Service type
+    type: ClusterIP
+    port: 6379
+
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    labels: {}
+    loadBalancerIP:
+    # loadBalancerSourceRanges: ["10.0.0.0/8"]
+
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    enabled: true
+    ## The path the volume will be mounted at, useful when using different
+    ## Redis(TM) images.
+    path: /data
+    ## The subdirectory of the volume to mount to, useful in dev environments
+    ## and when sharing one PV across multiple services.
+    subPath: ""
+    ## redis data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner. (gp2 on AWS, standard on
+    ## GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    accessModes:
+      - ReadWriteOnce
+    size: 8Gi
+    ## Persistent Volume selectors
+    ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
+    matchLabels: {}
+    matchExpressions: {}
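+
+  ## For example (illustrative), to pin the master volume to a specific storage
+  ## class and enlarge it ("fast-ssd" is a placeholder class name):
+  ##
+  ##   master:
+  ##     persistence:
+  ##       storageClass: "fast-ssd"
+  ##       size: 20Gi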
+
+  ## Update strategy: can be set to RollingUpdate or OnDelete; defaults to RollingUpdate.
+  ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+  statefulset:
+    updateStrategy: RollingUpdate
+    ## Partition update strategy
+    ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
+    # rollingUpdatePartition:
+
+  ## Redis(TM) Master pod priorityClassName
+  ##
+  priorityClassName: {}
+
+##
+## Redis(TM) Slave properties
+## Note: service.type is a mandatory parameter
+## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master
+##
+slave:
+  ## Slave Service properties
+  service:
+    ## Redis(TM) Slave Service type
+    type: ClusterIP
+    ## Redis(TM) port
+    port: 6379
+    ## Specify the nodePort value for the LoadBalancer and NodePort service types.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport
+    ##
+    # nodePort:
+
+    ## Provide any additional annotations which may be required. This can be used to
+    ## set the LoadBalancer service type to internal only.
+    ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer
+    ##
+    annotations: {}
+    labels: {}
+    loadBalancerIP:
+    # loadBalancerSourceRanges: ["10.0.0.0/8"]
+
+  ## Redis(TM) slave port
+  port: 6379
+  ## Can be used to specify command line arguments, for example:
+  ##
+  command: "/run.sh"
+  ## Additional Redis(TM) configuration for the slave nodes
+  ## ref: https://redis.io/topics/config
+  ##
+  configmap:
+  ## Redis(TM) extra flags
+  extraFlags: []
+  ## List of Redis(TM) commands to disable
+  disableCommands:
+    - FLUSHDB
+    - FLUSHALL
+
+  ## Redis(TM) Slave pod/node affinity/anti-affinity
+  ##
+  affinity: {}
+
+  ## Configure extra options for Redis(TM) Slave liveness and readiness probes
+  ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes
+  ##
+  livenessProbe:
+    enabled: true
+    initialDelaySeconds: 30
+    periodSeconds: 10
+    timeoutSeconds: 5
+    successThreshold: 1
+    failureThreshold: 5
+  readinessProbe:
+    enabled: true
+    initialDelaySeconds: 5
+    periodSeconds: 10
+    timeoutSeconds: 10
+    successThreshold: 1
+    failureThreshold: 5
+
+  ## Configure custom probes for other images, such as
+  ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7
+  ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false
+  ##
+  # customLivenessProbe:
+  #  tcpSocket:
+  #    port: 6379
+  #  initialDelaySeconds: 10
+  #  periodSeconds: 5
+  # customReadinessProbe:
+  #  initialDelaySeconds: 30
+  #  periodSeconds: 10
+  #  timeoutSeconds: 5
+  #  exec:
+  #    command:
+  #    - "container-entrypoint"
+  #    - "bash"
+  #    - "-c"
+  #    - "redis-cli set liveness-probe \"`date`\" | grep OK"
+  customLivenessProbe: {}
+  customReadinessProbe: {}
+
+  ## Redis(TM) slave Resource
+  # resources:
+  #   requests:
+  #     memory: 256Mi
+  #     cpu: 100m
+
+  ## Redis(TM) slave selectors and tolerations for pod assignment
+  # nodeSelector: {"beta.kubernetes.io/arch": "amd64"}
+  # tolerations: []
+
+  ## Use an alternate scheduler, e.g. "stork".
+  ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/
+  ##
+  # schedulerName:
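+
+  ## For example (illustrative), to run more replicas with explicit resources:
+  ##
+  ##   cluster:
+  ##     slaveCount: 5
+  ##   slave:
+  ##     resources:
+  ##       requests:
+  ##         memory: 256Mi
+  ##         cpu: 100m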
+
+  ## Redis(TM) slave pod Annotation and Labels
+  podLabels: {}
+  podAnnotations: {}
+
+  ## Redis(TM) slave pod priorityClassName
+  # priorityClassName: {}
+
+  ## Enable persistence using Persistent Volume Claims
+  ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/
+  ##
+  persistence:
+    enabled: true
+    ## The path the volume will be mounted at, useful when using different
+    ## Redis(TM) images.
+    path: /data
+    ## The subdirectory of the volume to mount to, useful in dev environments
+    ## and when sharing one PV across multiple services.
+    subPath: ""
+    ## redis data Persistent Volume Storage Class
+    ## If defined, storageClassName: <storageClass>
+    ## If set to "-", storageClassName: "", which disables dynamic provisioning
+    ## If undefined (the default) or set to null, no storageClassName spec is
+    ## set, choosing the default provisioner. (gp2 on AWS, standard on
+    ## GKE, AWS & OpenStack)
+    ##
+    # storageClass: "-"
+    accessModes:
+      - ReadWriteOnce
+    size: 8Gi
+    ## Persistent Volume selectors
+    ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector
+    matchLabels: {}
+    matchExpressions: {}
+
+  ## Update strategy: can be set to RollingUpdate or OnDelete; defaults to RollingUpdate.
+  ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets
+  statefulset:
+    updateStrategy: RollingUpdate
+    ## Partition update strategy
+    ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions
+    # rollingUpdatePartition:
+
+## Prometheus Exporter / Metrics
+##
+metrics:
+  enabled: true
+
+  image:
+    registry: docker.io
+    repository: bitnami/redis-exporter
+    tag: 1.5.3-debian-10-r14
+    pullPolicy: IfNotPresent
+    ## Optionally specify an array of imagePullSecrets.
+    ## Secrets must be manually created in the namespace.
+    ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/
+    ##
+    # pullSecrets:
+    #   - myRegistryKeySecretName
+
+  ## Metrics exporter resource requests and limits
+  ## ref: http://kubernetes.io/docs/user-guide/compute-resources/
+  ##
+  # resources: {}
+
+  ## Extra arguments for Metrics exporter, for example:
+  ## extraArgs:
+  ##   check-keys: myKey,myOtherKey
+  # extraArgs: {}
+
+  ## Metrics exporter pod Annotation and Labels
+  podAnnotations:
+    prometheus.io/scrape: "true"
+    prometheus.io/port: "9121"
+  # podLabels: {}
+
+  # Enable this if you're using https://github.com/coreos/prometheus-operator
+  serviceMonitor:
+    enabled: false
+    ## Specify a namespace if needed
+    # namespace: monitoring
+    # fallback to the prometheus default unless specified
+    # interval: 10s
+    ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr)
+    ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1)
+    ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters)
+    selector:
+      prometheus: kube-prometheus
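+
+  ## For example (illustrative), with the Prometheus Operator already installed,
+  ## scraping could be enabled via ("monitoring" is a placeholder namespace):
+  ##
+  ##   metrics:
+  ##     enabled: true
+  ##     serviceMonitor:
+  ##       enabled: true
+  ##       namespace: monitoring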
+
+  ## Custom PrometheusRule to be defined
+  ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart
+  ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions
+  prometheusRule:
+    enabled: false
+    additionalLabels: {}
+    namespace: ""
+    ## Redis(TM) prometheus rules
+    ## These are just example rules; please adapt them to your needs.
+    ## Make sure to constrain the rules to the current Redis(TM) service.
+    # rules:
+    #   - alert: RedisDown
+    #     expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0
+    #     for: 2m
+    #     labels:
+    #       severity: error
+    #     annotations:
+    #       summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down
+    #       description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down
+    #   - alert: RedisMemoryHigh
+    #     expr: >
+    #       redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100
+    #       /
+    #       redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"}
+    #       > 90
+    #     for: 2m
+    #     labels:
+    #       severity: error
+    #     annotations:
+    #       summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory
+    #       description: |
+    #         Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory.
+    #   - alert: RedisKeyEviction
+    #     expr: |
+    #       increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0
+    #     for: 1s
+    #     labels:
+    #       severity: error
+    #     annotations:
+    #       summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys
+    #       description: |
+    #         Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes.
+ rules: [] + + ## Metrics exporter pod priorityClassName + # priorityClassName: {} + service: + type: ClusterIP + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persist volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled. + save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false diff --git a/scripts/helm/helmcharts/databases/charts/redis/img/redis-cluster-topology.png b/scripts/helm/helmcharts/databases/charts/redis/img/redis-cluster-topology.png new file mode 100644 index 000000000..f0a02a9f8 Binary files /dev/null and b/scripts/helm/helmcharts/databases/charts/redis/img/redis-cluster-topology.png differ diff --git a/scripts/helm/helmcharts/databases/charts/redis/img/redis-topology.png b/scripts/helm/helmcharts/databases/charts/redis/img/redis-topology.png new file mode 100644 index 000000000..3f5280feb Binary files /dev/null and b/scripts/helm/helmcharts/databases/charts/redis/img/redis-topology.png differ diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/NOTES.txt b/scripts/helm/helmcharts/databases/charts/redis/templates/NOTES.txt new file mode 100644 index 000000000..5c27951d1 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/NOTES.txt @@ -0,0 +1,136 @@ +** Please be patient while the chart is being deployed ** + +{{- if contains .Values.master.service.type "LoadBalancer" }} +{{- if not .Values.usePassword }} +{{ if and (not .Values.networkPolicy.enabled) (.Values.networkPolicy.allowExternal) }} + +------------------------------------------------------------------------------- + WARNING + + By specifying "master.service.type=LoadBalancer" and "usePassword=false" you have + most likely exposed the Redis(TM) service externally without any authentication + mechanism. + + For security reasons, we strongly suggest that you switch to "ClusterIP" or + "NodePort". 
As an alternative, you can also switch to "usePassword=true",
+  providing a valid password via the "password" parameter.
+
+-------------------------------------------------------------------------------
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{- if and .Values.sentinel.enabled (not .Values.cluster.enabled)}}
+
+-------------------------------------------------------------------------------
+                                 WARNING
+
+    Using redis sentinel without a cluster is not supported. A single pod with
+    standalone redis has been deployed.
+
+    To deploy redis sentinel, please use the values "cluster.enabled=true" and
+    "sentinel.enabled=true".
+
+-------------------------------------------------------------------------------
+{{- end }}
+
+{{- if .Values.cluster.enabled }}
+{{- if .Values.sentinel.enabled }}
+Redis(TM) can be accessed via port {{ .Values.sentinel.service.redisPort }} on the following DNS name from within your cluster:
+
+{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations
+
+For read/write operations, first access the Redis(TM) Sentinel cluster, which is available on port {{ .Values.sentinel.service.sentinelPort }} using the same domain name as above.
+
+{{- else }}
+Redis(TM) can be accessed via port {{ .Values.redisPort }} on the following DNS names from within your cluster:
+
+{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read/write operations
+{{ template "redis.fullname" . }}-slave.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} for read-only operations
+{{- end }}
+
+{{- else }}
+Redis(TM) can be accessed via port {{ .Values.redisPort }} on the following DNS name from within your cluster:
+
+{{ template "redis.fullname" . }}-master.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
+
+{{- end }}
+
+{{ if .Values.usePassword }}
+To get your password run:
+
+    export REDIS_PASSWORD=$(kubectl get secret --namespace {{ .Release.Namespace }} {{ template "redis.secretName" . }} -o jsonpath="{.data.redis-password}" | base64 --decode)
+{{- end }}
+
+To connect to your Redis(TM) server:
+
+1. Run a Redis(TM) pod that you can use as a client:
+
+{{- if .Values.tls.enabled }}
+   kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --restart='Never' --env REDIS_PASSWORD=$REDIS_PASSWORD --image {{ template "redis.image" . }} --command -- sleep infinity
+
+   Copy your TLS certificates to the pod:
+
+   kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.cert {{ template "redis.fullname" . }}-client:/tmp/client.cert
+   kubectl cp --namespace {{ .Release.Namespace }} /path/to/client.key {{ template "redis.fullname" . }}-client:/tmp/client.key
+   kubectl cp --namespace {{ .Release.Namespace }} /path/to/CA.cert {{ template "redis.fullname" . }}-client:/tmp/CA.cert
+
+   Use the following command to attach to the pod:
+
+   kubectl exec --tty -i {{ template "redis.fullname" . }}-client \
+   {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" . }}-client=true" \{{- end }}
+   --namespace {{ .Release.Namespace }} -- bash
+{{- else }}
+   kubectl run --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }}-client --rm --tty -i --restart='Never' \
+   {{ if .Values.usePassword }} --env REDIS_PASSWORD=$REDIS_PASSWORD \{{ end }}
+   {{- if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }}--labels="{{ template "redis.fullname" .
}}-client=true" \{{- end }} + --image {{ template "redis.image" . }} -- bash +{{- end }} + +2. Connect using the Redis(TM) CLI: + +{{- if .Values.cluster.enabled }} + {{- if .Values.sentinel.enabled }} + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.redisPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Read only operations + redis-cli -h {{ template "redis.fullname" . }} -p {{ .Values.sentinel.service.sentinelPort }}{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} # Sentinel access + {{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + redis-cli -h {{ template "redis.fullname" . }}-slave{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + {{- end }} +{{- else }} + redis-cli -h {{ template "redis.fullname" . }}-master{{ if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} +{{- end }} + +{{ if and (.Values.networkPolicy.enabled) (not .Values.networkPolicy.allowExternal) }} +Note: Since NetworkPolicy is enabled, only pods with label +{{ template "redis.fullname" . }}-client=true" +will be able to connect to redis. +{{- else -}} + +To connect to your database from outside the cluster execute the following commands: + +{{- if contains "NodePort" .Values.master.service.type }} + + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "redis.fullname" . }}-master) + redis-cli -h $NODE_IP -p $NODE_PORT {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "LoadBalancer" .Values.master.service.type }} + + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "redis.fullname" . }}' + + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "redis.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + redis-cli -h $SERVICE_IP -p {{ .Values.master.service.port }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- else if contains "ClusterIP" .Values.master.service.type }} + + kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "redis.fullname" . 
}}-master {{ .Values.redisPort }}:{{ .Values.redisPort }} & + redis-cli -h 127.0.0.1 -p {{ .Values.redisPort }} {{- if .Values.usePassword }} -a $REDIS_PASSWORD{{ end }}{{ if .Values.tls.enabled }} --tls --cert /tmp/client.cert --key /tmp/client.key --cacert /tmp/CA.cert{{ end }} + +{{- end }} +{{- end }} + +{{ include "redis.checkRollingTags" . }} + +{{- include "redis.validateValues" . }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/_helpers.tpl b/scripts/helm/helmcharts/databases/charts/redis/templates/_helpers.tpl new file mode 100644 index 000000000..e76b9ce9d --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/_helpers.tpl @@ -0,0 +1,445 @@ +{{/* vim: set filetype=mustache: */}} +{{/* +Expand the name of the chart. +*/}} +{{- define "redis.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}} +{{- end -}} + +{{/* +Expand the chart plus release name (used by the chart label) +*/}} +{{- define "redis.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version -}} +{{- end -}} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "redis.fullname" -}} +{{- if .Values.fullnameOverride -}} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- $name := default .Chart.Name .Values.nameOverride -}} +{{- if contains $name .Release.Name -}} +{{- .Release.Name | trunc 63 | trimSuffix "-" -}} +{{- else -}} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}} +{{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for networkpolicy. +*/}} +{{- define "networkPolicy.apiVersion" -}} +{{- if semverCompare ">=1.4-0, <1.7-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "extensions/v1beta1" -}} +{{- else -}} +{{- print "networking.k8s.io/v1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiGroup for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiGroup" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy" -}} +{{- else -}} +{{- print "extensions" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the appropriate apiVersion for PodSecurityPolicy. +*/}} +{{- define "podSecurityPolicy.apiVersion" -}} +{{- if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +{{- print "policy/v1beta1" -}} +{{- else -}} +{{- print "extensions/v1beta1" -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) image name +*/}} +{{- define "redis.image" -}} +{{- $registryName := .Values.image.registry -}} +{{- $repositoryName := .Values.image.repository -}} +{{- $tag := .Values.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Redis(TM) Sentinel image name +*/}} +{{- define "sentinel.image" -}} +{{- $registryName := .Values.sentinel.image.registry -}} +{{- $repositoryName := .Values.sentinel.image.repository -}} +{{- $tag := .Values.sentinel.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the metrics image) +*/}} +{{- define "redis.metrics.image" -}} +{{- $registryName := .Values.metrics.image.registry -}} +{{- $repositoryName := .Values.metrics.image.repository -}} +{{- $tag := .Values.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper image name (for the sentinel metrics image) +*/}} +{{- define "sentinel.metrics.image" -}} +{{- $registryName := .Values.sentinel.metrics.image.registry -}} +{{- $repositoryName := .Values.sentinel.metrics.image.repository -}} +{{- $tag := .Values.sentinel.metrics.image.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. 
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+
+{{/*
+Return the proper image name (for the init container volume-permissions image)
+*/}}
+{{- define "redis.volumePermissions.image" -}}
+{{- $registryName := .Values.volumePermissions.image.registry -}}
+{{- $repositoryName := .Values.volumePermissions.image.repository -}}
+{{- $tag := .Values.volumePermissions.image.tag | toString -}}
+{{/*
+Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
+but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
+Also, we can't use a single if because lazy evaluation is not an option
+*/}}
+{{- if .Values.global }}
+    {{- if .Values.global.imageRegistry }}
+        {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
+    {{- else -}}
+        {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+    {{- end -}}
+{{- else -}}
+    {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert file.
+*/}}
+{{- define "redis.tlsCert" -}}
+{{- required "Certificate filename is required when TLS is enabled" .Values.tls.certFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+
+{{/*
+Return the path to the cert key file.
+*/}}
+{{- define "redis.tlsCertKey" -}}
+{{- required "Certificate Key filename is required when TLS is enabled" .Values.tls.certKeyFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+
+{{/*
+Return the path to the CA cert file.
+*/}}
+{{- define "redis.tlsCACert" -}}
+{{- required "Certificate CA filename is required when TLS is enabled" .Values.tls.certCAFilename | printf "/opt/bitnami/redis/certs/%s" -}}
+{{- end -}}
+
+{{/*
+Return the path to the DH params file.
+*/}}
+{{- define "redis.tlsDHParams" -}}
+{{- if .Values.tls.dhParamsFilename -}}
+{{- printf "/opt/bitnami/redis/certs/%s" .Values.tls.dhParamsFilename -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "redis.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create -}}
+    {{ default (include "redis.fullname" .) .Values.serviceAccount.name }}
+{{- else -}}
+    {{ default "default" .Values.serviceAccount.name }}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password secret.
+*/}}
+{{- define "redis.secretName" -}}
+{{- if .Values.existingSecret -}}
+{{- printf "%s" .Values.existingSecret -}}
+{{- else -}}
+{{- printf "%s" (include "redis.fullname" .) -}}
+{{- end -}}
+{{- end -}}
+
+{{/*
+Get the password key to be retrieved from the Redis(TM) secret.
+*/}} +{{- define "redis.secretPasswordKey" -}} +{{- if and .Values.existingSecret .Values.existingSecretPasswordKey -}} +{{- printf "%s" .Values.existingSecretPasswordKey -}} +{{- else -}} +{{- printf "redis-password" -}} +{{- end -}} +{{- end -}} + +{{/* +Return Redis(TM) password +*/}} +{{- define "redis.password" -}} +{{- if not (empty .Values.global.redis.password) }} + {{- .Values.global.redis.password -}} +{{- else if not (empty .Values.password) -}} + {{- .Values.password -}} +{{- else -}} + {{- randAlphaNum 10 -}} +{{- end -}} +{{- end -}} + +{{/* +Return sysctl image +*/}} +{{- define "redis.sysctl.image" -}} +{{- $registryName := default "docker.io" .Values.sysctlImage.registry -}} +{{- $repositoryName := .Values.sysctlImage.repository -}} +{{- $tag := default "buster" .Values.sysctlImage.tag | toString -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 doesn't support it, so we need to implement this if-else logic. +Also, we can't use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} + {{- if .Values.global.imageRegistry }} + {{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}} + {{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} + {{- end -}} +{{- else -}} + {{- printf "%s/%s:%s" $registryName $repositoryName $tag -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Docker Image Registry Secret Names +*/}} +{{- define "redis.imagePullSecrets" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +Also, we can not use a single if because lazy evaluation is not an option +*/}} +{{- if .Values.global }} +{{- if .Values.global.imagePullSecrets }} +imagePullSecrets: +{{- range .Values.global.imagePullSecrets }} + - name: {{ . }} +{{- end }} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- else if or .Values.image.pullSecrets .Values.metrics.image.pullSecrets .Values.sysctlImage.pullSecrets .Values.volumePermissions.image.pullSecrets }} +imagePullSecrets: +{{- range .Values.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.metrics.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.sysctlImage.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- range .Values.volumePermissions.image.pullSecrets }} + - name: {{ . }} +{{- end }} +{{- end -}} +{{- end -}} + +{{/* Check if there are rolling tags in the images */}} +{{- define "redis.checkRollingTags" -}} +{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. 
++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- if and (contains "bitnami/" .Values.sentinel.image.repository) (not (.Values.sentinel.image.tag | toString | regexFind "-r\\d+$|sha256:")) }} +WARNING: Rolling tag detected ({{ .Values.sentinel.image.repository }}:{{ .Values.sentinel.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment. ++info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/ +{{- end }} +{{- end -}} + +{{/* +Return the proper Storage Class for master +*/}} +{{- define "redis.master.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.master.persistence.storageClass -}} + {{- if (eq "-" .Values.master.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.master.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Return the proper Storage Class for slave +*/}} +{{- define "redis.slave.storageClass" -}} +{{/* +Helm 2.11 supports the assignment of a value to a variable defined in a different scope, +but Helm 2.9 and 2.10 does not support it, so we need to implement this if-else logic. +*/}} +{{- if .Values.global -}} + {{- if .Values.global.storageClass -}} + {{- if (eq "-" .Values.global.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.global.storageClass -}} + {{- end -}} + {{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} + {{- end -}} +{{- else -}} + {{- if .Values.slave.persistence.storageClass -}} + {{- if (eq "-" .Values.slave.persistence.storageClass) -}} + {{- printf "storageClassName: \"\"" -}} + {{- else }} + {{- printf "storageClassName: %s" .Values.slave.persistence.storageClass -}} + {{- end -}} + {{- end -}} +{{- end -}} +{{- end -}} + +{{/* +Compile all warnings into a single message, and call fail. +*/}} +{{- define "redis.validateValues" -}} +{{- $messages := list -}} +{{- $messages := append $messages (include "redis.validateValues.spreadConstraints" .) 
-}}
+{{- $messages := without $messages "" -}}
+{{- $message := join "\n" $messages -}}
+
+{{- if $message -}}
+{{- printf "\nVALUES VALIDATION:\n%s" $message | fail -}}
+{{- end -}}
+{{- end -}}
+
+{{/* Validate values of Redis(TM) - spreadConstraints K8s version */}}
+{{- define "redis.validateValues.spreadConstraints" -}}
+{{- if and (semverCompare "<1.16-0" .Capabilities.KubeVersion.GitVersion) .Values.slave.spreadConstraints -}}
+redis: spreadConstraints
+    Pod Topology Spread Constraints are only available on K8s >= 1.16
+    Find more information at https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/
+{{- end -}}
+{{- end -}}
+
+{{/*
+Renders a value that contains a template.
+Usage:
+{{ include "redis.tplValue" (dict "value" .Values.path.to.the.Value "context" $) }}
+*/}}
+{{- define "redis.tplValue" -}}
+    {{- if typeIs "string" .value }}
+        {{- tpl .value .context }}
+    {{- else }}
+        {{- tpl (.value | toYaml) .context }}
+    {{- end }}
+{{- end -}}
diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/configmap-scripts.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/configmap-scripts.yaml
new file mode 100644
index 000000000..6d7402721
--- /dev/null
+++ b/scripts/helm/helmcharts/databases/charts/redis/templates/configmap-scripts.yaml
@@ -0,0 +1,430 @@
+apiVersion: v1
+kind: ConfigMap
+metadata:
+  name: {{ template "redis.fullname" . }}-scripts
+  namespace: {{ .Release.Namespace | quote }}
+  labels:
+    app: {{ template "redis.name" . }}
+    chart: {{ template "redis.chart" . }}
+    heritage: {{ .Release.Service }}
+    release: {{ .Release.Name }}
+data:
+{{- if and .Values.cluster.enabled .Values.sentinel.enabled }}
+  start-node.sh: |
+    #!/bin/bash
+
+    . /opt/bitnami/scripts/libos.sh
+    . /opt/bitnami/scripts/liblog.sh
+    . /opt/bitnami/scripts/libvalidations.sh
+
+    not_exists_dns_entry() {
+      myip=$(hostname -i)
+
+      if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep "^${myip}" )" ]]; then
+        warn "$HEADLESS_SERVICE does not contain the IP of this pod: ${myip}"
+        return 1
+      fi
+      info "$HEADLESS_SERVICE has my IP: ${myip}"
+      return 0
+    }
+
+    HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+    REDIS_SERVICE="{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+
+    # Wait for DNS to add this pod's IP to the service DNS entry
+    retry_while not_exists_dns_entry
+
+    export REDIS_REPLICATION_MODE="slave"
+    if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i) ")" ]]; then
+      export REDIS_REPLICATION_MODE="master"
+    fi
+
+    {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }}
+    useradd redis
+    chown -R redis {{ .Values.slave.persistence.path }}
+    {{- end }}
+
+    if [[ -n $REDIS_PASSWORD_FILE ]]; then
+      password_aux=`cat ${REDIS_PASSWORD_FILE}`
+      export REDIS_PASSWORD=$password_aux
+    fi
+
+    if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then
+      password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}`
+      export REDIS_MASTER_PASSWORD=$password_aux
+    fi
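+
+    # NOTE (illustrative): REDIS_PASSWORD_FILE is only set when usePasswordFile=true;
+    # the chart then mounts the password secret as a file (for example at
+    # /opt/bitnami/redis/secrets/redis-password), keeping it out of the environment.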
+
+    if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then
+      echo "I am master"
+      if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then
+        cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf
+      fi
+    else
+      if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then
+        cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf
+      fi
+
+      if is_boolean_yes "$REDIS_TLS_ENABLED"; then
+        sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+      else
+        sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+      fi
+      REDIS_SENTINEL_INFO=($($sentinel_info_command))
+      REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]}
+      REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]}
+
+      # Immediately attempt to connect to the reported master. If it doesn't exist, the connection attempt will either hang
+      # or fail with "port unreachable" and give no data. The liveness check will then time out waiting for the redis
+      # container to be ready and restart it. By then the new master will likely have been elected
+      if is_boolean_yes "$REDIS_TLS_ENABLED"; then
+        sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_TLS_CERT_FILE} --key ${REDIS_TLS_KEY_FILE} --cacert ${REDIS_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+      else
+        sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+      fi
+
+      if [[ ! ($($sentinel_info_command)) ]]; then
+        # master doesn't actually exist, which probably means the remaining pods haven't elected a new one yet
+        # and are still reporting the old one. Once this happens the container will get stuck and never see the new
+        # master. We stop here to allow the container to fail the liveness check and be restarted.
+        exit 1
+      fi
+    fi
+
+    if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then
+      cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf
+    fi
+    {{- if .Values.tls.enabled }}
+    ARGS=("--port" "0")
+    ARGS+=("--tls-port" "${REDIS_TLS_PORT}")
+    ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}")
+    ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}")
+    ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}")
+    ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}")
+    ARGS+=("--tls-replication" "yes")
+    {{- if .Values.tls.dhParamsFilename }}
+    ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}")
+    {{- end }}
+    {{- else }}
+    ARGS=("--port" "${REDIS_PORT}")
+    {{- end }}
+
+    if [[ "$REDIS_REPLICATION_MODE" == "slave" ]]; then
+      ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}")
+    fi
+
+    {{- if .Values.usePassword }}
+    ARGS+=("--requirepass" "${REDIS_PASSWORD}")
+    ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}")
+    {{- else }}
+    ARGS+=("--protected-mode" "no")
+    {{- end }}
+
+    if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then
+      ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf")
+    else
+      ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf")
+    fi
+
+    ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf")
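+
+    # NOTE (illustrative): with usePassword=true and TLS disabled, the assembled
+    # replica command resembles:
+    #   redis-server --port 6379 --slaveof <master-host> <master-port> \
+    #     --requirepass <password> --masterauth <password> \
+    #     --include /opt/bitnami/redis/etc/replica.conf \
+    #     --include /opt/bitnami/redis/etc/redis.conf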
+    {{- if .Values.slave.extraFlags }}
+    {{- range .Values.slave.extraFlags }}
+    ARGS+=({{ . | quote }})
+    {{- end }}
+    {{- end }}
+
+    {{- if .Values.slave.preExecCmds }}
+    {{ .Values.slave.preExecCmds | nindent 4}}
+    {{- end }}
+
+    {{- if .Values.slave.command }}
+    exec {{ .Values.slave.command }} "${ARGS[@]}"
+    {{- else }}
+    exec redis-server "${ARGS[@]}"
+    {{- end }}
+
+  start-sentinel.sh: |
+    #!/bin/bash
+
+    . /opt/bitnami/scripts/libos.sh
+    . /opt/bitnami/scripts/libvalidations.sh
+    . /opt/bitnami/scripts/libfile.sh
+
+    sentinel_conf_set() {
+      local -r key="${1:?missing key}"
+      local value="${2:-}"
+
+      # Sanitize inputs
+      value="${value//\\/\\\\}"
+      value="${value//&/\\&}"
+      value="${value//\?/\\?}"
+      [[ "$value" = "" ]] && value="\"$value\""
+
+      replace_in_file "/opt/bitnami/redis-sentinel/etc/sentinel.conf" "^#*\s*${key} .*" "${key} ${value}" false
+    }
+    sentinel_conf_add() {
+      echo $'\n'"$@" >> "/opt/bitnami/redis-sentinel/etc/sentinel.conf"
+    }
+    host_id() {
+      echo "$1" | openssl sha1 | awk '{print $2}'
+    }
+    not_exists_dns_entry() {
+      myip=$(hostname -i)
+
+      if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep "^${myip}" )" ]]; then
+        warn "$HEADLESS_SERVICE does not contain the IP of this pod: ${myip}"
+        return 1
+      fi
+      info "$HEADLESS_SERVICE has my IP: ${myip}"
+      return 0
+    }
+
+    HEADLESS_SERVICE="{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+    REDIS_SERVICE="{{ template "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}"
+
+    if [[ -n $REDIS_PASSWORD_FILE ]]; then
+      password_aux=`cat ${REDIS_PASSWORD_FILE}`
+      export REDIS_PASSWORD=$password_aux
+    fi
+
+    if [[ ! -f /opt/bitnami/redis-sentinel/etc/sentinel.conf ]]; then
+      cp /opt/bitnami/redis-sentinel/mounted-etc/sentinel.conf /opt/bitnami/redis-sentinel/etc/sentinel.conf
+      {{- if .Values.usePassword }}
+      printf "\nsentinel auth-pass %s %s" "{{ .Values.sentinel.masterSet }}" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+      {{- if .Values.sentinel.usePassword }}
+      printf "\nrequirepass %s" "$REDIS_PASSWORD" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+      {{- end }}
+      {{- end }}
+      {{- if .Values.sentinel.staticID }}
+      printf "\nsentinel myid %s" "$(host_id "$HOSTNAME")" >> /opt/bitnami/redis-sentinel/etc/sentinel.conf
+      {{- end }}
+    fi
+
+    export REDIS_REPLICATION_MODE="slave"
+
+    # Wait for DNS to add this pod's IP to the service DNS entry
+    retry_while not_exists_dns_entry
+
+    if [[ -z "$(getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i)")" ]]; then
+      export REDIS_REPLICATION_MODE="master"
+    fi
+
+    # Clean sentinels from the current sentinel nodes
+    for node in $( getent ahosts "$HEADLESS_SERVICE" | grep -v "^$(hostname -i)" | cut -f 1 -d ' ' | uniq ); do
+      info "Cleaning sentinels in sentinel node: $node"
+      if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then
+        redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $node -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel reset "*"
+      else
+        redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $node -p {{ .Values.sentinel.port }} sentinel reset "*"
+      fi
+      sleep {{ .Values.sentinel.cleanDelaySeconds }}
+    done
+    info "Sentinels clean up done"
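+
+    # NOTE: `sentinel get-master-addr-by-name <master-set>` replies with two fields,
+    # the current master's IP and port; the array indexing below (0 = host, 1 = port)
+    # relies on that ordering.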
+
+    if [[ "$REDIS_REPLICATION_MODE" == "master" ]]; then
+      REDIS_MASTER_HOST="$(hostname -i)"
+      REDIS_MASTER_PORT_NUMBER="{{ .Values.redisPort }}"
+    else
+      if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then
+        sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+      else
+        sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_SERVICE -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+      fi
+      REDIS_SENTINEL_INFO=($($sentinel_info_command))
+      REDIS_MASTER_HOST=${REDIS_SENTINEL_INFO[0]}
+      REDIS_MASTER_PORT_NUMBER=${REDIS_SENTINEL_INFO[1]}
+
+      # Immediately attempt to connect to the reported master. If it doesn't exist, the connection attempt will either hang
+      # or fail with "port unreachable" and give no data. The liveness check will then time out waiting for the sentinel
+      # container to be ready and restart it. By then the new master will likely have been elected
+      if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then
+        sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+      else
+        sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a $REDIS_PASSWORD {{- end }} -h $REDIS_MASTER_HOST -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}"
+      fi
+
+      if [[ ! ($($sentinel_info_command)) ]]; then
+        # master doesn't actually exist, which probably means the remaining pods haven't elected a new one yet
+        # and are still reporting the old one. Once this happens the container will get stuck and never see the new
+        # master. We stop here to allow the container to fail the liveness check and be restarted.
+        exit 1
+      fi
+    fi
+    sentinel_conf_set "sentinel monitor" "{{ .Values.sentinel.masterSet }} "$REDIS_MASTER_HOST" "$REDIS_MASTER_PORT_NUMBER" {{ .Values.sentinel.quorum }}"
+
+    add_replica() {
+      if [[ "$1" != "$REDIS_MASTER_HOST" ]]; then
+        sentinel_conf_add "sentinel known-replica {{ .Values.sentinel.masterSet }} $1 {{ .Values.redisPort }}"
+      fi
+    }
+
+    {{- if .Values.sentinel.staticID }}
+    # remove generated known sentinels and replicas
+    tmp="$(sed -e '/^sentinel known-/d' -e '/^$/d' /opt/bitnami/redis-sentinel/etc/sentinel.conf)"
+    echo "$tmp" > /opt/bitnami/redis-sentinel/etc/sentinel.conf
+
+    for node in $(seq 0 {{ .Values.cluster.slaveCount }}); do
+      NAME="{{ template "redis.fullname" .
}}-node-$node" + IP="$(getent hosts "$NAME.$HEADLESS_SERVICE" | awk ' {print $1 }')" + if [[ "$NAME" != "$HOSTNAME" && -n "$IP" ]]; then + sentinel_conf_add "sentinel known-sentinel {{ .Values.sentinel.masterSet }} $IP {{ .Values.sentinel.port }} $(host_id "$NAME")" + add_replica "$IP" + fi + done + add_replica "$(hostname -i)" + {{- end }} + + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_SENTINEL_TLS_PORT_NUMBER}") + ARGS+=("--tls-cert-file" "${REDIS_SENTINEL_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_SENTINEL_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_SENTINEL_TLS_CA_FILE}") + ARGS+=("--tls-replication" "yes") + ARGS+=("--tls-auth-clients" "${REDIS_SENTINEL_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_SENTINEL_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- end }} + {{- if .Values.sentinel.preExecCmds }} + {{ .Values.sentinel.preExecCmds | nindent 4 }} + {{- end }} + exec redis-server /opt/bitnami/redis-sentinel/etc/sentinel.conf --sentinel {{- if .Values.tls.enabled }} "${ARGS[@]}" {{- end }} + prestop-sentinel.sh: | + #!/bin/bash + + . /opt/bitnami/scripts/libvalidations.sh + + REDIS_SERVICE="{{ include "redis.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}" + + if [[ -n "$REDIS_PASSWORD_FILE" ]]; then + password_aux=$(cat "$REDIS_PASSWORD_FILE") + export REDIS_PASSWORD="$password_aux" + fi + + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a ${REDIS_PASSWORD} {{- end }} -h ${REDIS_SERVICE} -p {{ .Values.sentinel.port }} --tls --cert ${REDIS_SENTINEL_TLS_CERT_FILE} --key ${REDIS_SENTINEL_TLS_KEY_FILE} --cacert ${REDIS_SENTINEL_TLS_CA_FILE} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + else + sentinel_info_command="redis-cli {{- if .Values.usePassword }} -a ${REDIS_PASSWORD} {{- end }} -h ${REDIS_SERVICE} -p {{ .Values.sentinel.port }} sentinel get-master-addr-by-name {{ .Values.sentinel.masterSet }}" + fi + REDIS_SENTINEL_INFO=($($sentinel_info_command)) + REDIS_MASTER_HOST="${REDIS_SENTINEL_INFO[0]}" + + if [[ "$REDIS_MASTER_HOST" == "$(hostname -i)" ]]; then + if is_boolean_yes "$REDIS_SENTINEL_TLS_ENABLED"; then + redis-cli {{- if .Values.usePassword }} -a "$REDIS_PASSWORD" {{- end }} -h "$REDIS_SERVICE" -p {{ .Values.sentinel.port }} --tls --cert "$REDIS_SENTINEL_TLS_CERT_FILE" --key "$REDIS_SENTINEL_TLS_KEY_FILE" --cacert "$REDIS_SENTINEL_TLS_CA_FILE" sentinel failover mymaster + else + redis-cli {{- if .Values.usePassword }} -a "$REDIS_PASSWORD" {{- end }} -h "$REDIS_SERVICE" -p {{ .Values.sentinel.port }} sentinel failover mymaster + fi + fi +{{- else }} + start-master.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.master.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/master.conf ]];then + cp /opt/bitnami/redis/mounted-etc/master.conf /opt/bitnami/redis/etc/master.conf + fi + if [[ ! 
-f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/master.conf") + {{- if .Values.master.extraFlags }} + {{- range .Values.master.extraFlags }} + ARGS+=({{ . | quote }}) + {{- end }} + {{- end }} + {{- if .Values.master.preExecCmds }} + {{ .Values.master.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.master.command }} + exec {{ .Values.master.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- if .Values.cluster.enabled }} + start-slave.sh: | + #!/bin/bash + {{- if and .Values.securityContext.runAsUser (eq (.Values.securityContext.runAsUser | int) 0) }} + useradd redis + chown -R redis {{ .Values.slave.persistence.path }} + {{- end }} + if [[ -n $REDIS_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux + fi + if [[ -n $REDIS_MASTER_PASSWORD_FILE ]]; then + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux + fi + if [[ ! -f /opt/bitnami/redis/etc/replica.conf ]];then + cp /opt/bitnami/redis/mounted-etc/replica.conf /opt/bitnami/redis/etc/replica.conf + fi + if [[ ! -f /opt/bitnami/redis/etc/redis.conf ]];then + cp /opt/bitnami/redis/mounted-etc/redis.conf /opt/bitnami/redis/etc/redis.conf + fi + {{- if .Values.tls.enabled }} + ARGS=("--port" "0") + ARGS+=("--tls-port" "${REDIS_TLS_PORT}") + ARGS+=("--tls-cert-file" "${REDIS_TLS_CERT_FILE}") + ARGS+=("--tls-key-file" "${REDIS_TLS_KEY_FILE}") + ARGS+=("--tls-ca-cert-file" "${REDIS_TLS_CA_FILE}") + ARGS+=("--tls-auth-clients" "${REDIS_TLS_AUTH_CLIENTS}") + ARGS+=("--tls-replication" "yes") + {{- if .Values.tls.dhParamsFilename }} + ARGS+=("--tls-dh-params-file" "${REDIS_TLS_DH_PARAMS_FILE}") + {{- end }} + {{- else }} + ARGS=("--port" "${REDIS_PORT}") + {{- end }} + ARGS+=("--slaveof" "${REDIS_MASTER_HOST}" "${REDIS_MASTER_PORT_NUMBER}") + {{- if .Values.usePassword }} + ARGS+=("--requirepass" "${REDIS_PASSWORD}") + ARGS+=("--masterauth" "${REDIS_MASTER_PASSWORD}") + {{- else }} + ARGS+=("--protected-mode" "no") + {{- end }} + ARGS+=("--include" "/opt/bitnami/redis/etc/redis.conf") + ARGS+=("--include" "/opt/bitnami/redis/etc/replica.conf") + {{- if .Values.slave.extraFlags }} + {{- range .Values.slave.extraFlags }} + ARGS+=({{ . 
| quote }}) + {{- end }} + {{- end }} + {{- if .Values.slave.preExecCmds }} + {{ .Values.slave.preExecCmds | nindent 4}} + {{- end }} + {{- if .Values.slave.command }} + exec {{ .Values.slave.command }} "${ARGS[@]}" + {{- else }} + exec redis-server "${ARGS[@]}" + {{- end }} + {{- end }} + +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/configmap.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/configmap.yaml new file mode 100644 index 000000000..77bdc81e8 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/configmap.yaml @@ -0,0 +1,53 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + redis.conf: |- +{{- if .Values.configmap }} + # User-supplied configuration: +{{- tpl .Values.configmap . | nindent 4 }} +{{- end }} + master.conf: |- + dir {{ .Values.master.persistence.path }} +{{- if .Values.master.configmap }} + # User-supplied master configuration: +{{- tpl .Values.master.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.master.disableCommands }} +{{- range .Values.master.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} + replica.conf: |- + dir {{ .Values.slave.persistence.path }} + slave-read-only yes +{{- if .Values.slave.configmap }} + # User-supplied slave configuration: +{{- tpl .Values.slave.configmap . | nindent 4 }} +{{- end }} +{{- if .Values.slave.disableCommands }} +{{- range .Values.slave.disableCommands }} + rename-command {{ . }} "" +{{- end }} +{{- end }} +{{- if .Values.sentinel.enabled }} + sentinel.conf: |- + dir "/tmp" + bind 0.0.0.0 + port {{ .Values.sentinel.port }} + sentinel monitor {{ .Values.sentinel.masterSet }} {{ template "redis.fullname" . }}-node-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} {{ .Values.redisPort }} {{ .Values.sentinel.quorum }} + sentinel down-after-milliseconds {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.downAfterMilliseconds }} + sentinel failover-timeout {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.failoverTimeout }} + sentinel parallel-syncs {{ .Values.sentinel.masterSet }} {{ .Values.sentinel.parallelSyncs }} +{{- if .Values.sentinel.configmap }} + # User-supplied sentinel configuration: +{{- tpl .Values.sentinel.configmap . | nindent 4 }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/headless-svc.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/headless-svc.yaml new file mode 100644 index 000000000..d758c0d23 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/headless-svc.yaml @@ -0,0 +1,28 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-headless + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
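Since sentinel.conf above is stitched together from several values (masterSet, quorum, down-after-milliseconds, failover-timeout), it pays to render the chart locally before deploying. A minimal sketch, assuming the chart path from this PR, that the chart's dependencies are already vendored, and a hypothetical release name db:

    # Render only the Redis ConfigMap and inspect the generated sentinel.conf block.
    helm template db scripts/helm/helmcharts/databases/charts/redis \
      --set sentinel.enabled=true \
      --show-only templates/configmap.yaml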
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + type: ClusterIP + clusterIP: None + {{- if .Values.sentinel.enabled }} + publishNotReadyAddresses: true + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.redisPort }} + targetPort: redis + {{- if .Values.sentinel.enabled }} + - name: tcp-sentinel + port: {{ .Values.sentinel.port }} + targetPort: redis-sentinel + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/health-configmap.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/health-configmap.yaml new file mode 100644 index 000000000..1bb8e74d9 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/health-configmap.yaml @@ -0,0 +1,176 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: {{ template "redis.fullname" . }}-health + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +data: + ping_readiness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_local.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_TLS_PORT \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- else }} + -p $REDIS_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi +{{- if .Values.sentinel.enabled }} + ping_sentinel.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_PASSWORD_FILE}` + export REDIS_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h localhost \ +{{- if .Values.tls.enabled }} + -p $REDIS_SENTINEL_TLS_PORT_NUMBER \ + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . 
}} \ + {{- end }} +{{- else }} + -p $REDIS_SENTINEL_PORT \ +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + parse_sentinels.awk: |- + /ip/ {FOUND_IP=1} + /port/ {FOUND_PORT=1} + /runid/ {FOUND_RUNID=1} + !/ip|port|runid/ { + if (FOUND_IP==1) { + IP=$1; FOUND_IP=0; + } + else if (FOUND_PORT==1) { + PORT=$1; + FOUND_PORT=0; + } else if (FOUND_RUNID==1) { + printf "\nsentinel known-sentinel {{ .Values.sentinel.masterSet }} %s %s %s", IP, PORT, $0; FOUND_RUNID=0; + } + } +{{- end }} + ping_readiness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ]; then + echo "$response" + exit 1 + fi + ping_liveness_master.sh: |- + #!/bin/bash +{{- if .Values.usePasswordFile }} + password_aux=`cat ${REDIS_MASTER_PASSWORD_FILE}` + export REDIS_MASTER_PASSWORD=$password_aux +{{- end }} + export REDISCLI_AUTH="$REDIS_MASTER_PASSWORD" + response=$( + timeout -s 3 $1 \ + redis-cli \ + -h $REDIS_MASTER_HOST \ + -p $REDIS_MASTER_PORT_NUMBER \ +{{- if .Values.tls.enabled }} + --tls \ + --cacert {{ template "redis.tlsCACert" . }} \ + {{- if .Values.tls.authClients }} + --cert {{ template "redis.tlsCert" . }} \ + --key {{ template "redis.tlsCertKey" . }} \ + {{- end }} +{{- end }} + ping + ) + if [ "$response" != "PONG" ] && [ "$response" != "LOADING Redis is loading the dataset in memory" ]; then + echo "$response" + exit 1 + fi + ping_readiness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_readiness_local.sh" $1 || exit_status=$? + "$script_dir/ping_readiness_master.sh" $1 || exit_status=$? + exit $exit_status + ping_liveness_local_and_master.sh: |- + script_dir="$(dirname "$0")" + exit_status=0 + "$script_dir/ping_liveness_local.sh" $1 || exit_status=$? + "$script_dir/ping_liveness_master.sh" $1 || exit_status=$? + exit $exit_status diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-prometheus.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-prometheus.yaml new file mode 100644 index 000000000..ed53dc6e2 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-prometheus.yaml @@ -0,0 +1,39 @@ +{{- if and (.Values.metrics.enabled) (.Values.metrics.serviceMonitor.enabled) }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
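parse_sentinels.awk above consumes the flat key/value listing that SENTINEL sentinels <masterSet> prints (field name on one line, value on the next) and emits one "sentinel known-sentinel ..." directive per peer. A rough sanity check against the rendered copy mounted at /health in the pod; the pod name db-redis-node-0 and the sample field values below are fabricated for illustration:

    # Feed a made-up ip/port/runid triple through the rendered awk script.
    kubectl exec db-redis-node-0 -c sentinel -- sh -c \
      'printf "%s\n" ip 10.0.0.12 port 26379 runid abc123 | awk -f /health/parse_sentinels.awk'
    # Expected shape: sentinel known-sentinel <masterSet> 10.0.0.12 26379 abc123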
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: metrics + {{- if .Values.metrics.serviceMonitor.interval }} + interval: {{ .Values.metrics.serviceMonitor.interval }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.relabelings }} + relabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.relabelings "context" $) | nindent 6 }} + {{- end }} + {{- if .Values.metrics.serviceMonitor.metricRelabelings }} + metricRelabelings: {{- include "common.tplvalues.render" ( dict "value" .Values.metrics.serviceMonitor.metricRelabelings "context" $) | nindent 6 }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "metrics" + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-sentinel-prometheus.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-sentinel-prometheus.yaml new file mode 100644 index 000000000..43cc2aa3c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-sentinel-prometheus.yaml @@ -0,0 +1,33 @@ +{{- if and .Values.sentinel.enabled .Values.sentinel.metrics.enabled .Values.sentinel.metrics.serviceMonitor.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: ServiceMonitor +metadata: + name: {{ include "redis.fullname" . }}-sentinel-metrics + {{- if .Values.sentinel.metrics.serviceMonitor.namespace }} + namespace: {{ .Values.sentinel.metrics.serviceMonitor.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ include "redis.name" . }} + chart: {{ include "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- range $key, $value := .Values.sentinel.metrics.serviceMonitor.selector }} + {{ $key }}: {{ $value | quote }} + {{- end }} +spec: + endpoints: + - port: sentinelmetrics + {{- if .Values.sentinel.metrics.serviceMonitor.interval }} + interval: {{ .Values.sentinel.metrics.serviceMonitor.interval }} + {{- end }} + selector: + matchLabels: + app: {{ include "redis.name" . }} + release: {{ .Release.Name }} + app.kubernetes.io/component: "sentinel-metrics" + namespaceSelector: + matchNames: + - {{ .Release.Namespace }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-sentinel-svc.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-sentinel-svc.yaml new file mode 100644 index 000000000..25f3770de --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-sentinel-svc.yaml @@ -0,0 +1,35 @@ +{{- if and .Values.sentinel.enabled .Values.sentinel.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ include "redis.fullname" . }}-sentinel-metrics + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ include "redis.name" . }} + chart: {{ include "redis.chart" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "sentinel-metrics" + {{- if .Values.sentinel.metrics.service.labels -}} + {{- include "common.tplvalues.render" (dict "value" .Values.sentinel.metrics.service.labels "context" $) | nindent 4 }} + {{- end -}} + {{- if .Values.sentinel.metrics.service.annotations }} + annotations: {{- toYaml .Values.sentinel.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.sentinel.metrics.service.type }} + {{- if eq .Values.sentinel.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.metrics.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.sentinel.metrics.service.type "LoadBalancer") .Values.sentinel.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - port: {{ .Values.sentinel.metrics.service.port }} + targetPort: sentinelmetrics + protocol: TCP + name: sentinelmetrics + selector: + app: {{ include "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-svc.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-svc.yaml new file mode 100644 index 000000000..767a464e5 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/metrics-svc.yaml @@ -0,0 +1,34 @@ +{{- if .Values.metrics.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-metrics + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + app.kubernetes.io/component: "metrics" + {{- if .Values.metrics.service.labels -}} + {{- toYaml .Values.metrics.service.labels | nindent 4 }} + {{- end -}} + {{- if .Values.metrics.service.annotations }} + annotations: {{- toYaml .Values.metrics.service.annotations | nindent 4 }} + {{- end }} +spec: + type: {{ .Values.metrics.service.type }} + {{ if eq .Values.metrics.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.metrics.service.externalTrafficPolicy }} + {{- end }} + {{ if and (eq .Values.metrics.service.type "LoadBalancer") .Values.metrics.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.metrics.service.loadBalancerIP }} + {{- end }} + ports: + - name: metrics + port: 9121 + targetPort: metrics + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/networkpolicy.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/networkpolicy.yaml new file mode 100644 index 000000000..0249bc0e6 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/networkpolicy.yaml @@ -0,0 +1,74 @@ +{{- if .Values.networkPolicy.enabled }} +kind: NetworkPolicy +apiVersion: {{ template "networkPolicy.apiVersion" . }} +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +spec: + podSelector: + matchLabels: + app: {{ template "redis.name" . 
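Both ServiceMonitor templates stay inert unless their values are switched on, and they additionally require the Prometheus Operator CRDs to exist in the cluster. A hedged enablement example, reusing the hypothetical release name db:

    helm upgrade --install db scripts/helm/helmcharts/databases/charts/redis \
      --set metrics.enabled=true \
      --set metrics.serviceMonitor.enabled=true \
      --set sentinel.enabled=true \
      --set sentinel.metrics.enabled=true \
      --set sentinel.metrics.serviceMonitor.enabled=true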
}} + release: {{ .Release.Name }} + {{- if .Values.cluster.enabled }} + policyTypes: + - Ingress + - Egress + egress: + # Allow dns resolution + - ports: + - port: 53 + protocol: UDP + # Allow outbound connections to other cluster pods + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + to: + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- end }} + ingress: + # Allow inbound connections + - ports: + - port: {{ .Values.redisPort }} + {{- if .Values.sentinel.enabled }} + - port: {{ .Values.sentinel.port }} + {{- end }} + {{- if not .Values.networkPolicy.allowExternal }} + from: + - podSelector: + matchLabels: + {{ template "redis.fullname" . }}-client: "true" + - podSelector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + {{- if .Values.networkPolicy.ingressNSMatchLabels }} + - namespaceSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- if .Values.networkPolicy.ingressNSPodMatchLabels }} + podSelector: + matchLabels: + {{- range $key, $value := .Values.networkPolicy.ingressNSPodMatchLabels }} + {{ $key | quote }}: {{ $value | quote }} + {{- end }} + {{- end }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + # Allow prometheus scrapes for metrics + - ports: + - port: 9121 + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/pdb.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/pdb.yaml new file mode 100644 index 000000000..b9dc54b36 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/pdb.yaml @@ -0,0 +1,22 @@ +{{- if .Values.podDisruptionBudget.enabled }} +apiVersion: policy/v1beta1 +kind: PodDisruptionBudget +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} +spec: + {{- if .Values.podDisruptionBudget.minAvailable }} + minAvailable: {{ .Values.podDisruptionBudget.minAvailable }} + {{- end }} + {{- if .Values.podDisruptionBudget.maxUnavailable }} + maxUnavailable: {{ .Values.podDisruptionBudget.maxUnavailable }} + {{- end }} + selector: + matchLabels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/prometheusrule.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/prometheusrule.yaml new file mode 100644 index 000000000..48ae017f6 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/prometheusrule.yaml @@ -0,0 +1,25 @@ +{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled }} +apiVersion: monitoring.coreos.com/v1 +kind: PrometheusRule +metadata: + name: {{ template "redis.fullname" . }} + {{- if .Values.metrics.prometheusRule.namespace }} + namespace: {{ .Values.metrics.prometheusRule.namespace }} + {{- else }} + namespace: {{ .Release.Namespace | quote }} + {{- end }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name | quote }} + heritage: {{ .Release.Service | quote }} +{{- with .Values.metrics.prometheusRule.additionalLabels }} +{{- toYaml . 
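With networkPolicy.enabled=true and allowExternal=false, only pods carrying the <fullname>-client: "true" label (or matching the optional ingressNS namespace/pod selectors) can reach the Redis and Sentinel ports, while port 9121 stays open for Prometheus scrapes. A quick sketch, assuming the rendered fullname is db-redis and a hypothetical client pod api-0:

    # Let an existing pod through the NetworkPolicy.
    kubectl label pod api-0 db-redis-client=true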
| nindent 4 }} +{{- end }} +spec: +{{- with .Values.metrics.prometheusRule.rules }} + groups: + - name: {{ template "redis.name" $ }} + rules: {{- tpl (toYaml .) $ | nindent 8 }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/psp.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/psp.yaml new file mode 100644 index 000000000..eca04c134 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/psp.yaml @@ -0,0 +1,43 @@ +{{- if .Values.podSecurityPolicy.create }} +apiVersion: {{ template "podSecurityPolicy.apiVersion" . }} +kind: PodSecurityPolicy +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + heritage: {{ .Release.Service }} + release: {{ .Release.Name }} +spec: + allowPrivilegeEscalation: false + fsGroup: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.securityContext.fsGroup }} + max: {{ .Values.securityContext.fsGroup }} + hostIPC: false + hostNetwork: false + hostPID: false + privileged: false + readOnlyRootFilesystem: false + requiredDropCapabilities: + - ALL + runAsUser: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + seLinux: + rule: 'RunAsAny' + supplementalGroups: + rule: 'MustRunAs' + ranges: + - min: {{ .Values.containerSecurityContext.runAsUser }} + max: {{ .Values.containerSecurityContext.runAsUser }} + volumes: + - 'configMap' + - 'secret' + - 'emptyDir' + - 'persistentVolumeClaim' +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-master-statefulset.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-master-statefulset.yaml new file mode 100644 index 000000000..80dc112a5 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-master-statefulset.yaml @@ -0,0 +1,394 @@ +{{- if or (not .Values.cluster.enabled) (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.statefulset.labels }} + {{- toYaml .Values.master.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.master.statefulset.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.annotations | nindent 4 }} +{{- end }} +spec: + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master + serviceName: {{ template "redis.fullname" . }}-headless + template: + metadata: + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + role: master + {{- if .Values.master.podLabels }} + {{- toYaml .Values.master.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . 
| sha256sum }} + {{- if .Values.master.podAnnotations }} + {{- toYaml .Values.master.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.master.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.master.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.master.priorityClassName }} + priorityClassName: {{ .Values.master.priorityClassName | quote }} + {{- end }} + {{- with .Values.master.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + {{- if .Values.master.nodeSelector }} + nodeSelector: {{- toYaml .Values.master.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.master.tolerations }} + tolerations: {{- toYaml .Values.master.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.master.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.master.shareProcessNamespace }} + {{- end }} + {{- if .Values.master.schedulerName }} + schedulerName: {{ .Values.master.schedulerName }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-master.sh + env: + - name: REDIS_REPLICATION_MODE + value: master + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.master.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.master.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.master.extraEnvVarsCM .Values.master.extraEnvVarsSecret }} + envFrom: + {{- if .Values.master.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.master.extraEnvVarsCM }} + {{- end }} + {{- if .Values.master.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.master.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.master.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.master.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.livenessProbe.periodSeconds }} + # One second longer than command timeout should prevent generation of zombie processes. + timeoutSeconds: {{ add1 .Values.master.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.master.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_liveness_local.sh {{ .Values.master.livenessProbe.timeoutSeconds }} + {{- else if .Values.master.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.master.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.master.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.master.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.master.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.master.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.master.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.master.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local.sh {{ .Values.master.readinessProbe.timeoutSeconds }} + {{- else if .Values.master.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.master.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.master.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc/ + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.master.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . 
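Note the add1 on the probe timeouts above: the kubelet's timeoutSeconds is kept one second longer than the timeout the health script itself applies, so redis-cli is reaped by the script rather than killed mid-flight by the kubelet. The same scripts can be run by hand when a probe misbehaves; the pod and container names here (db-redis-master-0, redis) are assumptions:

    # Run the liveness check exactly as the kubelet would, with a 5s timeout.
    kubectl exec db-redis-master-0 -c redis -- /health/ping_liveness_local.sh 5
    echo $?   # non-zero means the reply was not PONG (the raw reply is echoed)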
}} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if .Values.usePassword }} + - name: REDIS_USER + value: default + {{- if (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.redisPort }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.master.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: "{{ template "redis.volumePermissions.image" . }}" + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.master.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.master.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 10 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.master.persistence.path }} + subPath: {{ .Values.master.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . 
}} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 10 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 10 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if not .Values.master.persistence.enabled }} + - name: "redis-data" + emptyDir: {} + {{- else }} + {{- if .Values.persistence.existingClaim }} + - name: "redis-data" + persistentVolumeClaim: + claimName: {{ include "redis.tplValue" (dict "value" .Values.persistence.existingClaim "context" $) }} + {{- end }} + {{- if .Values.master.persistence.volumes }} + {{- toYaml .Values.master.persistence.volumes | nindent 8 }} + {{- end }} + {{- end }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + - name: tmp + emptyDir: {} + {{- if .Values.master.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.master.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS in enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if and .Values.master.persistence.enabled (not .Values.persistence.existingClaim) (not .Values.master.persistence.volumes) }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: master + {{- if .Values.master.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.master.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.master.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + spec: + accessModes: + {{- range .Values.master.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.master.persistence.size | quote }} + {{ include "redis.master.storageClass" . 
}} + selector: + {{- if .Values.master.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.master.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.master.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.master.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.master.statefulset.updateStrategy }} + {{- if .Values.master.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.master.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.master.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-master-svc.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-master-svc.yaml new file mode 100644 index 000000000..8bd2f8c1c --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-master-svc.yaml @@ -0,0 +1,43 @@ +{{- if not .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-master + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.master.service.labels -}} + {{- toYaml .Values.master.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.master.service.annotations }} + annotations: {{- toYaml .Values.master.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.master.service.type }} + {{ if eq .Values.master.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.master.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.master.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.master.service.type "LoadBalancer") .Values.master.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.master.service.loadBalancerSourceRanges }} +{{- toYaml . | nindent 4 }} +{{- end }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.master.service.port }} + targetPort: redis + {{- if .Values.master.service.nodePort }} + nodePort: {{ .Values.master.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: master +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-node-statefulset.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-node-statefulset.yaml new file mode 100644 index 000000000..9ede64175 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-node-statefulset.yaml @@ -0,0 +1,556 @@ +{{- if and .Values.cluster.enabled .Values.sentinel.enabled }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-node + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . 
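When sentinel is disabled, clients connect through the -master Service rendered above. A minimal in-cluster connectivity check, assuming a rendered fullname of db-redis, the default port, and no password (add -a otherwise):

    kubectl run redis-client --rm -it --restart=Never --image=redis:6.2 -- \
      redis-cli -h db-redis-master -p 6379 ping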
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: node + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: node + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . | nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: "{{ .Values.slave.priorityClassName }}" + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . 
}} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-node.sh + env: + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . }} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + - name: REDIS_DATA_DIR + value: {{ .Values.slave.persistence.path }} + {{- if .Values.sentinel.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.sentinel.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.sentinel.extraEnvVarsCM .Values.sentinel.extraEnvVarsSecret }} + envFrom: + {{- if .Values.sentinel.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.sentinel.extraEnvVarsCM }} + {{- end }} + {{- if .Values.sentinel.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.sentinel.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_liveness_local.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ 
.Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + {{- if .Values.sentinel.enabled }} + - /health/ping_readiness_local.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else }} + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- end }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + - name: tmp + mountPath: /tmp + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if and .Values.cluster.enabled .Values.sentinel.enabled }} + - name: sentinel + image: {{ template "sentinel.image" . }} + imagePullPolicy: {{ .Values.sentinel.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-sentinel.sh + env: + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_SENTINEL_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_SENTINEL_TLS_PORT_NUMBER + value: {{ .Values.sentinel.port | quote }} + - name: REDIS_SENTINEL_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_SENTINEL_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_SENTINEL_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_SENTINEL_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_SENTINEL_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_SENTINEL_PORT + value: {{ .Values.sentinel.port | quote }} + {{- end }} + ports: + - name: redis-sentinel + containerPort: {{ .Values.sentinel.port }} + {{- if .Values.sentinel.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.sentinel.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.livenessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.livenessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.livenessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.sentinel.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.readinessProbe.enabled}} + readinessProbe: + initialDelaySeconds: {{ .Values.sentinel.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.sentinel.readinessProbe.periodSeconds }} + timeoutSeconds: {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.sentinel.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.sentinel.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_sentinel.sh {{ .Values.sentinel.readinessProbe.timeoutSeconds }} + {{- else if .Values.sentinel.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.sentinel.customReadinessProbe | nindent 12 }} + {{- end }} + lifecycle: + preStop: + exec: + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/prestop-sentinel.sh + resources: {{- toYaml .Values.sentinel.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + - name: config + mountPath: /opt/bitnami/redis-sentinel/mounted-etc + - name: sentinel-tmp-conf + mountPath: /opt/bitnami/redis-sentinel/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.sentinel.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . }} + {{- if .Values.usePassword }} + - name: REDIS_USER + value: default + {{- if (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . 
}} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.redisPort }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- if .Values.sentinel.metrics.enabled }} + - name: sentinel-metrics + image: {{ include "sentinel.metrics.image" . }} + imagePullPolicy: {{ .Values.sentinel.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - redis_sentinel_exporter{{- range $key, $value := .Values.sentinel.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + {{- if and .Values.sentinel.usePassword (and .Values.usePassword (not .Values.usePasswordFile)) }} + - name: SENTINEL_PASSWORD + valueFrom: + secretKeyRef: + name: {{ include "redis.secretName" . }} + key: {{ include "redis.secretPasswordKey" . }} + {{- end }} + {{- if and .Values.sentinel.usePassword .Values.usePassword .Values.usePasswordFile }} + - name: SENTINEL_PASSWORD_FILE + value: /secrets/redis-password + {{- end }} + volumeMounts: + {{- if and .Values.sentinel.usePassword .Values.usePassword .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + ports: + - name: sentinelmetrics + containerPort: 9355 + resources: {{- include "common.tplvalues.render" (dict "value" .Values.metrics.resources "context" $) | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . 
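The sentinel-metrics exporter above listens on 9355, while Sentinel itself answers on sentinel.port. To see which pod currently holds the master role (the same query prestop-sentinel.sh issues before deciding whether to trigger a failover), something like the following, with db-redis-node-0 as a stand-in pod name and the default masterSet and port:

    kubectl exec db-redis-node-0 -c sentinel -- \
      redis-cli -p 26379 sentinel get-master-addr-by-name mymaster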
}} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" . }} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: sentinel-tmp-conf + emptyDir: {} + - name: redis-tmp-conf + emptyDir: {} + - name: tmp + emptyDir: {} + {{- if .Values.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.sentinel.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.sentinel.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS is enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-role.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-role.yaml new file mode 100644 index 000000000..080a7f960 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-role.yaml @@ -0,0 +1,22 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: Role +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +rules: +{{- if .Values.podSecurityPolicy.create }} + - apiGroups: ['{{ template "podSecurityPolicy.apiGroup" . }}'] + resources: ['podsecuritypolicies'] + verbs: ['use'] + resourceNames: [{{ template "redis.fullname" . }}] +{{- end -}} +{{- if .Values.rbac.role.rules }} +{{- toYaml .Values.rbac.role.rules | nindent 2 }} +{{- end -}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-rolebinding.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-rolebinding.yaml new file mode 100644 index 000000000..835aa0361 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-rolebinding.yaml @@ -0,0 +1,19 @@ +{{- if .Values.rbac.create -}} +apiVersion: {{ include "common.capabilities.rbac.apiVersion" . }} +kind: RoleBinding +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} +roleRef: + apiGroup: rbac.authorization.k8s.io + kind: Role + name: {{ template "redis.fullname" . }} +subjects: +- kind: ServiceAccount + name: {{ template "redis.serviceAccountName" . 
}} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-serviceaccount.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-serviceaccount.yaml new file mode 100644 index 000000000..081691de6 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-serviceaccount.yaml @@ -0,0 +1,15 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ template "redis.serviceAccountName" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.serviceAccount.annotations }} + annotations: {{ toYaml .Values.serviceAccount.annotations | nindent 4 }} + {{- end }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-slave-statefulset.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-slave-statefulset.yaml new file mode 100644 index 000000000..778cac5cf --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-slave-statefulset.yaml @@ -0,0 +1,398 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: apps/v1 +kind: StatefulSet +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.statefulset.labels }} + {{- toYaml .Values.slave.statefulset.labels | nindent 4 }} + {{- end }} +{{- if .Values.slave.statefulset.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.annotations | nindent 4 }} +{{- end }} +spec: +{{- if .Values.slave.updateStrategy }} + strategy: {{- toYaml .Values.slave.updateStrategy | nindent 4 }} +{{- end }} + replicas: {{ .Values.cluster.slaveCount }} + serviceName: {{ template "redis.fullname" . }}-headless + selector: + matchLabels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave + template: + metadata: + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + chart: {{ template "redis.chart" . }} + role: slave + {{- if .Values.slave.podLabels }} + {{- toYaml .Values.slave.podLabels | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podLabels }} + {{- toYaml .Values.metrics.podLabels | nindent 8 }} + {{- end }} + annotations: + checksum/health: {{ include (print $.Template.BasePath "/health-configmap.yaml") . | sha256sum }} + checksum/configmap: {{ include (print $.Template.BasePath "/configmap.yaml") . | sha256sum }} + checksum/secret: {{ include (print $.Template.BasePath "/secret.yaml") . | sha256sum }} + {{- if .Values.slave.podAnnotations }} + {{- toYaml .Values.slave.podAnnotations | nindent 8 }} + {{- end }} + {{- if and .Values.metrics.enabled .Values.metrics.podAnnotations }} + {{- toYaml .Values.metrics.podAnnotations | nindent 8 }} + {{- end }} + spec: + {{- include "redis.imagePullSecrets" . 
| nindent 6 }} + {{- if .Values.slave.hostAliases }} + hostAliases: {{- include "common.tplvalues.render" (dict "value" .Values.slave.hostAliases "context" $) | nindent 8 }} + {{- end }} + {{- if .Values.securityContext.enabled }} + securityContext: {{- omit .Values.securityContext "enabled" | toYaml | nindent 8 }} + {{- end }} + serviceAccountName: {{ template "redis.serviceAccountName" . }} + {{- if .Values.slave.priorityClassName }} + priorityClassName: {{ .Values.slave.priorityClassName | quote }} + {{- end }} + {{- if .Values.slave.nodeSelector }} + nodeSelector: {{- toYaml .Values.slave.nodeSelector | nindent 8 }} + {{- end }} + {{- if .Values.slave.tolerations }} + tolerations: {{- toYaml .Values.slave.tolerations | nindent 8 }} + {{- end }} + {{- if .Values.slave.shareProcessNamespace }} + shareProcessNamespace: {{ .Values.slave.shareProcessNamespace }} + {{- end }} + {{- if .Values.slave.schedulerName }} + schedulerName: {{ .Values.slave.schedulerName }} + {{- end }} + {{- if .Values.master.spreadConstraints }} + topologySpreadConstraints: {{- toYaml .Values.master.spreadConstraints | nindent 8 }} + {{- end }} + {{- with .Values.slave.affinity }} + affinity: {{- tpl (toYaml .) $ | nindent 8 }} + {{- end }} + containers: + - name: {{ template "redis.name" . }} + image: {{ template "redis.image" . }} + imagePullPolicy: {{ .Values.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - /opt/bitnami/scripts/start-scripts/start-slave.sh + env: + - name: REDIS_REPLICATION_MODE + value: slave + - name: REDIS_MASTER_HOST + value: {{ template "redis.fullname" . }}-master-0.{{ template "redis.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }} + - name: REDIS_MASTER_PORT_NUMBER + value: {{ .Values.redisPort | quote }} + {{- if .Values.usePassword }} + {{- if .Values.usePasswordFile }} + - name: REDIS_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + - name: REDIS_MASTER_PASSWORD_FILE + value: "/opt/bitnami/redis/secrets/redis-password" + {{- else }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + - name: REDIS_MASTER_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- else }} + - name: ALLOW_EMPTY_PASSWORD + value: "yes" + {{- end }} + - name: REDIS_TLS_ENABLED + value: {{ ternary "yes" "no" .Values.tls.enabled | quote }} + {{- if .Values.tls.enabled }} + - name: REDIS_TLS_PORT + value: {{ .Values.redisPort | quote }} + - name: REDIS_TLS_AUTH_CLIENTS + value: {{ ternary "yes" "no" .Values.tls.authClients | quote }} + - name: REDIS_TLS_CERT_FILE + value: {{ template "redis.tlsCert" . }} + - name: REDIS_TLS_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_TLS_CA_FILE + value: {{ template "redis.tlsCACert" . }} + {{- if .Values.tls.dhParamsFilename }} + - name: REDIS_TLS_DH_PARAMS_FILE + value: {{ template "redis.tlsDHParams" . 
}} + {{- end }} + {{- else }} + - name: REDIS_PORT + value: {{ .Values.redisPort | quote }} + {{- end }} + {{- if .Values.slave.extraEnvVars }} + {{- include "redis.tplValue" (dict "value" .Values.slave.extraEnvVars "context" $) | nindent 12 }} + {{- end }} + {{- if or .Values.slave.extraEnvVarsCM .Values.slave.extraEnvVarsSecret }} + envFrom: + {{- if .Values.slave.extraEnvVarsCM }} + - configMapRef: + name: {{ .Values.slave.extraEnvVarsCM }} + {{- end }} + {{- if .Values.slave.extraEnvVarsSecret }} + - secretRef: + name: {{ .Values.slave.extraEnvVarsSecret }} + {{- end }} + {{- end }} + ports: + - name: redis + containerPort: {{ .Values.redisPort }} + {{- if .Values.slave.livenessProbe.enabled }} + livenessProbe: + initialDelaySeconds: {{ .Values.slave.livenessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.livenessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.livenessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.livenessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.livenessProbe.failureThreshold}} + exec: + command: + - sh + - -c + - /health/ping_liveness_local_and_master.sh {{ .Values.slave.livenessProbe.timeoutSeconds }} + {{- else if .Values.slave.customLivenessProbe }} + livenessProbe: {{- toYaml .Values.slave.customLivenessProbe | nindent 12 }} + {{- end }} + {{- if .Values.slave.readinessProbe.enabled }} + readinessProbe: + initialDelaySeconds: {{ .Values.slave.readinessProbe.initialDelaySeconds }} + periodSeconds: {{ .Values.slave.readinessProbe.periodSeconds }} + timeoutSeconds: {{ add1 .Values.slave.readinessProbe.timeoutSeconds }} + successThreshold: {{ .Values.slave.readinessProbe.successThreshold }} + failureThreshold: {{ .Values.slave.readinessProbe.failureThreshold }} + exec: + command: + - sh + - -c + - /health/ping_readiness_local_and_master.sh {{ .Values.slave.readinessProbe.timeoutSeconds }} + {{- else if .Values.slave.customReadinessProbe }} + readinessProbe: {{- toYaml .Values.slave.customReadinessProbe | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.slave.resources | nindent 12 }} + volumeMounts: + - name: start-scripts + mountPath: /opt/bitnami/scripts/start-scripts + - name: health + mountPath: /health + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /opt/bitnami/redis/secrets/ + {{- end }} + - name: redis-data + mountPath: /data + - name: config + mountPath: /opt/bitnami/redis/mounted-etc + - name: redis-tmp-conf + mountPath: /opt/bitnami/redis/etc + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + {{- if .Values.slave.extraVolumeMounts }} + {{- include "common.tplvalues.render" ( dict "value" .Values.slave.extraVolumeMounts "context" $ ) | nindent 12 }} + {{- end }} + {{- if .Values.metrics.enabled }} + - name: metrics + image: {{ template "redis.metrics.image" . }} + imagePullPolicy: {{ .Values.metrics.image.pullPolicy | quote }} + {{- if .Values.containerSecurityContext.enabled }} + securityContext: {{- omit .Values.containerSecurityContext "enabled" | toYaml | nindent 12 }} + {{- end }} + command: + - /bin/bash + - -c + - | + if [[ -f '/secrets/redis-password' ]]; then + export REDIS_PASSWORD=$(cat /secrets/redis-password) + fi + redis_exporter{{- range $key, $value := .Values.metrics.extraArgs }} --{{ $key }}={{ $value }}{{- end }} + env: + - name: REDIS_ALIAS + value: {{ template "redis.fullname" . 
}} + {{- if .Values.usePassword }} + - name: REDIS_USER + value: default + {{- if (not .Values.usePasswordFile) }} + - name: REDIS_PASSWORD + valueFrom: + secretKeyRef: + name: {{ template "redis.secretName" . }} + key: {{ template "redis.secretPasswordKey" . }} + {{- end }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: REDIS_ADDR + value: rediss://{{ .Values.metrics.redisTargetHost }}:{{ .Values.redisPort }} + {{- if .Values.tls.authClients }} + - name: REDIS_EXPORTER_TLS_CLIENT_KEY_FILE + value: {{ template "redis.tlsCertKey" . }} + - name: REDIS_EXPORTER_TLS_CLIENT_CERT_FILE + value: {{ template "redis.tlsCert" . }} + {{- end }} + - name: REDIS_EXPORTER_TLS_CA_CERT_FILE + value: {{ template "redis.tlsCACert" . }} + {{- end }} + volumeMounts: + {{- if .Values.usePasswordFile }} + - name: redis-password + mountPath: /secrets/ + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + mountPath: /opt/bitnami/redis/certs + readOnly: true + {{- end }} + ports: + - name: metrics + containerPort: 9121 + resources: {{- toYaml .Values.metrics.resources | nindent 12 }} + {{- end }} + {{- $needsVolumePermissions := and .Values.volumePermissions.enabled .Values.slave.persistence.enabled .Values.securityContext.enabled .Values.containerSecurityContext.enabled }} + {{- if or $needsVolumePermissions .Values.sysctlImage.enabled }} + initContainers: + {{- if $needsVolumePermissions }} + - name: volume-permissions + image: {{ template "redis.volumePermissions.image" . }} + imagePullPolicy: {{ .Values.volumePermissions.image.pullPolicy | quote }} + command: + - /bin/bash + - -ec + - | + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + chown -R `id -u`:`id -G | cut -d " " -f2` {{ .Values.slave.persistence.path }} + {{- else }} + chown -R {{ .Values.containerSecurityContext.runAsUser }}:{{ .Values.securityContext.fsGroup }} {{ .Values.slave.persistence.path }} + {{- end }} + {{- if eq ( toString ( .Values.volumePermissions.securityContext.runAsUser )) "auto" }} + securityContext: {{- omit .Values.volumePermissions.securityContext "runAsUser" | toYaml | nindent 12 }} + {{- else }} + securityContext: {{- .Values.volumePermissions.securityContext | toYaml | nindent 12 }} + {{- end }} + resources: {{- toYaml .Values.volumePermissions.resources | nindent 12 }} + volumeMounts: + - name: redis-data + mountPath: {{ .Values.slave.persistence.path }} + subPath: {{ .Values.slave.persistence.subPath }} + {{- end }} + {{- if .Values.sysctlImage.enabled }} + - name: init-sysctl + image: {{ template "redis.sysctl.image" . }} + imagePullPolicy: {{ default "" .Values.sysctlImage.pullPolicy | quote }} + resources: {{- toYaml .Values.sysctlImage.resources | nindent 12 }} + {{- if .Values.sysctlImage.mountHostSys }} + volumeMounts: + - name: host-sys + mountPath: /host-sys + {{- end }} + command: {{- toYaml .Values.sysctlImage.command | nindent 12 }} + securityContext: + privileged: true + runAsUser: 0 + {{- end }} + {{- end }} + volumes: + - name: start-scripts + configMap: + name: {{ include "redis.fullname" . }}-scripts + defaultMode: 0755 + - name: health + configMap: + name: {{ template "redis.fullname" . }}-health + defaultMode: 0755 + {{- if .Values.usePasswordFile }} + - name: redis-password + secret: + secretName: {{ template "redis.secretName" . }} + items: + - key: {{ template "redis.secretPasswordKey" . }} + path: redis-password + {{- end }} + - name: config + configMap: + name: {{ template "redis.fullname" .
}} + {{- if .Values.sysctlImage.mountHostSys }} + - name: host-sys + hostPath: + path: /sys + {{- end }} + - name: redis-tmp-conf + emptyDir: {} + {{- if .Values.slave.extraVolumes }} + {{- include "common.tplvalues.render" ( dict "value" .Values.slave.extraVolumes "context" $ ) | nindent 8 }} + {{- end }} + {{- if .Values.tls.enabled }} + - name: redis-certificates + secret: + secretName: {{ required "A secret containing the certificates for the TLS traffic is required when TLS is enabled" .Values.tls.certificatesSecret }} + defaultMode: 256 + {{- end }} + {{- if not .Values.slave.persistence.enabled }} + - name: redis-data + emptyDir: {} + {{- else }} + volumeClaimTemplates: + - metadata: + name: redis-data + labels: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + component: slave + {{- if .Values.slave.statefulset.volumeClaimTemplates }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.labels }} + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.labels | nindent 10 }} + {{- end }} + {{- if .Values.slave.statefulset.volumeClaimTemplates.annotations }} + annotations: + {{- toYaml .Values.slave.statefulset.volumeClaimTemplates.annotations | nindent 10 }} + {{- end }} + {{- end }} + spec: + accessModes: + {{- range .Values.slave.persistence.accessModes }} + - {{ . | quote }} + {{- end }} + resources: + requests: + storage: {{ .Values.slave.persistence.size | quote }} + {{ include "redis.slave.storageClass" . }} + selector: + {{- if .Values.slave.persistence.matchLabels }} + matchLabels: {{- toYaml .Values.slave.persistence.matchLabels | nindent 12 }} + {{- end -}} + {{- if .Values.slave.persistence.matchExpressions }} + matchExpressions: {{- toYaml .Values.slave.persistence.matchExpressions | nindent 12 }} + {{- end -}} + {{- end }} + updateStrategy: + type: {{ .Values.slave.statefulset.updateStrategy }} + {{- if .Values.slave.statefulset.rollingUpdatePartition }} + {{- if (eq "Recreate" .Values.slave.statefulset.updateStrategy) }} + rollingUpdate: null + {{- else }} + rollingUpdate: + partition: {{ .Values.slave.statefulset.rollingUpdatePartition }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-slave-svc.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-slave-svc.yaml new file mode 100644 index 000000000..a67ebb05e --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-slave-svc.yaml @@ -0,0 +1,43 @@ +{{- if and .Values.cluster.enabled (not .Values.sentinel.enabled) }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }}-slave + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" .
}} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.slave.service.labels -}} + {{- toYaml .Values.slave.service.labels | nindent 4 }} + {{- end -}} +{{- if .Values.slave.service.annotations }} + annotations: {{- toYaml .Values.slave.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.slave.service.type }} + {{ if eq .Values.slave.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.slave.service.externalTrafficPolicy }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.slave.service.loadBalancerIP }} + {{- end }} + {{- if and (eq .Values.slave.service.type "LoadBalancer") .Values.slave.service.loadBalancerSourceRanges }} + loadBalancerSourceRanges: + {{- with .Values.slave.service.loadBalancerSourceRanges }} + {{- toYaml . | nindent 4 }} + {{- end }} + {{- end }} + ports: + - name: tcp-redis + port: {{ .Values.slave.service.port }} + targetPort: redis + {{- if .Values.slave.service.nodePort }} + nodePort: {{ .Values.slave.service.nodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . }} + release: {{ .Release.Name }} + role: slave +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/redis-with-sentinel-svc.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-with-sentinel-svc.yaml new file mode 100644 index 000000000..e1c9073a4 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/redis-with-sentinel-svc.yaml @@ -0,0 +1,43 @@ +{{- if .Values.sentinel.enabled }} +apiVersion: v1 +kind: Service +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: {{ .Release.Name }} + heritage: {{ .Release.Service }} + {{- if .Values.sentinel.service.labels }} + {{- toYaml .Values.sentinel.service.labels | nindent 4 }} + {{- end }} +{{- if .Values.sentinel.service.annotations }} + annotations: {{- toYaml .Values.sentinel.service.annotations | nindent 4 }} +{{- end }} +spec: + type: {{ .Values.sentinel.service.type }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" }} + externalTrafficPolicy: {{ .Values.sentinel.service.externalTrafficPolicy }} + {{- end }} + {{ if eq .Values.sentinel.service.type "LoadBalancer" -}} {{ if .Values.sentinel.service.loadBalancerIP }} + loadBalancerIP: {{ .Values.sentinel.service.loadBalancerIP }} + {{ end -}} + {{- end -}} + ports: + - name: tcp-redis + port: {{ .Values.sentinel.service.redisPort }} + targetPort: redis + {{- if .Values.sentinel.service.redisNodePort }} + nodePort: {{ .Values.sentinel.service.redisNodePort }} + {{- end }} + - name: tcp-sentinel + port: {{ .Values.sentinel.service.sentinelPort }} + targetPort: redis-sentinel + {{- if .Values.sentinel.service.sentinelNodePort }} + nodePort: {{ .Values.sentinel.service.sentinelNodePort }} + {{- end }} + selector: + app: {{ template "redis.name" . 
}} + release: {{ .Release.Name }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/charts/redis/templates/secret.yaml b/scripts/helm/helmcharts/databases/charts/redis/templates/secret.yaml new file mode 100644 index 000000000..197aa1890 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/templates/secret.yaml @@ -0,0 +1,15 @@ +{{- if and .Values.usePassword (not .Values.existingSecret) -}} +apiVersion: v1 +kind: Secret +metadata: + name: {{ template "redis.fullname" . }} + namespace: {{ .Release.Namespace | quote }} + labels: + app: {{ template "redis.name" . }} + chart: {{ template "redis.chart" . }} + release: "{{ .Release.Name }}" + heritage: "{{ .Release.Service }}" +type: Opaque +data: + redis-password: {{ include "redis.password" . | b64enc | quote }} +{{- end -}} diff --git a/scripts/helm/helmcharts/databases/charts/redis/values.schema.json b/scripts/helm/helmcharts/databases/charts/redis/values.schema.json new file mode 100644 index 000000000..3188d0c93 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/values.schema.json @@ -0,0 +1,168 @@ +{ + "$schema": "http://json-schema.org/schema#", + "type": "object", + "properties": { + "usePassword": { + "type": "boolean", + "title": "Use password authentication", + "form": true + }, + "password": { + "type": "string", + "title": "Password", + "form": true, + "description": "Defaults to a random 10-character alphanumeric string if not set", + "hidden": { + "value": false, + "path": "usePassword" + } + }, + "cluster": { + "type": "object", + "title": "Cluster Settings", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable master-slave", + "description": "Enable master-slave architecture" + }, + "slaveCount": { + "type": "integer", + "title": "Slave Replicas", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + } + } + } + }, + "master": { + "type": "object", + "title": "Master replicas settings", + "form": true, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for master replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "master/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + "matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "slave": { + "type": "object", + "title": "Slave replicas settings", + "form": true, + "hidden": { + "value": false, + "path": "cluster/enabled" + }, + "properties": { + "persistence": { + "type": "object", + "title": "Persistence for slave replicas", + "form": true, + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable persistence", + "description": "Enable persistence using Persistent Volume Claims" + }, + "size": { + "type": "string", + "title": "Persistent Volume Size", + "form": true, + "render": "slider", + "sliderMin": 1, + "sliderMax": 100, + "sliderUnit": "Gi", + "hidden": { + "value": false, + "path": "slave/persistence/enabled" + } + }, + "matchLabels": { + "type": "object", + "title": "Persistent Match Labels Selector" + }, + 
"matchExpressions": { + "type": "object", + "title": "Persistent Match Expressions Selector" + } + } + } + } + }, + "volumePermissions": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "form": true, + "title": "Enable Init Containers", + "description": "Use an init container to set required folder permissions on the data volume before mounting it in the final destination" + } + } + }, + "metrics": { + "type": "object", + "form": true, + "title": "Prometheus metrics details", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus metrics exporter", + "description": "Create a side-car container to expose Prometheus metrics", + "form": true + }, + "serviceMonitor": { + "type": "object", + "properties": { + "enabled": { + "type": "boolean", + "title": "Create Prometheus Operator ServiceMonitor", + "description": "Create a ServiceMonitor to track metrics using Prometheus Operator", + "form": true, + "hidden": { + "value": false, + "path": "metrics/enabled" + } + } + } + } + } + } + } +} diff --git a/scripts/helm/helmcharts/databases/charts/redis/values.yaml b/scripts/helm/helmcharts/databases/charts/redis/values.yaml new file mode 100644 index 000000000..64e908211 --- /dev/null +++ b/scripts/helm/helmcharts/databases/charts/redis/values.yaml @@ -0,0 +1,1008 @@ +## Global Docker image parameters +## Please, note that this will override the image parameters, including dependencies, configured to use the global value +## Current available global Docker image parameters: imageRegistry and imagePullSecrets +## +global: + # imageRegistry: myRegistryName + # imagePullSecrets: + # - myRegistryKeySecretName + # storageClass: myStorageClass + redis: {} + +## Bitnami Redis(TM) image version +## ref: https://hub.docker.com/r/bitnami/redis/tags/ +## +image: + registry: docker.io + repository: bitnami/redis + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.12-debian-10-r33 + ## Specify a imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + +## String to partially override redis.fullname template (will maintain the release name) +## +# nameOverride: + +## String to fully override redis.fullname template +## +# fullnameOverride: + +## Cluster settings +## +cluster: + enabled: true + slaveCount: 2 + +## Use redis sentinel in the redis pod. 
This will disable the master and slave services and +## create one redis service with ports to the sentinel and the redis instances +## +sentinel: + enabled: false + ## Require password authentication on the sentinel itself + ## ref: https://redis.io/topics/sentinel + ## + usePassword: true + ## Bitnami Redis(TM) Sentinel image version + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel + ## Bitnami Redis(TM) image tag + ## ref: https://github.com/bitnami/bitnami-docker-redis-sentinel#supported-tags-and-respective-dockerfile-links + ## + tag: 6.0.12-debian-10-r24 + ## Specify an imagePullPolicy + ## Defaults to 'Always' if image tag is 'latest', else set to 'IfNotPresent' + ## ref: http://kubernetes.io/docs/user-guide/images/#pre-pulling-images + ## + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + masterSet: mymaster + initialCheckTimeout: 5 + quorum: 2 + downAfterMilliseconds: 20000 + failoverTimeout: 18000 + parallelSyncs: 1 + port: 26379 + + ## Delay in seconds when cleaning nodes' IPs + ## When starting, it will clean the sentinels' IPs (RESET "*") in all the nodes + ## This is the delay before sending the command to the next node + ## + cleanDelaySeconds: 5 + + ## Additional Redis(TM) configuration for the sentinel nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Enable or disable static sentinel IDs for each replica + ## If disabled, each sentinel will generate a random id at startup + ## If enabled, each replica will have a constant ID on each start-up + ## + staticID: false + ## Configure extra options for Redis(TM) Sentinel liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + customLivenessProbe: {} + customReadinessProbe: {} + ## Redis(TM) Sentinel resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Redis(TM) Sentinel Service properties + ## + service: + ## Redis(TM) Sentinel Service type + ## + type: ClusterIP + sentinelPort: 26379 + redisPort: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # sentinelNodePort: + # redisNodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only.
+ ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + + ## Additional commands to run prior to starting Redis(TM) node with sentinel + ## + preExecCmds: "" + + ## An array to add extra env var to the sentinel node configurations + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + + ## Prometheus Exporter / Metrics for Redis Sentinel Exporter + ## + metrics: + enabled: false + + ## Bitnami Redis Sentinel Exporter image + ## ref: https://hub.docker.com/r/bitnami/redis-sentinel-exporter/tags/ + ## + image: + registry: docker.io + repository: bitnami/redis-sentinel-exporter + tag: 1.7.1-debian-10-r105 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + ## Sentinel metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + resources: + ## We usually recommend not to specify default resources and to leave this as a conscious + ## choice for the user. This also increases chances charts run on environments with little + ## resources, such as Minikube. If you do want to specify resources, uncomment the following + ## lines, adjust them as necessary, and remove the curly braces after 'resources:'. + ## e.g: + ## limits: + ## cpu: 500m + ## memory: 1Gi + ## + limits: {} + requests: {} + + ## Extra arguments for Sentinel metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Enable this if you're using https://github.com/coreos/prometheus-operator + ## + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + service: + type: ClusterIP + + ## Metrics port + ## + port: 9355 + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, otherwise leave blank + ## + # loadBalancerIP: + annotations: {} + labels: {} + +## Specifies the Kubernetes Cluster's Domain Name. +## +clusterDomain: cluster.local + +networkPolicy: + ## Specifies whether a NetworkPolicy should be created + ## + enabled: false + + ## The Policy model to apply. When set to false, only pods with the correct + ## client label will have network access to the port Redis(TM) is listening + ## on. 
When true, Redis(TM) will accept connections from any source + ## (with the correct destination port). + ## + # allowExternal: true + + ## Allow connections from other namespaces. Just set label for namespace and set label for pods (optional). + ## + ingressNSMatchLabels: {} + ingressNSPodMatchLabels: {} + +serviceAccount: + ## Specifies whether a ServiceAccount should be created + ## + create: false + ## The name of the ServiceAccount to use. + ## If not set and create is true, a name is generated using the fullname template + ## + name: + ## Add annotations to service account + # annotations: + # iam.gke.io/gcp-service-account: "sa@project.iam.gserviceaccount.com" + +rbac: + ## Specifies whether RBAC resources should be created + ## + create: false + + role: + ## Rules to create. It follows the role specification + # rules: + # - apiGroups: + # - extensions + # resources: + # - podsecuritypolicies + # verbs: + # - use + # resourceNames: + # - gce.unprivileged + rules: [] + +## Redis(TM) pod Security Context +## +securityContext: + enabled: true + fsGroup: 1001 + ## sysctl settings for master and slave pods + ## + ## Uncomment the setting below to increase the net.core.somaxconn value + ## + # sysctls: + # - name: net.core.somaxconn + # value: "10000" + +## Container Security Context +## ref: https://kubernetes.io/docs/tasks/configure-pod-container/security-context/ +## +containerSecurityContext: + enabled: true + runAsUser: 1001 + +## Use password authentication +## +usePassword: true +## Redis(TM) password (both master and slave) +## Defaults to a random 10-character alphanumeric string if not set and usePassword is true +## ref: https://github.com/bitnami/bitnami-docker-redis#setting-the-server-password-on-first-run +## +password: "" +## Use existing secret (ignores previous password) +# existingSecret: +## Password key to be retrieved from Redis(TM) secret +## +# existingSecretPasswordKey: + +## Mount secrets as files instead of environment variables +## +usePasswordFile: false + +## Persist data to a persistent volume (Redis(TM) Master) +## +persistence: + ## A manually managed Persistent Volume and Claim + ## Requires persistence.enabled: true + ## If defined, PVC must be created manually before volume will be bound + ## + existingClaim: + +# Redis(TM) port +redisPort: 6379 + +## +## TLS configuration +## +tls: + # Enable TLS traffic + enabled: false + # + # Whether to require clients to authenticate or not. 
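+  #
+  # A minimal sketch of wiring this section together (the secret name
+  # "redis-tls" and the file names below are hypothetical placeholders, not
+  # chart defaults). Create the secret from local PEM files:
+  #   kubectl create secret generic redis-tls \
+  #     --from-file=tls.crt --from-file=tls.key --from-file=ca.crt
+  # then reference it from these values:
+  #   enabled: true
+  #   certificatesSecret: redis-tls
+  #   certFilename: tls.crt
+  #   certKeyFilename: tls.key
+  #   certCAFilename: ca.crt
+  #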
+ authClients: true + # + # Name of the Secret that contains the certificates + certificatesSecret: + # + # Certificate filename + certFilename: + # + # Certificate Key filename + certKeyFilename: + # + # CA Certificate filename + certCAFilename: + # + # File containing DH params (in order to support DH based ciphers) + # dhParamsFilename: + +## +## Redis(TM) Master parameters +## +master: + ## Redis(TM) command arguments + ## + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the master nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Redis(TM) additional command line flags + ## + ## Can be used to specify command line flags, for example: + ## extraFlags: + ## - "--maxmemory-policy volatile-ttl" + ## - "--repl-backlog-size 1024mb" + ## + extraFlags: [] + ## Comma-separated list of Redis(TM) commands to disable + ## + ## Can be used to disable Redis(TM) commands for security reasons. + ## Commands will be completely disabled by renaming each to an empty string. + ## ref: https://redis.io/topics/security#disabling-of-specific-commands + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Master additional pod labels and annotations + ## ref: https://kubernetes.io/docs/concepts/overview/working-with-objects/labels/ + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) Master resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1. + # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. 
+ # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Master liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 5 + timeoutSeconds: 1 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) Master Node selectors and tolerations for pod assignment + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#nodeselector + ## ref: https://kubernetes.io/docs/concepts/configuration/assign-pod-node/#taints-and-tolerations-beta-feature + ## + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + ## Redis(TM) Master pod/node affinity/anti-affinity + ## + affinity: {} + + ## Redis(TM) Master Service properties + ## + service: + ## Redis(TM) Master Service type + ## + type: ClusterIP + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner.
(gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + volumes: + # - name: volume_name + # emptyDir: {} + + ## Update strategy, can be set to RollingUpdate (default) or OnDelete. + ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## Redis(TM) Master pod priorityClassName + ## + priorityClassName: null + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## +## Redis(TM) Slave properties +## Note: service.type is a mandatory parameter +## The rest of the parameters are either optional or, if undefined, will inherit those declared in Redis(TM) Master +## +slave: + ## Slave Service properties + ## + service: + ## Redis(TM) Slave Service type + ## + type: ClusterIP + ## Redis(TM) port + ## + port: 6379 + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Specify the nodePort value for the LoadBalancer and NodePort service types. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#type-nodeport + ## + # nodePort: + + ## Provide any additional annotations which may be required. This can be used to + ## set the LoadBalancer service type to internal only. + ## ref: https://kubernetes.io/docs/concepts/services-networking/service/#internal-load-balancer + ## + annotations: {} + labels: {} + loadBalancerIP: + # loadBalancerSourceRanges: ["10.0.0.0/8"] + + ## Redis(TM) slave port + ## + port: 6379 + ## Deployment pod host aliases + ## https://kubernetes.io/docs/concepts/services-networking/add-entries-to-pod-etc-hosts-with-host-aliases/ + ## + hostAliases: [] + ## Can be used to specify command line arguments, for example: + ## Note `exec` is prepended to command + ## + command: "/run.sh" + ## Additional commands to run prior to starting Redis(TM) + ## + preExecCmds: "" + ## Additional Redis(TM) configuration for the slave nodes + ## ref: https://redis.io/topics/config + ## + configmap: + ## Redis(TM) extra flags + ## + extraFlags: [] + ## List of Redis(TM) commands to disable + ## + disableCommands: + - FLUSHDB + - FLUSHALL + + ## Redis(TM) Slave pod/node affinity/anti-affinity + ## + affinity: {} + + ## Kubernetes Spread Constraints for pod assignment + ## ref: https://kubernetes.io/docs/concepts/workloads/pods/pod-topology-spread-constraints/ + ## + # - maxSkew: 1 + # topologyKey: node + # whenUnsatisfiable: DoNotSchedule + spreadConstraints: {} + + # Enable shared process namespace in a pod. + # If set to false (default), each container will run in separate namespace, redis will have PID=1.
+ # If set to true, the /pause will run as init process and will reap any zombie PIDs, + # for example, generated by a custom exec probe running longer than a probe timeoutSeconds. + # Enable this only if customLivenessProbe or customReadinessProbe is used and zombie PIDs are accumulating. + # Ref: https://kubernetes.io/docs/tasks/configure-pod-container/share-process-namespace/ + shareProcessNamespace: false + ## Configure extra options for Redis(TM) Slave liveness and readiness probes + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/configure-liveness-readiness-probes/#configure-probes + ## + livenessProbe: + enabled: true + initialDelaySeconds: 30 + periodSeconds: 10 + timeoutSeconds: 5 + successThreshold: 1 + failureThreshold: 5 + readinessProbe: + enabled: true + initialDelaySeconds: 5 + periodSeconds: 10 + timeoutSeconds: 10 + successThreshold: 1 + failureThreshold: 5 + + ## Configure custom probes for other images like + ## rhscl/redis-32-rhel7 rhscl/redis-5-rhel7 + ## Only used if readinessProbe.enabled: false / livenessProbe.enabled: false + ## + # customLivenessProbe: + # tcpSocket: + # port: 6379 + # initialDelaySeconds: 10 + # periodSeconds: 5 + # customReadinessProbe: + # initialDelaySeconds: 30 + # periodSeconds: 10 + # timeoutSeconds: 5 + # exec: + # command: + # - "container-entrypoint" + # - "bash" + # - "-c" + # - "redis-cli set liveness-probe \"`date`\" | grep OK" + customLivenessProbe: {} + customReadinessProbe: {} + + ## Redis(TM) slave Resource + # resources: + # requests: + # memory: 256Mi + # cpu: 100m + + ## Redis(TM) slave selectors and tolerations for pod assignment + # nodeSelector: {"beta.kubernetes.io/arch": "amd64"} + # tolerations: [] + + ## Use an alternate scheduler, e.g. "stork". + ## ref: https://kubernetes.io/docs/tasks/administer-cluster/configure-multiple-schedulers/ + ## + # schedulerName: + + ## Redis(TM) slave pod Annotation and Labels + ## + podLabels: {} + podAnnotations: {} + + ## Redis(TM) slave pod priorityClassName + priorityClassName: null + + ## Enable persistence using Persistent Volume Claims + ## ref: http://kubernetes.io/docs/user-guide/persistent-volumes/ + ## + persistence: + enabled: true + ## The path the volume will be mounted at, useful when using different + ## Redis(TM) images. + ## + path: /data + ## The subdirectory of the volume to mount to, useful in dev environments + ## and one PV for multiple services. + ## + subPath: "" + ## redis data Persistent Volume Storage Class + ## If defined, storageClassName: + ## If set to "-", storageClassName: "", which disables dynamic provisioning + ## If undefined (the default) or set to null, no storageClassName spec is + ## set, choosing the default provisioner. (gp2 on AWS, standard on + ## GKE, AWS & OpenStack) + ## + # storageClass: "-" + accessModes: + - ReadWriteOnce + size: 8Gi + ## Persistent Volume selectors + ## https://kubernetes.io/docs/concepts/storage/persistent-volumes/#selector + ## + matchLabels: {} + matchExpressions: {} + + ## Update strategy, can be set to RollingUpdate (default) or OnDelete.
+ ## https://kubernetes.io/docs/tutorials/stateful-application/basic-stateful-set/#updating-statefulsets + ## + statefulset: + labels: {} + annotations: {} + updateStrategy: RollingUpdate + ## Partition update strategy + ## https://kubernetes.io/docs/concepts/workloads/controllers/statefulset/#partitions + # rollingUpdatePartition: + volumeClaimTemplates: + labels: {} + annotations: {} + + ## An array to add extra env vars + ## For example: + ## extraEnvVars: + ## - name: name + ## value: value + ## - name: other_name + ## valueFrom: + ## fieldRef: + ## fieldPath: fieldPath + ## + extraEnvVars: [] + + ## ConfigMap with extra env vars: + ## + extraEnvVarsCM: [] + + ## Secret with extra env vars: + ## + extraEnvVarsSecret: [] + +## Prometheus Exporter / Metrics +## +metrics: + enabled: false + + image: + registry: docker.io + repository: bitnami/redis-exporter + tag: 1.20.0-debian-10-r12 + pullPolicy: IfNotPresent + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + + # A way to specify an alternative redis hostname, if you set a local endpoint in hostAliases for example + # Useful for certificate CN/SAN matching + redisTargetHost: "localhost" + + ## Metrics exporter resource requests and limits + ## ref: http://kubernetes.io/docs/user-guide/compute-resources/ + ## + # resources: {} + + ## Extra arguments for Metrics exporter, for example: + ## extraArgs: + ## check-keys: myKey,myOtherKey + # extraArgs: {} + + ## Metrics exporter pod Annotation and Labels + ## + podAnnotations: + prometheus.io/scrape: "true" + prometheus.io/port: "9121" + # podLabels: {} + + # Enable this if you're using https://github.com/coreos/prometheus-operator + serviceMonitor: + enabled: false + ## Specify a namespace if needed + # namespace: monitoring + # fallback to the prometheus default unless specified + # interval: 10s + ## Defaults to what's used if you follow CoreOS [Prometheus Install Instructions](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#tldr) + ## [Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#prometheus-operator-1) + ## [Kube Prometheus Selector Label](https://github.com/bitnami/charts/tree/master/bitnami/prometheus-operator#exporters) + ## + selector: + prometheus: kube-prometheus + + ## RelabelConfigs to apply to samples before scraping + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evaluated as a template + ## + relabelings: [] + + ## MetricRelabelConfigs to apply to samples before ingestion + ## ref: https://github.com/coreos/prometheus-operator/blob/master/Documentation/api.md#relabelconfig + ## Value is evaluated as a template + ## + metricRelabelings: [] + # - sourceLabels: + # - "__name__" + # targetLabel: "__name__" + # action: replace + # regex: '(.*)' + # replacement: 'example_prefix_$1' + + ## Custom PrometheusRule to be defined + ## The value is evaluated as a template, so, for example, the value can depend on .Release or .Chart + ## ref: https://github.com/coreos/prometheus-operator#customresourcedefinitions + ## + prometheusRule: + enabled: false + additionalLabels: {} + namespace: "" + ## Redis(TM) prometheus rules + ## These are just example rules; please adapt them to your needs.
+ ## Make sure to constrain the rules to the current redis service. + # rules: + # - alert: RedisDown + # expr: redis_up{service="{{ template "redis.fullname" . }}-metrics"} == 0 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} down + # description: Redis(TM) instance {{ "{{ $labels.instance }}" }} is down + # - alert: RedisMemoryHigh + # expr: > + # redis_memory_used_bytes{service="{{ template "redis.fullname" . }}-metrics"} * 100 + # / + # redis_memory_max_bytes{service="{{ template "redis.fullname" . }}-metrics"} + # > 90 + # for: 2m + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} is using too much memory + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} is using {{ "{{ $value }}" }}% of its available memory. + # - alert: RedisKeyEviction + # expr: | + # increase(redis_evicted_keys_total{service="{{ template "redis.fullname" . }}-metrics"}[5m]) > 0 + # for: 1s + # labels: + # severity: error + # annotations: + # summary: Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted keys + # description: | + # Redis(TM) instance {{ "{{ $labels.instance }}" }} has evicted {{ "{{ $value }}" }} keys in the last 5 minutes. + rules: [] + + ## Metrics exporter pod priorityClassName + priorityClassName: null + service: + type: ClusterIP + + ## External traffic policy (when service type is LoadBalancer) + ## ref: https://kubernetes.io/docs/tasks/access-application-cluster/create-external-load-balancer/#preserving-the-client-source-ip + ## + externalTrafficPolicy: Cluster + + ## Use serviceLoadBalancerIP to request a specific static IP, + ## otherwise leave blank + # loadBalancerIP: + annotations: {} + labels: {} + +## +## Init containers parameters: +## volumePermissions: Change the owner of the persistent volume mountpoint to RunAsUser:fsGroup +## +volumePermissions: + enabled: false + image: + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + + ## Init container Security Context + ## Note: the chown of the data folder is done to containerSecurityContext.runAsUser + ## and not the below volumePermissions.securityContext.runAsUser + ## When runAsUser is set to the special value "auto", the init container will try to chown the + ## data folder to an auto-determined user & group, using commands: `id -u`:`id -G | cut -d" " -f2` + ## "auto" is especially useful for OpenShift which has scc with dynamic userids (and 0 is not allowed). + ## You may want to use this volumePermissions.securityContext.runAsUser="auto" in combination with + ## podSecurityContext.enabled=false,containerSecurityContext.enabled=false + ## + securityContext: + runAsUser: 0 + +## Redis(TM) config file +## ref: https://redis.io/topics/config +## +configmap: |- + # Enable AOF https://redis.io/topics/persistence#append-only-file + appendonly yes + # Disable RDB persistence, AOF persistence already enabled.
+ save "" + +## Sysctl InitContainer +## used to perform sysctl operation to modify Kernel settings (needed sometimes to avoid warnings) +## +sysctlImage: + enabled: false + command: [] + registry: docker.io + repository: bitnami/bitnami-shell + tag: "10" + pullPolicy: Always + ## Optionally specify an array of imagePullSecrets. + ## Secrets must be manually created in the namespace. + ## ref: https://kubernetes.io/docs/tasks/configure-pod-container/pull-image-private-registry/ + ## + # pullSecrets: + # - myRegistryKeySecretName + mountHostSys: false + resources: {} + # resources: + # requests: + # memory: 128Mi + # cpu: 100m + +## PodSecurityPolicy configuration +## ref: https://kubernetes.io/docs/concepts/policy/pod-security-policy/ +## +podSecurityPolicy: + ## Specifies whether a PodSecurityPolicy should be created + ## + create: false + +## Define a disruption budget +## ref: https://kubernetes.io/docs/concepts/workloads/pods/disruptions/ +## +podDisruptionBudget: + enabled: false + minAvailable: 1 + # maxUnavailable: 1 diff --git a/scripts/helm/helmcharts/databases/templates/NOTES.txt b/scripts/helm/helmcharts/databases/templates/NOTES.txt new file mode 100644 index 000000000..0a725d663 --- /dev/null +++ b/scripts/helm/helmcharts/databases/templates/NOTES.txt @@ -0,0 +1,5 @@ +To get minio credentials run: + + echo "AccessKey: `kubectl get secret --namespace {{ .Release.Namespace }} minio -o jsonpath="{.data.access-key}" | base64 --decode`" + echo "SecretKey: `kubectl get secret --namespace {{ .Release.Namespace }} minio -o jsonpath="{.data.secret-key}" | base64 --decode`" + diff --git a/scripts/helm/helmcharts/databases/templates/_helpers.tpl b/scripts/helm/helmcharts/databases/templates/_helpers.tpl new file mode 100644 index 000000000..066c03a17 --- /dev/null +++ b/scripts/helm/helmcharts/databases/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "databases.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "databases.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "databases.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "databases.labels" -}} +helm.sh/chart: {{ include "databases.chart" . }} +{{ include "databases.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "databases.selectorLabels" -}} +app.kubernetes.io/name: {{ include "databases.name" . 
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "databases.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "databases.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/databases/values.yaml b/scripts/helm/helmcharts/databases/values.yaml new file mode 100644 index 000000000..6c7f28960 --- /dev/null +++ b/scripts/helm/helmcharts/databases/values.yaml @@ -0,0 +1,142 @@ +# Default values for databases. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+autoscaling:
+  enabled: false
+  minReplicas: 1
+  maxReplicas: 100
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
+
+
+
+## Child charts
+redis:
+  enabled: true
+  fullnameOverride: redis
+  usePassword: false
+  cluster:
+    enabled: false
+  redis:
+    resources:
+      limits:
+        cpu: 250m
+        memory: 2Gi
+      requests:
+        cpu: 100m
+        memory: 128Mi
+
+postgresql:
+  enabled: true
+  # postgresqlPassword: asayerPostgres
+  fullnameOverride: postgresql
+  image:
+    tag: 13.5.0-debian-10-r62
+  resources:
+    limits:
+      cpu: 1
+      memory: 2Gi
+    requests:
+      cpu: 250m
+      memory: 256Mi
+
+minio:
+  # global:
+  #   minio:
+  #     accessKey: "{{ minio_access_key }}"
+  #     secretKey: "{{ minio_secret_key }}"
+  fullnameOverride: minio
+  resources:
+    limits:
+      cpu: 256m
+      memory: 512Mi
+    requests:
+      cpu: 100m
+      memory: 128Mi
+
+kafka:
+  fullnameOverride: kafka
+  enabled: false
+
+
+# Enterprise dbs
+clickhouse:
+  image:
+    tag: "21.9.4.35"
+  enabled: false
diff --git a/scripts/helm/helmcharts/init.sh b/scripts/helm/helmcharts/init.sh
new file mode 100644
index 000000000..c284fab92
--- /dev/null
+++ b/scripts/helm/helmcharts/init.sh
@@ -0,0 +1,99 @@
+#!/bin/bash
+
+# --- helper functions for logs ---
+info()
+{
+    echo '[INFO] ' "$@"
+}
+warn()
+{
+    echo '[WARN] ' "$@" >&2
+}
+fatal()
+{
+    echo '[ERROR] ' "$@" >&2
+    exit 1
+}
+
+version="v1.4.0"
+usr=`whoami`
+
+# Installing k3s
+curl -sL https://get.k3s.io | sudo K3S_KUBECONFIG_MODE="644" INSTALL_K3S_VERSION='v1.19.5+k3s2' INSTALL_K3S_EXEC="--no-deploy=traefik" sh -
+mkdir -p ~/.kube
+sudo cp /etc/rancher/k3s/k3s.yaml ~/.kube/config
+sudo chown -R $usr ~/.kube/config
+chmod 0644 ~/.kube/config
+
+
+## Installing kubectl
+which kubectl &> /dev/null || {
+    info "kubectl not installed. Installing it..."
+    sudo curl -SsL https://dl.k8s.io/release/v1.20.0/bin/linux/amd64/kubectl -o /usr/local/bin/kubectl ; sudo chmod +x /usr/local/bin/kubectl
+}
+
+## Installing stern
+which stern &> /dev/null || {
+    info "stern not installed. Installing it..."
+    sudo curl -SsL https://github.com/derdanne/stern/releases/download/2.1.16/stern_linux_amd64 -o /usr/local/bin/stern ; sudo chmod +x /usr/local/bin/stern
+}
+
+## Installing k9s
+which k9s &> /dev/null || {
+    info "k9s not installed. Installing it..."
+    sudo curl -SsL https://github.com/derailed/k9s/releases/download/v0.24.2/k9s_Linux_x86_64.tar.gz -o /tmp/k9s.tar.gz
+    cd /tmp
+    tar -xf k9s.tar.gz
+    sudo mv k9s /usr/local/bin/k9s
+    sudo chmod +x /usr/local/bin/k9s
+    cd -
+}
+
+## Installing helm
+which helm &> /dev/null
+if [[ $? -ne 0 ]]; then
+    info "helm not installed. Installing it..."
+    curl -sSL https://get.helm.sh/helm-v3.4.2-linux-amd64.tar.gz -o /tmp/helm.tar.gz
+    tar -xf /tmp/helm.tar.gz
+    chmod +x linux-amd64/helm
+    sudo cp linux-amd64/helm /usr/local/bin/helm
+    rm -rf linux-amd64 /tmp/helm.tar.gz
+fi
+
+## Installing openssl
+sudo apt update &> /dev/null
+sudo apt install openssl -y &> /dev/null
+
+randomPass() {
+    openssl rand -hex 10
+}
+
+## Prepping the infra
+
+## Don't override an existing variables file.
+[[ -f vars.yaml ]] && {
+    warn "Existing variables file found. Not downloading."
+} || {
+    info "Downloading vars file"
+    # NOTE: the original download URL was truncated after ${version}; the path
+    # below assumes vars.yaml sits beside these charts in the repository.
+    curl -sSL -o vars.yaml "https://raw.githubusercontent.com/rjshrjndrn/openreplay/${version}/scripts/helm/helmcharts/vars.yaml"
+}
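+
+# Usage: export DOMAIN_NAME before running this script, e.g. (hypothetical domain):
+#   DOMAIN_NAME=openreplay.mycomp.org bash init.sh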
+[[ -z $DOMAIN_NAME ]] && {
+    fatal 'DOMAIN_NAME variable is empty. Rerun the script: `DOMAIN_NAME=openreplay.mycomp.org bash init.sh`'
+}
+
+info "Creating dynamic passwords"
+sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"$(randomPass)\"/g" vars.yaml
+sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"$(randomPass)\"/g" vars.yaml
+sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"$(randomPass)\"/g" vars.yaml
+sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"$(randomPass)\"/g" vars.yaml
+sed -i "s/domainName: \"\"/domainName: \"${DOMAIN_NAME}\"/g" vars.yaml
+
+
+## Installing OpenReplay
+info "Installing databases"
+helm upgrade --install databases ./databases -n db --create-namespace --wait -f ./vars.yaml --atomic
+info "Installing application"
+helm upgrade --install openreplay ./openreplay -n app --create-namespace --wait -f ./vars.yaml --atomic
diff --git a/scripts/helm/helmcharts/openreplay/.helmignore b/scripts/helm/helmcharts/openreplay/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helm/helmcharts/openreplay/Chart.yaml b/scripts/helm/helmcharts/openreplay/Chart.yaml
new file mode 100644
index 000000000..d7bfd0074
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/Chart.yaml
@@ -0,0 +1,25 @@
+apiVersion: v2
+name: openreplay
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+# Ref: https://github.com/helm/helm/issues/7858#issuecomment-608114589
+appVersion: "v1.4.0"
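
In the subchart deployment templates that follow, the container image tag
defaults to the chart's appVersion ({{ .Values.image.tag | default .Chart.AppVersion }}),
and Helm only populates .Chart.AppVersion from the lower-camel appVersion key
in Chart.yaml. A quick way to inspect the tags the umbrella chart resolves to
(a sketch, run from the helmcharts directory, assuming vars.yaml has already
been downloaded there):

    # Render the umbrella chart offline and list the image references it produces.
    helm template openreplay ./openreplay -f ./vars.yaml | grep 'image: '
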
diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/.helmignore b/scripts/helm/helmcharts/openreplay/charts/alerts/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/alerts/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/alerts/Chart.yaml
new file mode 100644
index 000000000..4cda7945b
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/alerts/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: alerts
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "v1.4.0"
diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/NOTES.txt
new file mode 100644
index 000000000..4aded5587
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "alerts.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "alerts.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "alerts.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "alerts.name" .
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/_helpers.tpl new file mode 100644 index 000000000..35ad32196 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "alerts.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "alerts.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "alerts.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "alerts.labels" -}} +helm.sh/chart: {{ include "alerts.chart" . }} +{{ include "alerts.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "alerts.selectorLabels" -}} +app.kubernetes.io/name: {{ include "alerts.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "alerts.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "alerts.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/deployment.yaml new file mode 100644 index 000000000..3639ae413 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/deployment.yaml @@ -0,0 +1,64 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "alerts.fullname" . }} + labels: + {{- include "alerts.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "alerts.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "alerts.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + serviceAccountName: {{ include "alerts.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: POSTGRES_STRING + value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:{{ .Values.global.postgresql.postgresqlPassword }}@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/hpa.yaml new file mode 100644 index 000000000..b25fef03d --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "alerts.fullname" . }} + labels: + {{- include "alerts.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "alerts.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/ingress.yaml new file mode 100644 index 000000000..497e2ec3c --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "alerts.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "alerts.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/service.yaml new file mode 100644 index 000000000..a002cfd1b --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "alerts.fullname" . }} + labels: + {{- include "alerts.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "alerts.selectorLabels" . | nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/serviceaccount.yaml new file mode 100644 index 000000000..044787236 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "alerts.serviceAccountName" . }} + labels: + {{- include "alerts.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/tests/test-connection.yaml new file mode 100644 index 000000000..05b0f0496 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/alerts/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "alerts.fullname" . 
}}-test-connection" + labels: + {{- include "alerts.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "alerts.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/alerts/values.yaml b/scripts/helm/helmcharts/openreplay/charts/alerts/values.yaml new file mode 100644 index 000000000..9a353b057 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/alerts/values.yaml @@ -0,0 +1,87 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/alerts + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "alerts" +fullnameOverride: "alerts" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + ALERT_NOTIFICATION_STRING: http://chalice-openreplay.app.svc.cluster.local:8000/alerts/notifications + CLICKHOUSE_STRING: 'tcp://clickhouse.db.svc.cluster.local:9000/default' + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/.helmignore b/scripts/helm/helmcharts/openreplay/charts/assets/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/assets/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
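+# For example, a (hypothetical) line "!keep-me.txt" would re-include a file
+# that an earlier pattern had ignored.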
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/assets/Chart.yaml
new file mode 100644
index 000000000..fe932b71c
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/assets/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: assets
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "v1.4.0"
diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/assets/templates/NOTES.txt
new file mode 100644
index 000000000..1758d5c64
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/assets/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "assets.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "assets.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "assets.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "assets.name" .
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/assets/templates/_helpers.tpl new file mode 100644 index 000000000..6d684eed5 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/assets/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "assets.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "assets.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "assets.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "assets.labels" -}} +helm.sh/chart: {{ include "assets.chart" . }} +{{ include "assets.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "assets.selectorLabels" -}} +app.kubernetes.io/name: {{ include "assets.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "assets.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "assets.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/assets/templates/deployment.yaml new file mode 100644 index 000000000..5df3ee4a8 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/assets/templates/deployment.yaml @@ -0,0 +1,84 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "assets.fullname" . }} + labels: + {{- include "assets.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "assets.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "assets.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }}
+      {{- end }}
+      serviceAccountName: {{ include "assets.serviceAccountName" . }}
+      securityContext:
+        {{- toYaml .Values.podSecurityContext | nindent 8 }}
+      containers:
+        - name: {{ .Chart.Name }}
+          securityContext:
+            {{- toYaml .Values.securityContext | nindent 12 }}
+          {{- if .Values.global.enterpriseEditionLicense }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee"
+          {{- else }}
+          image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
+          {{- end }}
+          imagePullPolicy: {{ .Values.image.pullPolicy }}
+          env:
+            - name: AWS_ACCESS_KEY_ID
+              value: '{{ .Values.global.s3.accessKey }}'
+            - name: AWS_SECRET_ACCESS_KEY
+              value: '{{ .Values.global.s3.secretKey }}'
+            - name: S3_BUCKET_ASSETS
+              value: '{{ .Values.global.s3.assetsBucket }}'
+            - name: LICENSE_KEY
+              value: '{{ .Values.global.enterpriseEditionLicense }}'
+            - name: AWS_ENDPOINT
+              value: '{{ .Values.global.s3.endpoint }}'
+            - name: AWS_REGION
+              value: '{{ .Values.global.s3.region }}'
+            - name: REDIS_STRING
+              value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}'
+            - name: KAFKA_SERVERS
+              value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}'
+            - name: KAFKA_USE_SSL
+              value: '{{ .Values.global.kafka.kafkaUseSsl }}'
+            # Ref: https://stackoverflow.com/questions/53634583/go-template-split-string-by-delimiter
+            # We need the origin in the form <scheme>://<bucket>.<endpoint-host>,
+            # e.g. https://sessions-assets.s3.example.com (hypothetical values)
+            - name: ASSETS_ORIGIN
+              value: '{{ (split "://" .Values.global.s3.endpoint)._0 }}://{{.Values.global.s3.assetsBucket}}.{{ (split "://" .Values.global.s3.endpoint)._1 }}'
+            {{- range $key, $val := .Values.env }}
+            - name: {{ $key }}
+              value: '{{ $val }}'
+            {{- end}}
+          ports:
+            - name: http
+              containerPort: {{ .Values.service.port }}
+              protocol: TCP
+          resources:
+            {{- toYaml .Values.resources | nindent 12 }}
+      {{- with .Values.nodeSelector }}
+      nodeSelector:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.affinity }}
+      affinity:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
+      {{- with .Values.tolerations }}
+      tolerations:
+        {{- toYaml . | nindent 8 }}
+      {{- end }}
diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/assets/templates/hpa.yaml
new file mode 100644
index 000000000..7b271a20e
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/assets/templates/hpa.yaml
@@ -0,0 +1,28 @@
+{{- if .Values.autoscaling.enabled }}
+apiVersion: autoscaling/v2beta1
+kind: HorizontalPodAutoscaler
+metadata:
+  name: {{ include "assets.fullname" . }}
+  labels:
+    {{- include "assets.labels" . | nindent 4 }}
+spec:
+  scaleTargetRef:
+    apiVersion: apps/v1
+    kind: Deployment
+    name: {{ include "assets.fullname" .
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/assets/templates/ingress.yaml new file mode 100644 index 000000000..62c421a9a --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/assets/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "assets.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "assets.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/assets/templates/service.yaml new file mode 100644 index 000000000..5e613c67e --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/assets/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "assets.fullname" . }} + labels: + {{- include "assets.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "assets.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/assets/templates/serviceaccount.yaml new file mode 100644 index 000000000..90c3f5319 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/assets/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "assets.serviceAccountName" . }} + labels: + {{- include "assets.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/assets/templates/tests/test-connection.yaml new file mode 100644 index 000000000..ce67efb1c --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/assets/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "assets.fullname" . }}-test-connection" + labels: + {{- include "assets.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "assets.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/assets/values.yaml b/scripts/helm/helmcharts/openreplay/charts/assets/values.yaml new file mode 100644 index 000000000..875d0450a --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/assets/values.yaml @@ -0,0 +1,84 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/assets + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "assets" +fullnameOverride: "assets" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+  # limits:
+  #   cpu: 100m
+  #   memory: 128Mi
+  # requests:
+  #   cpu: 100m
+  #   memory: 128Mi
+
+autoscaling:
+  enabled: true
+  minReplicas: 1
+  maxReplicas: 5
+  targetCPUUtilizationPercentage: 80
+  # targetMemoryUtilizationPercentage: 80
+
+env: {}
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/.helmignore b/scripts/helm/helmcharts/openreplay/charts/chalice/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/chalice/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/chalice/Chart.yaml
new file mode 100644
index 000000000..42ec55dec
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/chalice/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: chalice
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "v1.4.0"
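
The chalice subchart fronts the API on service port 8000 (see its values.yaml
further down), and init.sh installs the application release into the app
namespace. A minimal smoke-test sketch under those assumptions (the service is
named "chalice" via fullnameOverride; the response body depends on the API):

    # Forward the chalice API service to localhost and probe it.
    kubectl -n app port-forward svc/chalice 8000:8000 &
    curl -s http://127.0.0.1:8000/
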
diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/NOTES.txt
new file mode 100644
index 000000000..ecc5a589c
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+  {{- range .paths }}
+  http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+  {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+  export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "chalice.fullname" . }})
+  export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+  echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+     NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+           You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "chalice.fullname" . }}'
+  export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "chalice.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+  echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+  export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "chalice.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+  export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+  echo "Visit http://127.0.0.1:8080 to use your application"
+  kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/_helpers.tpl
new file mode 100644
index 000000000..27c1ff9f3
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "chalice.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "chalice.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "chalice.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "chalice.labels" -}}
+helm.sh/chart: {{ include "chalice.chart" . }}
+{{ include "chalice.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "chalice.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "chalice.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "chalice.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "chalice.fullname" .) .Values.serviceAccount.name }}
+{{- else }}
+{{- default "default" .Values.serviceAccount.name }}
+{{- end }}
+{{- end }}
diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/deployment.yaml
new file mode 100644
index 000000000..805873de7
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/deployment.yaml
@@ -0,0 +1,94 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+  name: {{ include "chalice.fullname" .
}} + labels: + {{- include "chalice.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "chalice.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "chalice.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "chalice.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: version_number + value: '{{ .Chart.AppVersion }}' + - name: pg_host + value: '{{ .Values.global.postgresql.postgresqlHost }}' + - name: pg_port + value: "5432" + - name: pg_dbname + value: "{{ .Values.global.postgresql.postgresqlDatabase }}" + - name: pg_user + value: '{{ .Values.global.postgresql.postgresqlUser }}' + - name: pg_password + value: '{{ .Values.global.postgresql.postgresqlPassword }}' + - name: S3_HOST + {{- if eq .Values.global.s3.endpoint "http://minio.db.svc.cluster.local:9000" }} + value: 'https://{{ .Values.global.domainName }}' + {{- else}} + value: '{{ .Values.global.s3.endpoint }}' + {{- end}} + - name: S3_KEY + value: '{{ .Values.global.s3.accessKey }}' + - name: S3_SECRET + value: '{{ .Values.global.s3.secretKey }}' + - name: AWS_DEFAULT_REGION + value: '{{ .Values.global.s3.region }}' + - name: sessions_region + value: '{{ .Values.global.s3.region }}' + - name: sessions_bucket + value: '{{ .Values.global.s3.recordings_bucket }}' + - name: sourcemaps_bucket + value: '{{ .Values.global.s3.sourcemaps_bucket }}' + - name: js_cache_bucket + value: '{{ .Values.global.s3.assetsBucket }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/hpa.yaml new file mode 100644 index 000000000..dc1cb12c1 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "chalice.fullname" . }} + labels: + {{- include "chalice.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "chalice.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/ingress.yaml new file mode 100644 index 000000000..8f680e949 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "chalice.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "chalice.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/service.yaml new file mode 100644 index 000000000..02b01d1ab --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "chalice.fullname" . }} + labels: + {{- include "chalice.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "chalice.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/serviceaccount.yaml new file mode 100644 index 000000000..9af2e3ac4 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "chalice.serviceAccountName" . }} + labels: + {{- include "chalice.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/tests/test-connection.yaml new file mode 100644 index 000000000..4e76f6cd3 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/chalice/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "chalice.fullname" . }}-test-connection" + labels: + {{- include "chalice.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "chalice.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/chalice/values.yaml b/scripts/helm/helmcharts/openreplay/charts/chalice/values.yaml new file mode 100644 index 000000000..509516136 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/chalice/values.yaml @@ -0,0 +1,115 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/chalice + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "chalice" +fullnameOverride: "chalice" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 8000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + #   cpu: 100m + #   memory: 128Mi + # requests: + #   cpu: 100m + #   memory: 128Mi + +autoscaling: +  enabled: true +  minReplicas: 1 +  maxReplicas: 5 +  targetCPUUtilizationPercentage: 80 +  # targetMemoryUtilizationPercentage: 80 + +env: +  ch_host: clickhouse.db.svc.cluster.local +  ch_port: 9000 +  captcha_server: '' +  captcha_key: '' +  async_Token: '' +  EMAIL_HOST: '' +  EMAIL_PORT: '587' +  EMAIL_USER: '' +  EMAIL_PASSWORD: '' +  EMAIL_USE_TLS: 'true' +  EMAIL_USE_SSL: 'false' +  EMAIL_SSL_KEY: '' +  EMAIL_SSL_CERT: '' +  EMAIL_FROM: OpenReplay +  SITE_URL: '' +  announcement_url: '' +  # Used to sign API JWTs; set a strong random value here before deploying. +  jwt_secret: "SetARandomStringHere" +  jwt_algorithm: HS512 +  jwt_exp_delta_seconds: '2592000' +  # Enable logging for python app +  # Ref: https://stackoverflow.com/questions/43969743/logs-in-kubernetes-pod-not-showing-up +  PYTHONUNBUFFERED: '0' +  SAML2_MD_URL: '' +  idp_entityId: '' +  idp_sso_url: '' +  idp_x509cert: '' +  idp_sls_url: '' +  idp_name: '' +  assist_secret: '' +  iceServers: '' + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helm/helmcharts/openreplay/charts/db/.helmignore b/scripts/helm/helmcharts/openreplay/charts/db/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helm/helmcharts/openreplay/charts/db/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/db/Chart.yaml new file mode 100644 index 000000000..af2db8c06 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: db +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. Note the key must be lowercase "appVersion"; +# Helm does not recognize "AppVersion", so .Chart.AppVersion would render empty. +appVersion: "v1.4.0" diff --git a/scripts/helm/helmcharts/openreplay/charts/db/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/db/templates/NOTES.txt new file mode 100644 index 000000000..b066541cf --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/templates/NOTES.txt @@ -0,0 +1,22 @@ +1.
Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "db.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "db.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "db.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "db.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/db/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/db/templates/_helpers.tpl new file mode 100644 index 000000000..8df84e9e2 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "db.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "db.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "db.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "db.labels" -}} +helm.sh/chart: {{ include "db.chart" . }} +{{ include "db.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "db.selectorLabels" -}} +app.kubernetes.io/name: {{ include "db.name" .
}} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "db.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "db.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/db/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/db/templates/deployment.yaml new file mode 100644 index 000000000..fd879d92f --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/templates/deployment.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "db.fullname" . }} + labels: + {{- include "db.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "db.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "db.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "db.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + - name: POSTGRES_STRING + value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:{{ .Values.global.postgresql.postgresqlPassword }}@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/db/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/db/templates/hpa.yaml new file mode 100644 index 000000000..8dbac030f --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "db.fullname" . }} + labels: + {{- include "db.labels" . 
| nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "db.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/db/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/db/templates/ingress.yaml new file mode 100644 index 000000000..1cbf3640e --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "db.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "db.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/db/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/db/templates/service.yaml new file mode 100644 index 000000000..e0fcac464 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "db.fullname" . }} + labels: + {{- include "db.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "db.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/db/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/db/templates/serviceaccount.yaml new file mode 100644 index 000000000..80decdb84 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "db.serviceAccountName" . }} + labels: + {{- include "db.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/db/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/db/templates/tests/test-connection.yaml new file mode 100644 index 000000000..9dd1bdac9 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "db.fullname" . }}-test-connection" + labels: + {{- include "db.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "db.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/db/values.yaml b/scripts/helm/helmcharts/openreplay/charts/db/values.yaml new file mode 100644 index 000000000..77acb5a80 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/db/values.yaml @@ -0,0 +1,85 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/db + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "db" +fullnameOverride: "db" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
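+  # NOTE: every key under `env:` below is injected verbatim as a container +  # environment variable by the deployment's `range .Values.env` loop. A +  # minimal sketch (LOG_LEVEL is a hypothetical variable shown only to +  # illustrate the mechanism, not a documented db setting): +  # env: +  #   LOG_LEVEL: debug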
+ # limits: + #   cpu: 100m + #   memory: 128Mi + # requests: + #   cpu: 100m + #   memory: 128Mi + +autoscaling: +  enabled: true +  minReplicas: 1 +  maxReplicas: 5 +  targetCPUUtilizationPercentage: 80 +  # targetMemoryUtilizationPercentage: 80 + +env: {} + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/.helmignore b/scripts/helm/helmcharts/openreplay/charts/ender/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/ender/Chart.yaml new file mode 100644 index 000000000..92e286a7d --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: ender +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. The key must be lowercase "appVersion" for Helm to read it. +appVersion: "v1.4.0" diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/ender/templates/NOTES.txt new file mode 100644 index 000000000..13aaa8365 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "ender.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "ender.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "ender.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "ender.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/ender/templates/_helpers.tpl new file mode 100644 index 000000000..1d0dc291c --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "ender.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "ender.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "ender.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "ender.labels" -}} +helm.sh/chart: {{ include "ender.chart" . }} +{{ include "ender.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "ender.selectorLabels" -}} +app.kubernetes.io/name: {{ include "ender.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "ender.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "ender.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/ender/templates/deployment.yaml new file mode 100644 index 000000000..bbdb35c00 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/templates/deployment.yaml @@ -0,0 +1,70 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "ender.fullname" .
}} + labels: + {{- include "ender.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "ender.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "ender.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "ender.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/ender/templates/hpa.yaml new file mode 100644 index 000000000..62138535e --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "ender.fullname" . }} + labels: + {{- include "ender.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "ender.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/ender/templates/ingress.yaml new file mode 100644 index 000000000..7f6cc557b --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "ender.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "ender.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/ender/templates/service.yaml new file mode 100644 index 000000000..be0b79e9e --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "ender.fullname" . }} + labels: + {{- include "ender.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "ender.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/ender/templates/serviceaccount.yaml new file mode 100644 index 000000000..f85c1d07b --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "ender.serviceAccountName" . }} + labels: + {{- include "ender.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/ender/templates/tests/test-connection.yaml new file mode 100644 index 000000000..8fa7e3b55 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "ender.fullname" . }}-test-connection" + labels: + {{- include "ender.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "ender.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/ender/values.yaml b/scripts/helm/helmcharts/openreplay/charts/ender/values.yaml new file mode 100644 index 000000000..d5a7c52b1 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/ender/values.yaml @@ -0,0 +1,85 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/ender + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "ender" +fullnameOverride: "ender" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
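+  # NOTE: the deployment renders its image as <repository>:<tag>, falling +  # back to the chart's appVersion when tag is left empty, and appends an +  # "-ee" suffix when global.enterpriseEditionLicense is set. To pin a +  # specific build instead of tracking the chart default: +  # image: +  #   tag: "v1.4.0"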
+ # limits: + #   cpu: 100m + #   memory: 128Mi + # requests: + #   cpu: 100m + #   memory: 128Mi + +autoscaling: +  enabled: true +  minReplicas: 1 +  maxReplicas: 5 +  targetCPUUtilizationPercentage: 80 +  # targetMemoryUtilizationPercentage: 80 + +env: {} + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helm/helmcharts/openreplay/charts/http/.helmignore b/scripts/helm/helmcharts/openreplay/charts/http/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helm/helmcharts/openreplay/charts/http/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/http/Chart.yaml new file mode 100644 index 000000000..91b9ce66f --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: http +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. The key must be lowercase "appVersion" for Helm to read it. +appVersion: "v1.4.0" diff --git a/scripts/helm/helmcharts/openreplay/charts/http/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/http/templates/NOTES.txt new file mode 100644 index 000000000..f3b24def1 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "http.fullname" . }}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "http.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "http.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "http.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/http/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/http/templates/_helpers.tpl new file mode 100644 index 000000000..c695edd7e --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "http.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "http.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "http.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "http.labels" -}} +helm.sh/chart: {{ include "http.chart" . }} +{{ include "http.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "http.selectorLabels" -}} +app.kubernetes.io/name: {{ include "http.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "http.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "http.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/http/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/http/templates/deployment.yaml new file mode 100644 index 000000000..7de2a80c5 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/templates/deployment.yaml @@ -0,0 +1,80 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "http.fullname" . }} + labels: + {{- include "http.labels" .
| nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "http.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "http.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "http.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_ACCESS_KEY_ID + value: '{{ .Values.global.s3.accessKey }}' + - name: AWS_SECRET_ACCESS_KEY + value: '{{ .Values.global.s3.secretKey }}' + - name: AWS_REGION + value: '{{ .Values.global.s3.region }}' + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + - name: POSTGRES_STRING + value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:{{ .Values.global.postgresql.postgresqlPassword }}@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' + - name: ASSETS_ORIGIN + value: '{{ (split "://" .Values.global.s3.endpoint)._0 }}://{{.Values.global.s3.assetsBucket}}.{{ (split "://" .Values.global.s3.endpoint)._1 }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/http/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/http/templates/hpa.yaml new file mode 100644 index 000000000..8e88b2807 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "http.fullname" . }} + labels: + {{- include "http.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "http.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/http/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/http/templates/ingress.yaml new file mode 100644 index 000000000..51a20e2a6 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "http.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "http.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/http/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/http/templates/service.yaml new file mode 100644 index 000000000..f33db7394 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "http.fullname" . }} + labels: + {{- include "http.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "http.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/http/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/http/templates/serviceaccount.yaml new file mode 100644 index 000000000..7c55e5954 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "http.serviceAccountName" . }} + labels: + {{- include "http.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/http/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/http/templates/tests/test-connection.yaml new file mode 100644 index 000000000..0f29e5a06 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "http.fullname" . }}-test-connection" + labels: + {{- include "http.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "http.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/http/values.yaml b/scripts/helm/helmcharts/openreplay/charts/http/values.yaml new file mode 100644 index 000000000..e841b4cac --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/http/values.yaml @@ -0,0 +1,89 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/http + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "http" +fullnameOverride: "http" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 80 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
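+  # NOTE: the Service's targetPort references the container port named +  # "http", whose number is taken from service.port above, while the env +  # block below also sets HTTP_PORT: 80. If one changes, the other +  # presumably has to follow (an assumption based on the matching defaults, +  # not verified against the http service's source), e.g.: +  # service: +  #   port: 8080 +  # ...together with env.HTTP_PORT: 8080 further down.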
+ # limits: + #   cpu: 100m + #   memory: 128Mi + # requests: + #   cpu: 100m + #   memory: 128Mi + +autoscaling: +  enabled: true +  minReplicas: 1 +  maxReplicas: 5 +  targetCPUUtilizationPercentage: 80 +  # targetMemoryUtilizationPercentage: 80 + +env: +  TOKEN_SECRET: secret_token_string # TODO: generate on build +  S3_BUCKET_IOS_IMAGES: sessions-mobile-assets +  CACHE_ASSETS: false +  HTTP_PORT: 80 + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/.helmignore b/scripts/helm/helmcharts/openreplay/charts/integrations/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/integrations/Chart.yaml new file mode 100644 index 000000000..a643daa69 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: integrations +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. The key must be lowercase "appVersion" for Helm to read it. +appVersion: "v1.4.0" diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/NOTES.txt new file mode 100644 index 000000000..b7af1aaf2 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "integrations.fullname" .
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch its status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "integrations.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "integrations.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "integrations.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/_helpers.tpl new file mode 100644 index 000000000..cdeef0f54 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "integrations.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "integrations.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "integrations.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "integrations.labels" -}} +helm.sh/chart: {{ include "integrations.chart" . }} +{{ include "integrations.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "integrations.selectorLabels" -}} +app.kubernetes.io/name: {{ include "integrations.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "integrations.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "integrations.fullname" .)
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/deployment.yaml new file mode 100644 index 000000000..942f8c0a5 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/deployment.yaml @@ -0,0 +1,72 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "integrations.fullname" . }} + labels: + {{- include "integrations.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "integrations.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "integrations.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "integrations.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + - name: POSTGRES_STRING + value: 'postgres://{{ .Values.global.postgresql.postgresqlUser }}:{{ .Values.global.postgresql.postgresqlPassword }}@{{ .Values.global.postgresql.postgresqlHost }}:{{ .Values.global.postgresql.postgresqlPort }}/{{ .Values.global.postgresql.postgresqlDatabase }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/hpa.yaml new file mode 100644 index 000000000..cb9cf17db --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "integrations.fullname" . }} + labels: + {{- include "integrations.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "integrations.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/ingress.yaml new file mode 100644 index 000000000..236409677 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "integrations.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "integrations.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/service.yaml new file mode 100644 index 000000000..77f0bb3c7 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "integrations.fullname" . }} + labels: + {{- include "integrations.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "integrations.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/serviceaccount.yaml new file mode 100644 index 000000000..0bb50f48d --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "integrations.serviceAccountName" . }} + labels: + {{- include "integrations.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/tests/test-connection.yaml new file mode 100644 index 000000000..ad5e040b5 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "integrations.fullname" . }}-test-connection" + labels: + {{- include "integrations.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "integrations.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/integrations/values.yaml b/scripts/helm/helmcharts/openreplay/charts/integrations/values.yaml new file mode 100644 index 000000000..e35b79308 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/integrations/values.yaml @@ -0,0 +1,86 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/integrations + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "integrations" +fullnameOverride: "integrations" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
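+ # A note on sizing, assuming the HPA below is left enabled: Kubernetes
+ # computes targetCPUUtilizationPercentage against the CPU *requests* set
+ # here, so pods without CPU requests cannot be autoscaled on CPU.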
+ # limits:
+ # cpu: 100m
+ # memory: 128Mi
+ # requests:
+ # cpu: 100m
+ # memory: 128Mi
+
+autoscaling:
+ enabled: true
+ minReplicas: 1
+ maxReplicas: 5
+ targetCPUUtilizationPercentage: 80
+ # targetMemoryUtilizationPercentage: 80
+
+env:
+ TOKEN_SECRET: secret_token_string # TODO: generate on build
+
+
+nodeSelector: {}
+
+tolerations: []
+
+affinity: {}
diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/.helmignore b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/.helmignore
new file mode 100644
index 000000000..0e8a0eb36
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/.helmignore
@@ -0,0 +1,23 @@
+# Patterns to ignore when building packages.
+# This supports shell glob matching, relative path matching, and
+# negation (prefixed with !). Only one pattern per line.
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/Chart.yaml
new file mode 100644
index 000000000..3c824500e
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: nginx-ingress
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "v1.4.0"
diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/NOTES.txt
new file mode 100644
index 000000000..8125afe9c
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "nginx-ingress.fullname" . 
}})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "nginx-ingress.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "nginx-ingress.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ echo http://$SERVICE_IP:{{ (index .Values.service.ports 0).port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "nginx-ingress.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/_helpers.tpl
new file mode 100644
index 000000000..b0f2808f1
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "nginx-ingress.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
+*/}}
+{{- define "nginx-ingress.fullname" -}}
+{{- if .Values.fullnameOverride }}
+{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- $name := default .Chart.Name .Values.nameOverride }}
+{{- if contains $name .Release.Name }}
+{{- .Release.Name | trunc 63 | trimSuffix "-" }}
+{{- else }}
+{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
+{{- end }}
+{{- end }}
+{{- end }}
+
+{{/*
+Create chart name and version as used by the chart label.
+*/}}
+{{- define "nginx-ingress.chart" -}}
+{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Common labels
+*/}}
+{{- define "nginx-ingress.labels" -}}
+helm.sh/chart: {{ include "nginx-ingress.chart" . }}
+{{ include "nginx-ingress.selectorLabels" . }}
+{{- if .Chart.AppVersion }}
+app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
+{{- end }}
+app.kubernetes.io/managed-by: {{ .Release.Service }}
+{{- end }}
+
+{{/*
+Selector labels
+*/}}
+{{- define "nginx-ingress.selectorLabels" -}}
+app.kubernetes.io/name: {{ include "nginx-ingress.name" . }}
+app.kubernetes.io/instance: {{ .Release.Name }}
+{{- end }}
+
+{{/*
+Create the name of the service account to use
+*/}}
+{{- define "nginx-ingress.serviceAccountName" -}}
+{{- if .Values.serviceAccount.create }}
+{{- default (include "nginx-ingress.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml new file mode 100644 index 000000000..f635b270c --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/configMap.yaml @@ -0,0 +1,154 @@ +apiVersion: v1 +kind: ConfigMap +metadata: + name: nginx + namespace: {{ .Release.Namespace }} +data: + location.list: |- + location ~* /general_stats { + deny all; + } + location /healthz { + return 200 'OK'; + } + location ~ ^/(mobs|sessions-assets|frontend|static|sourcemaps|ios-images)/ { + proxy_set_header X-Real-IP $remote_addr; + proxy_set_header X-Forwarded-For $proxy_add_x_forwarded_for; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header Host $http_host; + + proxy_connect_timeout 300; + # Default is HTTP/1, keepalive is only enabled in HTTP/1.1 + proxy_http_version 1.1; + proxy_set_header Connection ""; + chunked_transfer_encoding off; + + proxy_pass http://minio.db.svc.cluster.local:9000; + } + + location /minio/ { + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_pass http://minio.db.svc.cluster.local:9000; + } + location /ingest/ { + rewrite ^/ingest/(.*) /$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header X-Forwarded-For $real_ip; + proxy_set_header X-Forwarded-Host $real_ip; + proxy_set_header X-Real-IP $real_ip; + proxy_set_header Host $host; + proxy_pass http://http-openreplay.app.svc.cluster.local; + proxy_read_timeout 300; + proxy_connect_timeout 120; + proxy_send_timeout 300; + } + location /grafana { + set $target http://monitoring-grafana.monitoring.svc.cluster.local; + rewrite ^/grafana/(.*) /$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_pass $target; + } + location /api/ { + rewrite ^/api/(.*) /$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_set_header X-Forwarded-Proto $scheme; + proxy_pass http://chalice-openreplay.app.svc.cluster.local:8000; + } + location /assist/ { + rewrite ^/assist/(.*) /$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_pass http://utilities-openreplay.app.svc.cluster.local:9000; + } + location /assets/ { + rewrite ^/assets/(.*) /sessions-assets/$1 break; + proxy_http_version 1.1; + proxy_set_header Upgrade $http_upgrade; + proxy_set_header Connection "Upgrade"; + proxy_set_header Host $host; + proxy_pass http://minio.db.svc.cluster.local:9000; + } + location / { + index /index.html; + rewrite ^((?!.(js|css|png|svg|jpg|woff|woff2)).)*$ /frontend/index.html break; + include /etc/nginx/conf.d/compression.conf; + proxy_set_header Host $http_host; + proxy_pass http://minio.db.svc.cluster.local:9000/frontend/; + proxy_intercept_errors on; # see http://nginx.org/en/docs/http/ngx_http_proxy_module.html#proxy_intercept_errors + error_page 404 =200 /index.html; + } + compression.conf: |- + # Compression + gzip on; + gzip_comp_level 5; + gzip_min_length 256; # 256Bytes + 
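# The two directives below matter behind a proxy: gzip_proxied any also
+ # compresses responses fetched via proxy_pass, and gzip_vary emits
+ # "Vary: Accept-Encoding" so shared caches keep variants apart.
+ 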
gzip_proxied any;
+ gzip_vary on;
+ # Content types for compression
+ gzip_types
+ application/atom+xml
+ application/javascript
+ application/json
+ application/ld+json
+ application/manifest+json
+ application/rss+xml
+ application/vnd.geo+json
+ application/vnd.ms-fontobject
+ application/x-font-ttf
+ application/x-web-app-manifest+json
+ application/xhtml+xml
+ application/xml
+ font/opentype
+ image/bmp
+ image/svg+xml
+ image/x-icon
+ text/cache-manifest
+ text/css
+ text/plain
+ ;
+
+ sites.conf: |-
+ # Ref: https://github.com/openresty/openresty/#resolvconf-parsing
+ resolver local=on;
+ # We need the real client IP for the geo flags shown in session replay.
+ # Some load balancers forward the real IP in X-Forwarded-For,
+ # so give that header priority when it is present.
+ map $http_x_forwarded_for $real_ip {
+ ~^(\d+\.\d+\.\d+\.\d+) $1;
+ default $remote_addr;
+ }
+ map $http_upgrade $connection_upgrade {
+ default upgrade;
+ '' close;
+ }
+ server {
+ listen 80 default_server;
+ listen [::]:80 default_server;
+ # server_name _;
+ {{ .Values.customServerConfigs }}
+ include /etc/nginx/conf.d/location.list;
+ client_max_body_size 10M;
+ }
+ server {
+ listen 443 ssl;
+ ssl_certificate /etc/secrets/site.crt;
+ ssl_certificate_key /etc/secrets/site.key;
+ ssl_protocols TLSv1.2 TLSv1.3;
+ ssl_ciphers "EECDH+ECDSA+AESGCM EECDH+aRSA+AESGCM EECDH+ECDSA+SHA384 EECDH+ECDSA+SHA256 EECDH+aRSA+SHA384 EECDH+aRSA+SHA256 EECDH+aRSA+RC4 EECDH EDH+aRSA HIGH !RC4 !aNULL !eNULL !LOW !3DES !MD5 !EXP !PSK !SRP !DSS";
+ include /etc/nginx/conf.d/location.list;
+ client_max_body_size 10M;
+ }
+ 
diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/deployment.yaml
new file mode 100644
index 000000000..7a64f5886
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/deployment.yaml
@@ -0,0 +1,77 @@
+apiVersion: apps/v1
+kind: Deployment
+metadata:
+ name: {{ include "nginx-ingress.fullname" . }}
+ labels:
+ {{- include "nginx-ingress.labels" . | nindent 4 }}
+spec:
+ {{- if not .Values.autoscaling.enabled }}
+ replicas: {{ .Values.replicaCount }}
+ {{- end }}
+ selector:
+ matchLabels:
+ {{- include "nginx-ingress.selectorLabels" . | nindent 6 }}
+ template:
+ metadata:
+ annotations:
+ nginxRolloutID: {{ randAlphaNum 5 | quote }} # Restart nginx after every deployment
+ {{- with .Values.podAnnotations }}
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ labels:
+ {{- include "nginx-ingress.selectorLabels" . | nindent 8 }}
+ spec:
+ {{- with .Values.imagePullSecrets }}
+ imagePullSecrets:
+ {{- toYaml . | nindent 8 }}
+ {{- end }}
+ serviceAccountName: {{ include "nginx-ingress.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + imagePullPolicy: {{ .Values.image.pullPolicy }} + ports: + - name: https + containerPort: 443 + protocol: TCP + - name: http + containerPort: 80 + protocol: TCP + livenessProbe: + httpGet: + path: /healthz + port: http + readinessProbe: + httpGet: + path: /healthz + port: http + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: nginx + mountPath: /etc/nginx/conf.d/ + - name: ssl + mountPath: /etc/secrets/ + volumes: + - name: nginx + configMap: + name: nginx + - name: ssl + secret: + secretName: ssl + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/hpa.yaml new file mode 100644 index 000000000..348f8f95b --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "nginx-ingress.fullname" . }} + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "nginx-ingress.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/ingress.yaml new file mode 100644 index 000000000..63cfce077 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "nginx-ingress.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "nginx-ingress.labels" . 
| nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/secrets.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/secrets.yaml new file mode 100644 index 000000000..e5e4d7dd9 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/secrets.yaml @@ -0,0 +1,9 @@ +--- +apiVersion: v1 +kind: Secret +metadata: + name: ssl +data: + ca.crt: "" + site.crt: "{{ .Values.ssl_certificate | b64enc }}" + site.key: "{{ .Values.ssl_privatekey | b64enc }}" diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/service.yaml new file mode 100644 index 000000000..94c034375 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/service.yaml @@ -0,0 +1,19 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "nginx-ingress.fullname" . }} + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + # Make sure to get client ip + externalTrafficPolicy: Local + ports: + {{- range .Values.service.ports }} + - port: {{ .port }} + targetPort: {{ .targetPort }} + protocol: TCP + name: {{ .targetPort }} + {{- end }} + selector: + {{- include "nginx-ingress.selectorLabels" . | nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/serviceaccount.yaml new file mode 100644 index 000000000..bc0091029 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "nginx-ingress.serviceAccountName" . }} + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/tests/test-connection.yaml new file mode 100644 index 000000000..074cec518 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "nginx-ingress.fullname" . 
}}-test-connection" + labels: + {{- include "nginx-ingress.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "nginx-ingress.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/values.yaml b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/values.yaml new file mode 100644 index 000000000..8c595aafa --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/nginx-ingress/values.yaml @@ -0,0 +1,173 @@ +# Default values for nginx-ingress. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: nginx + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "" +fullnameOverride: "" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: LoadBalancer + ports: + - port: 80 + targetPort: http + - port: 443 + targetPort: https + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: false + minReplicas: 1 + maxReplicas: 100 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +nodeSelector: {} + +tolerations: [] + +affinity: {} + + +ssl_certificate: |- + -----BEGIN CERTIFICATE----- + MIIFITCCAwmgAwIBAgIUQ8hQoDbW3Z4DxRVjIYlIlbEHp/8wDQYJKoZIhvcNAQEL + BQAwIDEeMBwGA1UEAwwVb3BlbnJlcGxheS5sb2NhbC5ob3N0MB4XDTIxMTIyMjA3 + NDIxOVoXDTIyMTIyMjA3NDIxOVowIDEeMBwGA1UEAwwVb3BlbnJlcGxheS5sb2Nh + bC5ob3N0MIICIjANBgkqhkiG9w0BAQEFAAOCAg8AMIICCgKCAgEAyXTX6RwqNVM+ + LSvc5TkuBnxlw1sHxtkkojwpbwavr6ccSdtoYB7KYwcufh0zz3LaSDgPNqStOf6w + hAWV830bxvOvU6yJ7MgP8/htfY1KWIoNS6ducoct4VhgshWXWwQtrtWZJku+cyds + QTkr2BziSX+Y7/1rALKbOU4CIRCKtJ2jeaI+c4kcXXB+ARauDlqB7+CS4B+wjlfX + sOoC2bWgZOxyZnHolb3hKMLfBswLwYq0DRjjNMDqX8xS6V1AgoTrCxl1DqPLw47o + immbSKZ4voot60cSBYVK4qOX5Nqw5RmqwELb9Ib4QPVCt9HjbYQp77EcOonkgE4l + fYabvvOeM/U6vdtZhI2CJg0tkytuJ4+Hb7i7nRK2SRMppmtP7yDDXpMGoAXK2bVZ + ipZBRct0onxLifH5vdrUNbOlXItjWLQMfiHlDeG48kbXbKaJPv3tRvU0Gix2X8SJ + OlRNezNNz8pce0Bbgx3YoQhrRTad4CC6cIpRjgTt/pww3BoF7jDLl6RNI1cXfy4u + tkSlMqAQV6x0aig9Ldg1VFM2oCaEyvzx0BWDm/jmbZcyVizlb+uQQ/huNSJXT++p + CmPNG7rP6eYNTh7+7DDWvKBQQFWOPaVfwvrhzvb7q2B2Bmc33bDXeRuF4MJE6syA + YUCV2Ztw65uI864PRDIKO4ru1UQgx5sCAwEAAaNTMFEwHQYDVR0OBBYEFIdNQGn2 + z3xmJfExKAUeohFnLjzsMB8GA1UdIwQYMBaAFIdNQGn2z3xmJfExKAUeohFnLjzs + MA8GA1UdEwEB/wQFMAMBAf8wDQYJKoZIhvcNAQELBQADggIBAJvkSX+RoYGOcU0z + qEWlAN6Jy0CzLsTp/VoSZY9gd+hLFd/lK7SjXhAnNyEELpeanGs04ytJpXnSH1bA + dGs0UeWYalOxrHSN4ln5SNzWVE8kODM1zcyNePllI/PVcXLmujQz5wNbNoC5Qt8p + 0RoZ2wInmyh2wTQcflIPUtncsw84ozVVuebmc6jiuPxnxdTAXeYOwKUF25t8rSp6 + 5n23F0GP8Ypu7vjT7N2RpUe0zkutaij+uISBHZw50ohrelPlV4V9qhp6MV+h9xuh + 0z8OEyq2vK4KNn96A97mSRuqqt6Ajb2MHdErTr6fgj5/CtSD337oIK3froRmID8s + /JXADsNnBEqQBfcM6gSaw1M/fHDPNZzwVv6yAN+bKrI+KEmKJD31Tm2G55oPvLTP + XZdmVIAqxIu89v/GOJ2J29vC+h9pTjTze31DFg0niwLcr1aNawiC2d4n2wdDwKwc + HnCnflELyYcn4KgvpLNz5wEKEHTAQ3JF5VIel1/uqYN9cosw1vjRskPK/g3nIEPG + T247naj+JbW244P0jxb57VWiD/7IJ4ZErA1KrvqR/y1NnGxrgXoRjwmhCv/4YIYi + qgnvF7IkwGozdoLPiBMmvjNq/AmVLrfZNPxZjHL3nIW+PBEeBD/lkH36mcakg/1S + w7yMPvE+TIh6+HwDZc2jNLkv/8tY + -----END CERTIFICATE----- + +ssl_privatekey: |- + -----BEGIN PRIVATE KEY----- + MIIJQwIBADANBgkqhkiG9w0BAQEFAASCCS0wggkpAgEAAoICAQDJdNfpHCo1Uz4t + K9zlOS4GfGXDWwfG2SSiPClvBq+vpxxJ22hgHspjBy5+HTPPctpIOA82pK05/rCE + BZXzfRvG869TrInsyA/z+G19jUpYig1Lp25yhy3hWGCyFZdbBC2u1ZkmS75zJ2xB + OSvYHOJJf5jv/WsAsps5TgIhEIq0naN5oj5ziRxdcH4BFq4OWoHv4JLgH7COV9ew + 6gLZtaBk7HJmceiVveEowt8GzAvBirQNGOM0wOpfzFLpXUCChOsLGXUOo8vDjuiK + aZtIpni+ii3rRxIFhUrio5fk2rDlGarAQtv0hvhA9UK30eNthCnvsRw6ieSATiV9 + hpu+854z9Tq921mEjYImDS2TK24nj4dvuLudErZJEymma0/vIMNekwagBcrZtVmK + lkFFy3SifEuJ8fm92tQ1s6Vci2NYtAx+IeUN4bjyRtdspok+/e1G9TQaLHZfxIk6 + VE17M03Pylx7QFuDHdihCGtFNp3gILpwilGOBO3+nDDcGgXuMMuXpE0jVxd/Li62 + RKUyoBBXrHRqKD0t2DVUUzagJoTK/PHQFYOb+OZtlzJWLOVv65BD+G41IldP76kK + Y80bus/p5g1OHv7sMNa8oFBAVY49pV/C+uHO9vurYHYGZzfdsNd5G4XgwkTqzIBh + QJXZm3Drm4jzrg9EMgo7iu7VRCDHmwIDAQABAoICAQCebjlupiu7jB+Vvq0VyAYe + K66MGAbhpttcixu6qPN5nF5u5xIKpaxcfMVfgO/B8X0g1pWAT7m7pkSDTzFCL92s + dPApScOeZyfEolbZKkiRoOAb4yzE/PJkCfDhnIFPntWebXTn3SGFxjcohCGq7+w2 + CRbphc6k2dGhG2wpPK0YpfBuM94RVn7sLQ+rI3724s7VKzPW9pUPHJ4QD7j2JhRh + ymGdl29mc9GjEL38xnNoXgCDXFMypZSsii+aPzAAdS+zpu2b+czBmp3eXHc2h1Tl + 5B2Arn/Jv63I1wcZf7MmOS1DzlDU2WBbFYbGsVW+RvYD/rFIiDEfhlWNhlLttQFw + TJ9xk+EePK9VQuWzN5tG1lEjGcNWtPLUp3IxZTqaei5lWu6zyA6HVsxjyArzmfNk + x0fRpZU+VZYzbkgj0ROq3wg7QEEMQ8SPo9vvLF1ZNnndzs/ziPA1CodUuSwa6B2c + 
Zeref4s0B//q3U1SDQE08OD9iuZODwtkO4wQtW2DP33gC6VIts94jg87z8SRDp2g + DcT3D8ZhV5B2VPelluQZ/scWKGWKAvPVRjq51EiMeZtFBVyM6+o0xW2+MxxZdjbj + OWexc+dw8QfwIlFRm0v8Tfvljk1prqYEMLV4s9JD8up5X1h3Yg5uAsQpdZ+1JkGm + 5UvvQQVQgxkC1NFXxqYyQQKCAQEA95r3oYm+bnQXHXX4dBQO98+XwOOfnhMk815p + /CAuxCzbPNafjqyAxRgmp5D/IkdLzbitDL3uo73ot1RwB4CEN/Ovp3a/+CklBnnA + 0bKAtsGE2XWaqFeguVIy25WEWKaTxKGX0g6KHkOvGt0DNo4wUJUk+2sAqIvXU1Q6 + tUbd+8YRYxO7i6+92K7kxoZega6qiA/L3akZ2uTzFf+IskfqmDUoF2ZaEOFluG8E + ASX3KoVFfraV3DBEN0ionvfpaRIidr2IsuC848zHFBtAXA0mL55BCuf++HmAZnpy + HFN7owVVgqbEw+GGbNdRLt5zV00DmX/sHsIZU/gCLRPsfPUAqQKCAQEA0ElWWiS4 + IA91lWbzCwswFrHvjpcogh67fNd9kJCcFCUHabev7SSrIEcjqDP2m6HPvp/FwxYA + PEo1/vDZ884v9roft2J13OvpvXoqtRZLGo1E76sECBrcto4nhCiTeRQg9uRpHG+Q + p77QC/4eRBLGykFRJET6913x7JzpjAO0QLLLzilj1yBkbF5U01Up5KbIuNeXNvEO + GVGpbryIXxwR6Qhyv7C54xpjRdu9EOT1frRqdIs0qOGafnLXWAXKfvWUzz1wSiiw + 1p7xqYZrawXAr7XEkGA2aeqt/iqo2X2G9oYA0apJVwfR4WhuS2hPkSy405bsrGzZ + cjMs9bnJSYP8owKCAQEAxCTSvfisDjuQhBoL84hgQxcEFB09OK/ZuaC1PLER2v3d + vtgWFaO5bmivVlaahcEM367IByv+e1/ZlkEhbg/0rY4xO+vqLuAJIJQalwNcy2mJ + n+p11Z11CNmAyEotSTzMGhwYdKJn74mWkSU7gmApDezYGwKsxtfgf3Zd+3RkLSq+ + Y0oia4mQTrJdMJcJDpobJSW+TZ3DiY+MsYR3+SLXSDPzynWeK3kiZ3QqK+6zWc+x + OavSE1d48oJwcV3aXQ2sl3uVan51o894dQkRdtpDwb0PsWAOry8w8/1Tn/TSIFX9 + Yz5Q6Qsivd3jxckafbHYhCS+G6+O+OGid6ssz+AV4QKCAQAqK78ND0QsUZT4A9kP + kltRLQOYtiggeEJzm1mz7GN9fKXMlMFM3VC8f0rL4oF6rz9VlBnBTvILQuc9z9wB + De0OIk8LnSbJ7QXtNA/zjCj2nkWn1NNDJNUtLQj5LBH3wMiP1F0nwbrjC7Ipy3Cr + TbXr+1+HXWQGs4Go63gpvhI/yzOScTTiuI88lbjM9QA/aDlZm2TlXdcB71PDtO5T + e2Zw7SH2h7yLK6uP2FamVgUSe0rWf9zQmKTkFzJcgwelvuk7MHBMw4JSYeoB7dJP + 3+FMchvzM1exCC/kNxTqvAyYWzdNPBIPSekHn1I9eEgr14cwZ+1RV9SK16uxsMT9 + WnjLAoIBADKutRKB8nH+wD3sa4cP782QNbkDqJCcb3rPntnCWI/jA2TeY/wAvrXa + 8yFtSSeYSwN9Wr+UosSkQ+OQSO0WmT2NrxdkH8jK8kYnzYkJ9+EFE2YpMN2UosSb + esQ9oEMnivBMNv8DnB4IuO8LjTj1rhqcBmWJH1zvDi1Ur+/uAb+6XLm0Dp/59/Rn + PSlLQmFraq6mrUkKTU40zyT6eK8AvIn/+sXAF1Xb9Vnm8Ndl+gZ4imzjcCubbq+6 + PqvLjFJNGyya6b3MX4RSxVGfkIf5f6bcSSZ0zzSB3qLbCKS+JawwR1WF2rJp6Hj5 + 7qINKoGovqXB1oAdopIl1z64e7MWVE4= + -----END PRIVATE KEY----- diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/.helmignore b/scripts/helm/helmcharts/openreplay/charts/sink/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/sink/Chart.yaml new file mode 100644 index 000000000..2b9f71d56 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: sink +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. 
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "v1.4.0"
diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/sink/templates/NOTES.txt
new file mode 100644
index 000000000..e49e60d4c
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/sink/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "sink.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "sink.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "sink.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "sink.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}")
+ export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}")
+ echo "Visit http://127.0.0.1:8080 to use your application"
+ kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT
+{{- end }}
diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/sink/templates/_helpers.tpl
new file mode 100644
index 000000000..39f92ccc1
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/sink/templates/_helpers.tpl
@@ -0,0 +1,62 @@
+{{/*
+Expand the name of the chart.
+*/}}
+{{- define "sink.name" -}}
+{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
+{{- end }}
+
+{{/*
+Create a default fully qualified app name.
+We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
+If release name contains chart name it will be used as a full name.
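+For example, a release named "sink" installing this chart renders just "sink" rather than "sink-sink".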
+*/}} +{{- define "sink.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "sink.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "sink.labels" -}} +helm.sh/chart: {{ include "sink.chart" . }} +{{ include "sink.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "sink.selectorLabels" -}} +app.kubernetes.io/name: {{ include "sink.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "sink.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "sink.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/sink/templates/deployment.yaml new file mode 100644 index 000000000..ced1ee0c5 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/templates/deployment.yaml @@ -0,0 +1,86 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "sink.fullname" . }} + labels: + {{- include "sink.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "sink.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "sink.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "sink.serviceAccountName" . 
}} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: /mnt/efs + {{- if eq .Values.pvc.name "hostPath" }} + volumes: + - name: datadir + hostPath: + # Ensure the file directory is created. + path: {{ .Values.pvc.hostMountPath }} + type: DirectoryOrCreate + {{- else }} + volumes: + - name: datadir + persistentVolumeClaim: + claimName: {{ .Values.pvc.name }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/sink/templates/hpa.yaml new file mode 100644 index 000000000..8f9a98f79 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "sink.fullname" . }} + labels: + {{- include "sink.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "sink.fullname" . }} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/sink/templates/ingress.yaml new file mode 100644 index 000000000..ac5b25ba2 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "sink.fullname" . 
-}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "sink.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/sink/templates/service.yaml new file mode 100644 index 000000000..d2c0870c3 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "sink.fullname" . }} + labels: + {{- include "sink.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "sink.selectorLabels" . | nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/sink/templates/serviceaccount.yaml new file mode 100644 index 000000000..34986e78d --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "sink.serviceAccountName" . }} + labels: + {{- include "sink.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/sink/templates/tests/test-connection.yaml new file mode 100644 index 000000000..248381268 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "sink.fullname" . 
}}-test-connection" + labels: + {{- include "sink.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "sink.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/sink/values.yaml b/scripts/helm/helmcharts/openreplay/charts/sink/values.yaml new file mode 100644 index 000000000..9d55ee370 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/sink/values.yaml @@ -0,0 +1,92 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/sink + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "sink" +fullnameOverride: "sink" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. + # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: {} + +pvc: + # This can be either persistentVolumeClaim or hostPath. + # In case of pvc, you'll have to provide the pvc name. + # For example + # name: openreplay-efs + name: hostPath + hostMountPath: /openreplay/storage/nfs + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/.helmignore b/scripts/helm/helmcharts/openreplay/charts/storage/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. 
+.DS_Store
+# Common VCS dirs
+.git/
+.gitignore
+.bzr/
+.bzrignore
+.hg/
+.hgignore
+.svn/
+# Common backup files
+*.swp
+*.bak
+*.tmp
+*.orig
+*~
+# Various IDEs
+.project
+.idea/
+*.tmproj
+.vscode/
diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/storage/Chart.yaml
new file mode 100644
index 000000000..329af9ad0
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/storage/Chart.yaml
@@ -0,0 +1,24 @@
+apiVersion: v2
+name: storage
+description: A Helm chart for Kubernetes
+
+# A chart can be either an 'application' or a 'library' chart.
+#
+# Application charts are a collection of templates that can be packaged into versioned archives
+# to be deployed.
+#
+# Library charts provide useful utilities or functions for the chart developer. They're included as
+# a dependency of application charts to inject those utilities and functions into the rendering
+# pipeline. Library charts do not define any templates and therefore cannot be deployed.
+type: application
+
+# This is the chart version. This version number should be incremented each time you make changes
+# to the chart and its templates, including the app version.
+# Versions are expected to follow Semantic Versioning (https://semver.org/)
+version: 0.1.0
+
+# This is the version number of the application being deployed. This version number should be
+# incremented each time you make changes to the application. Versions are not expected to
+# follow Semantic Versioning. They should reflect the version the application is using.
+# It is recommended to use it with quotes.
+appVersion: "v1.4.0"
diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/storage/templates/NOTES.txt
new file mode 100644
index 000000000..217426ab3
--- /dev/null
+++ b/scripts/helm/helmcharts/openreplay/charts/storage/templates/NOTES.txt
@@ -0,0 +1,22 @@
+1. Get the application URL by running these commands:
+{{- if .Values.ingress.enabled }}
+{{- range $host := .Values.ingress.hosts }}
+ {{- range .paths }}
+ http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }}
+ {{- end }}
+{{- end }}
+{{- else if contains "NodePort" .Values.service.type }}
+ export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "storage.fullname" . }})
+ export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
+ echo http://$NODE_IP:$NODE_PORT
+{{- else if contains "LoadBalancer" .Values.service.type }}
+ NOTE: It may take a few minutes for the LoadBalancer IP to be available.
+ You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "storage.fullname" . }}'
+ export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "storage.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
+ echo http://$SERVICE_IP:{{ .Values.service.port }}
+{{- else if contains "ClusterIP" .Values.service.type }}
+ export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "storage.name" . 
}},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/storage/templates/_helpers.tpl new file mode 100644 index 000000000..9f87a2965 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "storage.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "storage.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "storage.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "storage.labels" -}} +helm.sh/chart: {{ include "storage.chart" . }} +{{ include "storage.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "storage.selectorLabels" -}} +app.kubernetes.io/name: {{ include "storage.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "storage.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "storage.fullname" .) .Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/storage/templates/deployment.yaml new file mode 100644 index 000000000..4ca3be6e2 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/templates/deployment.yaml @@ -0,0 +1,102 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "storage.fullname" . }} + labels: + {{- include "storage.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "storage.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "storage.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . 
| nindent 8 }} + {{- end }} + serviceAccountName: {{ include "storage.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_ACCESS_KEY_ID + value: '{{ .Values.global.s3.accessKey }}' + - name: AWS_SECRET_ACCESS_KEY + value: '{{ .Values.global.s3.secretKey }}' + - name: AWS_ENDPOINT + value: '{{ .Values.global.s3.endpoint }}' + - name: AWS_REGION_WEB + value: '{{ .Values.global.s3.region }}' + - name: AWS_REGION_IOS + value: '{{ .Values.global.s3.region }}' + - name: S3_BUCKET_WEB + value: '{{ .Values.global.s3.recordings_bucket }}' + - name: S3_BUCKET_IOS + value: '{{ .Values.global.s3.recordings_bucket }}' + - name: REDIS_STRING + value: '{{ .Values.global.redis.redisHost }}:{{ .Values.global.redis.redisPort }}' + - name: LICENSE_KEY + value: '{{ .Values.global.enterpriseEditionLicense }}' + - name: KAFKA_SERVERS + value: '{{ .Values.global.kafka.kafkaHost }}:{{ .Values.global.kafka.kafkaPort }}' + - name: KAFKA_USE_SSL + value: '{{ .Values.global.kafka.kafkaUseSsl }}' + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + volumeMounts: + - name: datadir + mountPath: /mnt/efs + {{- if eq .Values.pvc.name "hostPath" }} + volumes: + - name: datadir + hostPath: + # Ensure the file directory is created. + path: {{ .Values.pvc.hostMountPath }} + type: DirectoryOrCreate + {{- else }} + volumes: + - name: datadir + persistentVolumeClaim: + claimName: {{ .Values.pvc.name }} + {{- end }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/storage/templates/hpa.yaml new file mode 100644 index 000000000..d015e4bb2 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "storage.fullname" . }} + labels: + {{- include "storage.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "storage.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/storage/templates/ingress.yaml new file mode 100644 index 000000000..10bf66e98 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "storage.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "storage.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/storage/templates/service.yaml new file mode 100644 index 000000000..f55083c92 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "storage.fullname" . }} + labels: + {{- include "storage.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "storage.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/storage/templates/serviceaccount.yaml new file mode 100644 index 000000000..a361acc50 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "storage.serviceAccountName" . }} + labels: + {{- include "storage.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/storage/templates/tests/test-connection.yaml new file mode 100644 index 000000000..84b0a6786 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "storage.fullname" . }}-test-connection" + labels: + {{- include "storage.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "storage.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/storage/values.yaml b/scripts/helm/helmcharts/openreplay/charts/storage/values.yaml new file mode 100644 index 000000000..9f1dbfd91 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/storage/values.yaml @@ -0,0 +1,93 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/storage + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "storage" +fullnameOverride: "storage" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: + FS_CLEAN_HRS: 24 + +pvc: + # This can be either persistentVolumeClaim or hostPath. + # In case of pvc, you'll have to provide the pvc name. + # For example + # name: openreplay-efs + name: hostPath + hostMountPath: /openreplay/storage/nfs + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/.helmignore b/scripts/helm/helmcharts/openreplay/charts/utilities/.helmignore new file mode 100644 index 000000000..0e8a0eb36 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/.helmignore @@ -0,0 +1,23 @@ +# Patterns to ignore when building packages. +# This supports shell glob matching, relative path matching, and +# negation (prefixed with !). Only one pattern per line. +.DS_Store +# Common VCS dirs +.git/ +.gitignore +.bzr/ +.bzrignore +.hg/ +.hgignore +.svn/ +# Common backup files +*.swp +*.bak +*.tmp +*.orig +*~ +# Various IDEs +.project +.idea/ +*.tmproj +.vscode/ diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/Chart.yaml b/scripts/helm/helmcharts/openreplay/charts/utilities/Chart.yaml new file mode 100644 index 000000000..4c00e90b2 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/Chart.yaml @@ -0,0 +1,24 @@ +apiVersion: v2 +name: utilities +description: A Helm chart for Kubernetes + +# A chart can be either an 'application' or a 'library' chart. +# +# Application charts are a collection of templates that can be packaged into versioned archives +# to be deployed. +# +# Library charts provide useful utilities or functions for the chart developer. They're included as +# a dependency of application charts to inject those utilities and functions into the rendering +# pipeline. Library charts do not define any templates and therefore cannot be deployed. +type: application + +# This is the chart version. This version number should be incremented each time you make changes +# to the chart and its templates, including the app version. +# Versions are expected to follow Semantic Versioning (https://semver.org/) +version: 0.1.0 + +# This is the version number of the application being deployed. This version number should be +# incremented each time you make changes to the application. Versions are not expected to +# follow Semantic Versioning. They should reflect the version the application is using. +# It is recommended to use it with quotes. +appVersion: "v1.4.0" diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/templates/NOTES.txt b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/NOTES.txt new file mode 100644 index 000000000..323bc20c1 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/NOTES.txt @@ -0,0 +1,22 @@ +1. Get the application URL by running these commands: +{{- if .Values.ingress.enabled }} +{{- range $host := .Values.ingress.hosts }} + {{- range .paths }} + http{{ if $.Values.ingress.tls }}s{{ end }}://{{ $host.host }}{{ .path }} + {{- end }} +{{- end }} +{{- else if contains "NodePort" .Values.service.type }} + export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ include "utilities.fullname" . 
}}) + export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}") + echo http://$NODE_IP:$NODE_PORT +{{- else if contains "LoadBalancer" .Values.service.type }} + NOTE: It may take a few minutes for the LoadBalancer IP to be available. + You can watch the status by running 'kubectl get --namespace {{ .Release.Namespace }} svc -w {{ include "utilities.fullname" . }}' + export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ include "utilities.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}") + echo http://$SERVICE_IP:{{ .Values.service.port }} +{{- else if contains "ClusterIP" .Values.service.type }} + export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ include "utilities.name" . }},app.kubernetes.io/instance={{ .Release.Name }}" -o jsonpath="{.items[0].metadata.name}") + export CONTAINER_PORT=$(kubectl get pod --namespace {{ .Release.Namespace }} $POD_NAME -o jsonpath="{.spec.containers[0].ports[0].containerPort}") + echo "Visit http://127.0.0.1:8080 to use your application" + kubectl --namespace {{ .Release.Namespace }} port-forward $POD_NAME 8080:$CONTAINER_PORT +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/templates/_helpers.tpl b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/_helpers.tpl new file mode 100644 index 000000000..8999db4be --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/_helpers.tpl @@ -0,0 +1,62 @@ +{{/* +Expand the name of the chart. +*/}} +{{- define "utilities.name" -}} +{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Create a default fully qualified app name. +We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec). +If release name contains chart name it will be used as a full name. +*/}} +{{- define "utilities.fullname" -}} +{{- if .Values.fullnameOverride }} +{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- $name := default .Chart.Name .Values.nameOverride }} +{{- if contains $name .Release.Name }} +{{- .Release.Name | trunc 63 | trimSuffix "-" }} +{{- else }} +{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }} +{{- end }} +{{- end }} +{{- end }} + +{{/* +Create chart name and version as used by the chart label. +*/}} +{{- define "utilities.chart" -}} +{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }} +{{- end }} + +{{/* +Common labels +*/}} +{{- define "utilities.labels" -}} +helm.sh/chart: {{ include "utilities.chart" . }} +{{ include "utilities.selectorLabels" . }} +{{- if .Chart.AppVersion }} +app.kubernetes.io/version: {{ .Chart.AppVersion | quote }} +{{- end }} +app.kubernetes.io/managed-by: {{ .Release.Service }} +{{- end }} + +{{/* +Selector labels +*/}} +{{- define "utilities.selectorLabels" -}} +app.kubernetes.io/name: {{ include "utilities.name" . }} +app.kubernetes.io/instance: {{ .Release.Name }} +{{- end }} + +{{/* +Create the name of the service account to use +*/}} +{{- define "utilities.serviceAccountName" -}} +{{- if .Values.serviceAccount.create }} +{{- default (include "utilities.fullname" .) 
.Values.serviceAccount.name }} +{{- else }} +{{- default "default" .Values.serviceAccount.name }} +{{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/templates/deployment.yaml b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/deployment.yaml new file mode 100644 index 000000000..628cda58d --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/deployment.yaml @@ -0,0 +1,74 @@ +apiVersion: apps/v1 +kind: Deployment +metadata: + name: {{ include "utilities.fullname" . }} + labels: + {{- include "utilities.labels" . | nindent 4 }} +spec: + {{- if not .Values.autoscaling.enabled }} + replicas: {{ .Values.replicaCount }} + {{- end }} + selector: + matchLabels: + {{- include "utilities.selectorLabels" . | nindent 6 }} + template: + metadata: + {{- with .Values.podAnnotations }} + annotations: + {{- toYaml . | nindent 8 }} + {{- end }} + labels: + {{- include "utilities.selectorLabels" . | nindent 8 }} + spec: + {{- with .Values.imagePullSecrets }} + imagePullSecrets: + {{- toYaml . | nindent 8 }} + {{- end }} + serviceAccountName: {{ include "utilities.serviceAccountName" . }} + securityContext: + {{- toYaml .Values.podSecurityContext | nindent 8 }} + containers: + - name: {{ .Chart.Name }} + securityContext: + {{- toYaml .Values.securityContext | nindent 12 }} + {{- if .Values.global.enterpriseEditionLicense }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}-ee" + {{- else }} + image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}" + {{- end }} + imagePullPolicy: {{ .Values.image.pullPolicy }} + env: + - name: AWS_DEFAULT_REGION + value: "{{ .Values.global.s3.region }}" + - name: S3_HOST + {{- if eq .Values.global.s3.endpoint "http://minio.db.svc.cluster.local:9000" }} + value: 'https://{{ .Values.global.domainName }}' + {{- else}} + value: '{{ .Values.global.s3.endpoint }}' + {{- end}} + - name: S3_KEY + value: "{{ .Values.global.s3.accessKey }}" + - name: S3_SECRET + value: "{{ .Values.global.s3.secretKey }}" + {{- range $key, $val := .Values.env }} + - name: {{ $key }} + value: '{{ $val }}' + {{- end}} + ports: + - name: http + containerPort: {{ .Values.service.port }} + protocol: TCP + resources: + {{- toYaml .Values.resources | nindent 12 }} + {{- with .Values.nodeSelector }} + nodeSelector: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.affinity }} + affinity: + {{- toYaml . | nindent 8 }} + {{- end }} + {{- with .Values.tolerations }} + tolerations: + {{- toYaml . | nindent 8 }} + {{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/templates/hpa.yaml b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/hpa.yaml new file mode 100644 index 000000000..8944056ea --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/hpa.yaml @@ -0,0 +1,28 @@ +{{- if .Values.autoscaling.enabled }} +apiVersion: autoscaling/v2beta1 +kind: HorizontalPodAutoscaler +metadata: + name: {{ include "utilities.fullname" . }} + labels: + {{- include "utilities.labels" . | nindent 4 }} +spec: + scaleTargetRef: + apiVersion: apps/v1 + kind: Deployment + name: {{ include "utilities.fullname" . 
}} + minReplicas: {{ .Values.autoscaling.minReplicas }} + maxReplicas: {{ .Values.autoscaling.maxReplicas }} + metrics: + {{- if .Values.autoscaling.targetCPUUtilizationPercentage }} + - type: Resource + resource: + name: cpu + targetAverageUtilization: {{ .Values.autoscaling.targetCPUUtilizationPercentage }} + {{- end }} + {{- if .Values.autoscaling.targetMemoryUtilizationPercentage }} + - type: Resource + resource: + name: memory + targetAverageUtilization: {{ .Values.autoscaling.targetMemoryUtilizationPercentage }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/templates/ingress.yaml b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/ingress.yaml new file mode 100644 index 000000000..567cac846 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/ingress.yaml @@ -0,0 +1,61 @@ +{{- if .Values.ingress.enabled -}} +{{- $fullName := include "utilities.fullname" . -}} +{{- $svcPort := .Values.service.port -}} +{{- if and .Values.ingress.className (not (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion)) }} + {{- if not (hasKey .Values.ingress.annotations "kubernetes.io/ingress.class") }} + {{- $_ := set .Values.ingress.annotations "kubernetes.io/ingress.class" .Values.ingress.className}} + {{- end }} +{{- end }} +{{- if semverCompare ">=1.19-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1 +{{- else if semverCompare ">=1.14-0" .Capabilities.KubeVersion.GitVersion -}} +apiVersion: networking.k8s.io/v1beta1 +{{- else -}} +apiVersion: extensions/v1beta1 +{{- end }} +kind: Ingress +metadata: + name: {{ $fullName }} + labels: + {{- include "utilities.labels" . | nindent 4 }} + {{- with .Values.ingress.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +spec: + {{- if and .Values.ingress.className (semverCompare ">=1.18-0" .Capabilities.KubeVersion.GitVersion) }} + ingressClassName: {{ .Values.ingress.className }} + {{- end }} + {{- if .Values.ingress.tls }} + tls: + {{- range .Values.ingress.tls }} + - hosts: + {{- range .hosts }} + - {{ . | quote }} + {{- end }} + secretName: {{ .secretName }} + {{- end }} + {{- end }} + rules: + {{- range .Values.ingress.hosts }} + - host: {{ .host | quote }} + http: + paths: + {{- range .paths }} + - path: {{ .path }} + {{- if and .pathType (semverCompare ">=1.18-0" $.Capabilities.KubeVersion.GitVersion) }} + pathType: {{ .pathType }} + {{- end }} + backend: + {{- if semverCompare ">=1.19-0" $.Capabilities.KubeVersion.GitVersion }} + service: + name: {{ $fullName }} + port: + number: {{ $svcPort }} + {{- else }} + serviceName: {{ $fullName }} + servicePort: {{ $svcPort }} + {{- end }} + {{- end }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/templates/service.yaml b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/service.yaml new file mode 100644 index 000000000..c9afad7d5 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/service.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Service +metadata: + name: {{ include "utilities.fullname" . }} + labels: + {{- include "utilities.labels" . | nindent 4 }} +spec: + type: {{ .Values.service.type }} + ports: + - port: {{ .Values.service.port }} + targetPort: http + protocol: TCP + name: http + selector: + {{- include "utilities.selectorLabels" . 
| nindent 4 }} diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/templates/serviceaccount.yaml b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/serviceaccount.yaml new file mode 100644 index 000000000..dd5c35012 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/serviceaccount.yaml @@ -0,0 +1,12 @@ +{{- if .Values.serviceAccount.create -}} +apiVersion: v1 +kind: ServiceAccount +metadata: + name: {{ include "utilities.serviceAccountName" . }} + labels: + {{- include "utilities.labels" . | nindent 4 }} + {{- with .Values.serviceAccount.annotations }} + annotations: + {{- toYaml . | nindent 4 }} + {{- end }} +{{- end }} diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/templates/tests/test-connection.yaml b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/tests/test-connection.yaml new file mode 100644 index 000000000..44b72f68d --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/templates/tests/test-connection.yaml @@ -0,0 +1,15 @@ +apiVersion: v1 +kind: Pod +metadata: + name: "{{ include "utilities.fullname" . }}-test-connection" + labels: + {{- include "utilities.labels" . | nindent 4 }} + annotations: + "helm.sh/hook": test +spec: + containers: + - name: wget + image: busybox + command: ['wget'] + args: ['{{ include "utilities.fullname" . }}:{{ .Values.service.port }}'] + restartPolicy: Never diff --git a/scripts/helm/helmcharts/openreplay/charts/utilities/values.yaml b/scripts/helm/helmcharts/openreplay/charts/utilities/values.yaml new file mode 100644 index 000000000..6c3e0056f --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/charts/utilities/values.yaml @@ -0,0 +1,85 @@ +# Default values for openreplay. +# This is a YAML-formatted file. +# Declare variables to be passed into your templates. + +replicaCount: 1 + +image: + repository: rg.fr-par.scw.cloud/foss/utilities + pullPolicy: IfNotPresent + # Overrides the image tag whose default is the chart appVersion. + tag: "" + +imagePullSecrets: [] +nameOverride: "utilities" +fullnameOverride: "utilities" + +serviceAccount: + # Specifies whether a service account should be created + create: true + # Annotations to add to the service account + annotations: {} + # The name of the service account to use. + # If not set and create is true, a name is generated using the fullname template + name: "" + +podAnnotations: {} + +podSecurityContext: {} + # fsGroup: 2000 + +securityContext: {} + # capabilities: + # drop: + # - ALL + # readOnlyRootFilesystem: true + # runAsNonRoot: true + # runAsUser: 1000 + +service: + type: ClusterIP + port: 9000 + +ingress: + enabled: false + className: "" + annotations: {} + # kubernetes.io/ingress.class: nginx + # kubernetes.io/tls-acme: "true" + hosts: + - host: chart-example.local + paths: + - path: / + pathType: ImplementationSpecific + tls: [] + # - secretName: chart-example-tls + # hosts: + # - chart-example.local + +resources: {} + # We usually recommend not to specify default resources and to leave this as a conscious + # choice for the user. This also increases chances charts run on environments with little + # resources, such as Minikube. If you do want to specify resources, uncomment the following + # lines, adjust them as necessary, and remove the curly braces after 'resources:'. 
+ # limits: + # cpu: 100m + # memory: 128Mi + # requests: + # cpu: 100m + # memory: 128Mi + +autoscaling: + enabled: true + minReplicas: 1 + maxReplicas: 5 + targetCPUUtilizationPercentage: 80 + # targetMemoryUtilizationPercentage: 80 + +env: {} + + +nodeSelector: {} + +tolerations: [] + +affinity: {} diff --git a/scripts/helm/helmcharts/openreplay/files/clickhouse.sh b/scripts/helm/helmcharts/openreplay/files/clickhouse.sh new file mode 100644 index 000000000..80cb98fa3 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/files/clickhouse.sh @@ -0,0 +1,38 @@ +#!/bin/bash + +set -e + +clickhousedir=/opt/openreplay/openreplay/scripts/helm/db/init_dbs/clickhouse + +function migrate() { + echo "Starting clickhouse migration" + migration_versions=$1 + for version in $migration_versions; do + echo "Migrating clickhouse version $version" + # For now, we can ignore the clickhouse db inject errors. + # TODO: Better error handling in script + clickhouse-client -h clickhouse.db.svc.cluster.local --port 9000 < ${clickhousedir}/${version}/${version}.sql || true + done +} + +function init() { + echo "Initializing clickhouse" + for file in `ls ${clickhousedir}/create/*.sql`; do + echo "Injecting $file" + clickhouse-client -h clickhouse.db.svc.cluster.local --port 9000 < $file + done +} + +# /bin/bash clickhouse.sh migrate $migration_versions +case "$1" in + migrate) + migrate $2 + ;; + init) + init + ;; + *) + echo "Unknown operation for clickhouse migration; exiting." + exit 1 + ;; +esac diff --git a/scripts/helm/helmcharts/openreplay/files/dbops.sh b/scripts/helm/helmcharts/openreplay/files/dbops.sh new file mode 100644 index 000000000..542b6ffcf --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/files/dbops.sh @@ -0,0 +1,82 @@ +#!/bin/bash + +cd $(dirname $0) + +function migration() { + ls -la /opt/openreplay/openreplay + db=$1 + + # Checking if previous app version is set. + if [[ $PREVIOUS_APP_VERSION == "" ]]; then + echo "Previous app version to be migrated is not set. Rerun using --set fromVersion=v1.3.5" + exit 100 + fi + + if [[ $PREVIOUS_APP_VERSION == $CHART_APP_VERSION ]]; then + echo "No application version change. Not upgrading." + exit 0 + fi + + # Checking migration versions + cd /opt/openreplay/openreplay/scripts/helm + migration_versions=(`ls -l db/init_dbs/$db | grep -E ^d | awk -v number=${PREVIOUS_APP_VERSION} '$NF > number {print $NF}' | grep -v create`) + echo "Migration version: $migration_versions" + + cd - + + case "$1" in + postgresql) + /bin/bash postgresql.sh migrate $migration_versions + ;; + minio) + /bin/bash minio.sh migrate $migration_versions + ;; + clickhouse) + /bin/bash clickhouse.sh migrate $migration_versions + ;; + kafka) + /bin/bash kafka.sh migrate $migration_versions + ;; + *) + echo "Unknown operation for db migration; exiting." + exit 1 + ;; + esac +} + +function init(){ + case $1 in + postgresql) + /bin/bash postgresql.sh init + ;; + minio) + /bin/bash minio.sh init + ;; + clickhouse) + /bin/bash clickhouse.sh init + ;; + kafka) + /bin/bash kafka.sh init + ;; + *) + echo "Unknown operation for db init; exiting." + exit 1 + ;; + + esac +} + + +# dbops.sh true(upgrade) clickhouse +case "$1" in + "false") + init $2 + ;; + "true") + migration $2 + ;; + *) + echo "Unknown operation for db migration; exiting." 
+ exit 1 + ;; +esac diff --git a/scripts/helm/helmcharts/openreplay/files/kafka.sh b/scripts/helm/helmcharts/openreplay/files/kafka.sh new file mode 100644 index 000000000..9105f3b00 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/files/kafka.sh @@ -0,0 +1,41 @@ +#!/bin/bash + +set -e + +topics=( + "raw" + "raw-ios" + "trigger" + "cache" + "analytics" + ) + +touch /tmp/config.txt + +if [[ $KAFKA_SSL == "true" ]]; then + echo 'security.protocol=SSL' > /tmp/config.txt +fi + +function init() { + echo "Initializing kafka" + for topic in ${topics[*]}; do + echo "Creating topic: $topic" + # TODO: Have to check an idempotent way of creating topics. + kafka-topics.sh --create --bootstrap-server ${KAFKA_HOST}:${KAFKA_PORT} --replication-factor 2 --partitions 16 --topic ${topic} --command-config /tmp/config.txt || true + kafka-configs.sh --bootstrap-server ${KAFKA_HOST}:${KAFKA_PORT} --entity-type topics --alter --add-config retention.ms=3456000000 --entity-name=${topic} --command-config /tmp/config.txt || true + done +} + +# /bin/bash kafka.sh migrate $migration_versions +case "$1" in + migrate) + init + ;; + init) + init + ;; + *) + echo "Unknown operation for kafka migration; exiting." + exit 1 + ;; +esac diff --git a/scripts/helm/helmcharts/openreplay/files/minio.sh b/scripts/helm/helmcharts/openreplay/files/minio.sh new file mode 100644 index 000000000..2bb7682f3 --- /dev/null +++ b/scripts/helm/helmcharts/openreplay/files/minio.sh @@ -0,0 +1,56 @@ +#!/bin/bash + +set -e + + +cd /tmp + +buckets=("mobs" "sessions-assets" "static" "sourcemaps") + +mc alias set minio http://minio.db.svc.cluster.local:9000 $MINIO_ACCESS_KEY $MINIO_SECRET_KEY + +function init() { +echo "Initializing minio" + +for bucket in ${buckets[*]}; do +mc mb minio/${bucket} || true +mc ilm import minio/${bucket} < + # SAML2_MD_URL: '' + # idp_entityId: '' + # idp_sso_url: '' + # idp_x509cert: '' + # idp_sls_url: '' + # idp_name: '' + + +# If you want to override something +# chartname: +# fieldFrom chart/Values.yaml: +# key: value + +# For example: +# +# alerts: +# resources: +# limits: +# cpu: 1 + +# nginx-ingress: +# +### If you want to redirect nginx http to https +# customServerConfigs: | +# return 301 https://$host$request_uri; +# +### Change the ssl certificates +# +### Public certificate ( content from site.crt, mind the indentation ) +# ssl_certificate: |- +# -----BEGIN CERTIFICATE----- +# MIIFITCCAwmgAwIBAgIUQ8hQoDbW3Z4DxRVjIYlIlbEHp/8wDQYJKoZIhvcNAQEL +# BQAwIDEeMBwGA1UEAwwVb3BlbnJlcGxheS5sb2NhbC5ob3N0MB4XDTIxMTIyMjA3 +# NDIxOVoXDTIyMTIyMjA3NDIxOVowIDEeMBwGA1UEAwwVb3BlbnJlcGxheS5sb2Nh +# -----END CERTIFICATE----- +# +### Private certificate ( content from site.key, mind the indentation. 
) +# ssl_privatekey: |- +# -----BEGIN PRIVATE KEY----- +# TbXr+1+HXWQGs4Go63gpvhI/yzOScTTiuI88lbjM9QA/aDlZm2TlXdcB71PDtO5T +# e2Zw7SH2h7yLK6uP2FamVgUSe0rWf9zQmKTkFzJcgwelvuk7MHBMw4JSYeoB7dJP +# 3+FMchvzM1exCC/kNxTqvAyYWzdNPBIPSekHn1I9eEgr14cwZ+1RV9SK16uxsMT9 +# WnjLAoIBADKutRKB8nH+wD3sa4cP782QNbkDqJCcb3rPntnCWI/jA2TeY/wAvrXa +# 8yFtSSeYSwN9Wr+UosSkQ+OQSO0WmT2NrxdkH8jK8kYnzYkJ9+EFE2YpMN2UosSb +# esQ9oEMnivBMNv8DnB4IuO8LjTj1rhqcBmWJH1zvDi1Ur+/uAb+6XLm0Dp/59/Rn +# PSlLQmFraq6mrUkKTU40zyT6eK8AvIn/+sXAF1Xb9Vnm8Ndl+gZ4imzjcCubbq+6 +# PqvLjFJNGyya6b3MX4RSxVGfkIf5f6bcSSZ0zzSB3qLbCKS+JawwR1WF2rJp6Hj5 +# 7qINKoGovqXB1oAdopIl1z64e7MWVE4= +# -----END PRIVATE KEY----- diff --git a/scripts/helm/helmcharts/versionUpdater.sh b/scripts/helm/helmcharts/versionUpdater.sh new file mode 100644 index 000000000..5fc8230a2 --- /dev/null +++ b/scripts/helm/helmcharts/versionUpdater.sh @@ -0,0 +1,10 @@ +#!/bin/bash +# This script will update the version of openreplay components. +currentVersion=$1 +[[ -z $currentVersion ]] && { + echo "Usage: $0 <version>" + echo "eg: $0 v1.5.0" + exit 1 +} +find ./openreplay -type f -iname chart.yaml -exec sed -i "s/appVersion.*/appVersion: \"$currentVersion\"/g" {} \; +sed -i "s/fromVersion.*/fromVersion: \"$currentVersion\"/g" vars.yaml +sed -i "s/version.*/version=\"$currentVersion\"/g" init.sh diff --git a/scripts/helm/kube-install.sh b/scripts/helm/kube-install.sh index 0a42416bf..056a45de4 100755 --- a/scripts/helm/kube-install.sh +++ b/scripts/helm/kube-install.sh @@ -147,7 +147,7 @@ function enterprise(){ enterprise=1 sed -i "s#enterprise_edition_license.*#enterprise_edition_license: \"${1}\"#g" vars.yaml # Updating image version to be ee - sed -i "s/\(image_tag.*[0-9]\)\"$/\1-ee\"/" vars.yaml + sed -i 's/\(image_tag.*[0-9]\)\(-pr\)\?"$/\1\2-ee"/' vars.yaml echo "Importing enterprise code..." 
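+    # Illustration of the tag rewrite above, with hypothetical tag values: +    #   image_tag: "v1.4.0"    ->  image_tag: "v1.4.0-ee" +    #   image_tag: "v1.4.0-pr" ->  image_tag: "v1.4.0-pr-ee"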
cp -rf ../../ee/scripts/* ../ } diff --git a/scripts/helm/nginx-ingress/nginx-ingress/templates/configmap.yaml b/scripts/helm/nginx-ingress/nginx-ingress/templates/configmap.yaml index bf41a28c2..ea59aa82d 100644 --- a/scripts/helm/nginx-ingress/nginx-ingress/templates/configmap.yaml +++ b/scripts/helm/nginx-ingress/nginx-ingress/templates/configmap.yaml @@ -62,7 +62,7 @@ data: proxy_set_header Upgrade $http_upgrade; proxy_set_header Connection "Upgrade"; proxy_set_header Host $host; - proxy_set_header X-Forwarded-Proto $scheme; + proxy_set_header X-Forwarded-Proto $origin_proto; proxy_pass http://chalice-openreplay.app.svc.cluster.local:8000; } location /assist/ { @@ -133,6 +133,10 @@ data: default upgrade; '' close; } + map $http_x_forwarded_proto $origin_proto { + default $http_x_forwarded_proto; + '' $scheme; + } server { listen 80 default_server; listen [::]:80 default_server; diff --git a/scripts/helm/openreplay-cli b/scripts/helm/openreplay-cli index 93478e2d6..0f229e93a 100755 --- a/scripts/helm/openreplay-cli +++ b/scripts/helm/openreplay-cli @@ -51,7 +51,8 @@ EOF [ -d | --status ] [ -v | --verbose ] [ -l | --logs SERVICE ] - [ -i | --install SERVICE ] + [ -i | --legacy-install SERVICE ] + [ -I | --helm-install SERVICE ] [ -s | --stop SERVICE|all ] [ -S | --start SERVICE|all ] [ -r | --restart SERVICE|all ]" @@ -103,10 +104,14 @@ restart() { kubectl rollout restart -n app deployment $1-openreplay } -install() { +legacyInstall() { bash kube-install.sh --app $1 } +helmInstall() { + helm upgrade --install openreplay helmcharts/openreplay -f helmcharts/vars.yaml +} + upgrade() { sed -i "s/tag:.*/ tag: 'latest'/g" ./app/$1.yaml } @@ -122,7 +127,7 @@ status() { [[ $# -eq 0 ]] && usage && exit 1 -PARSED_ARGUMENTS=$(color getopt -a -n openreplay-cli -o vhds:S:l:r:i: --long verbose,help,status,start:,stop:,logs:,restart:,install: -- "$@") +PARSED_ARGUMENTS=$(color getopt -a -n openreplay-cli -o vhds:S:l:r:i:I: --long verbose,help,status,start:,stop:,logs:,restart:,legacy-install:,helm-install: -- "$@") VALID_ARGUMENTS=$? 
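+# Note: in the getopt optstring above, every letter followed by ':' (s, S, l, r, i, I) +# takes a mandatory argument, while v, h and d are plain flags.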
if [[ "$VALID_ARGUMENTS" != "0" ]]; then usage @@ -139,7 +144,8 @@ do -S | --start) start $2 ; shift 2 ;; -l | --logs) logs "$2" ; shift 2 ;; -r | --restart) restart "$2" ; shift 2 ;; - -i | --install) install "$2" ; shift 2 ;; + -i | --legacy-install) legacyInstall "$2" ; shift 2 ;; + -I | --helm-install) helmInstall "$2" ; shift 2 ;; # -- means the end of the arguments; drop this, and break out of the while loop --) shift; break ;; # If invalid options were passed, then getopt should have reported an error, diff --git a/scripts/helm/roles/openreplay/templates/alerts.yaml b/scripts/helm/roles/openreplay/templates/alerts.yaml index b2a91832b..97178c1ce 100644 --- a/scripts/helm/roles/openreplay/templates/alerts.yaml +++ b/scripts/helm/roles/openreplay/templates/alerts.yaml @@ -4,8 +4,32 @@ image: tag: {{ image_tag }} {% endif %} env: + S3_KEY: "{{ minio_access_key }}" + S3_SECRET: "{{ minio_secret_key }}" + SITE_URL: "https://{{ domain_name }}" + pg_host: "{{ postgres_endpoint }}" + pg_port: "{{ postgres_port }}" + pg_dbname: "{{ postgres_db_name }}" + pg_user: "{{ postgres_db_user }}" + pg_password: "{{ postgres_db_password }}" + EMAIL_HOST: "{{ email_host }}" + EMAIL_PORT: "{{ email_port }}" + EMAIL_USER: "{{ email_user }}" + EMAIL_PASSWORD: "{{ email_password }}" + EMAIL_USE_TLS: "{{ email_use_tls }}" + EMAIL_USE_SSL: "{{ email_use_ssl }}" + EMAIL_SSL_KEY: "{{ email_ssl_key }}" + EMAIL_SSL_CERT: "{{ email_ssl_cert }}" + EMAIL_FROM: "{{ email_from }}" + AWS_DEFAULT_REGION: "{{ aws_region }}" LICENSE_KEY: "{{ enterprise_edition_license }}" - POSTGRES_STRING: "postgres://{{postgres_db_user}}:{{postgres_db_password}}@{{postgres_endpoint}}:{{postgres_port}}/{{ postgres_db_name }}" + # In case of minio, the instance is running inside kuberntes, + # which is accessible via nginx ingress. +{% if s3_endpoint == "http://minio.db.svc.cluster.local:9000" %} + S3_HOST: "https://{{ domain_name }}" +{% else %} + S3_HOST: "{{ s3_endpoint }}" +{% endif %} {% if not (docker_registry_username is defined and docker_registry_username and docker_registry_password is defined and docker_registry_password) %} imagePullSecrets: [] diff --git a/scripts/helm/upgrade.sh b/scripts/helm/upgrade.sh index f57d2fb07..3c437db9a 100644 --- a/scripts/helm/upgrade.sh +++ b/scripts/helm/upgrade.sh @@ -81,6 +81,7 @@ patch installation_type=1 if [[ ${ENTERPRISE} -eq 1 ]]; then cp -rf ../../ee/scripts/* ../../scripts/ + sed 's/\(image_tag.*[0-9]\)\(-pr\)\?"$/\1\2-ee"/' vars.yaml echo -e "Migrating clickhouse" migration clickhouse fi diff --git a/tracker/tracker-assist/README.md b/tracker/tracker-assist/README.md index f679bd8f6..0c7bfe00f 100644 --- a/tracker/tracker-assist/README.md +++ b/tracker/tracker-assist/README.md @@ -30,6 +30,9 @@ Options: { confirmText: string, confirmStyle: Object, + config: RTCConfiguration, + onAgentConnect: () => (()=>void | void), + onCallStart: () => (()=>void | void), } ``` Use `confirmText` option to specify a text in the call confirmation popup. @@ -41,4 +44,43 @@ You can specify its styles as well with `confirmStyle` style object. color: "orange" } -``` \ No newline at end of file +``` + +It is possible to pass `config` RTCConfiguration object in order to configure TURN server or other parameters. +```ts +config: { + iceServers: [{ + urls: "stun:stun.services.mozilla.com", + username: "louis@mozilla.com", + credential: "webrtcdemo" + }, { + urls: ["stun:stun.example.com", "stun:stun-1.example.com"] + }] +} + +``` + +You can pass `onAgentConnect` callback. 
diff --git a/tracker/tracker-assist/package.json b/tracker/tracker-assist/package.json index 345d42ecb..506f05e2d 100644 --- a/tracker/tracker-assist/package.json +++ b/tracker/tracker-assist/package.json @@ -1,7 +1,7 @@ { "name": "@openreplay/tracker-assist", "description": "Tracker plugin for screen assistance through the WebRTC", - "version": "3.4.9", + "version": "3.4.13", "keywords": [ "WebRTC", "assistance", diff --git a/tracker/tracker-assist/src/CallWindow.ts b/tracker/tracker-assist/src/CallWindow.ts index f22aa6a7a..1299008a8 100644 --- a/tracker/tracker-assist/src/CallWindow.ts +++ b/tracker/tracker-assist/src/CallWindow.ts @@ -1,4 +1,4 @@ -import type { LocalStream } from './LocalStream'; +import type { LocalStream } from './LocalStream.js'; const SS_START_TS_KEY = "__openreplay_assist_call_start_ts" @@ -207,14 +207,6 @@ export default class CallWindow { private toggleAudio() { const enabled = this.localStream?.toggleAudio() || false this.toggleAudioUI(enabled) - // if (!this.audioBtn) { return; } - // if (enabled) { - // this.audioBtn.classList.remove("muted"); - // this.audioBtn.childNodes[1].textContent = "Mute"; - // } else { - // this.audioBtn.classList.add("muted"); - // this.audioBtn.childNodes[1].textContent = "Unmute"; - // } } private toggleVideoUI(enabled: boolean) { diff --git a/tracker/tracker-assist/src/Mouse.ts b/tracker/tracker-assist/src/Mouse.ts index 51fb67e8e..d2c89cfe6 100644 --- a/tracker/tracker-assist/src/Mouse.ts +++ b/tracker/tracker-assist/src/Mouse.ts @@ -1,3 +1,5 @@ +type XY = [number, number] + export default class Mouse { private mouse: HTMLDivElement @@ -14,23 +16,90 @@ export default class Mouse { background: "radial-gradient(red, transparent)", }); document.body.appendChild(this.mouse); + + + window.addEventListener("scroll", this.handleWScroll) + window.addEventListener("resize", this.resetLastScrEl) } - move({x, y}: {x: number, y: number}) { - this.position = [x, y]; + move(pos: XY) { + if (this.position[0] !== pos[0] || this.position[1] !== pos[1]) { + this.resetLastScrEl() + } + + this.position = pos; Object.assign(this.mouse.style, { - left: `${x || 0}px`, - top: `${y || 0}px` + left: `${pos[0] || 0}px`, + top: `${pos[1] || 0}px` }) + } - getPosition(): [ number, number] { + getPosition(): XY { return this.position; } + click(pos: XY) { + const el = document.elementFromPoint(pos[0], pos[1]) + if (el instanceof HTMLElement) { + el.click() + el.focus() + } + } + + private readonly pScrEl = document.scrollingElement || document.documentElement + private lastScrEl: Element | "window" | null = null + private resetLastScrEl = () => { this.lastScrEl = null } + private handleWScroll = e => { + if (e.target !== this.lastScrEl) { 
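+      // a scroll coming from anything but the remembered target invalidates it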
this.resetLastScrEl() + } + } + scroll(delta: XY) { + // what would be the browser-like logic? + const [mouseX, mouseY] = this.position + const [dX, dY] = delta + + let el = this.lastScrEl + // Scroll the same one + if (el instanceof Element) { + el.scrollLeft += dX + el.scrollTop += dY + return // TODO: if not scrolled + } + if (el === "window") { + window.scroll(this.pScrEl.scrollLeft + dX, this.pScrEl.scrollTop + dY) + return + } + + el = document.elementFromPoint( + mouseX-this.pScrEl.scrollLeft, + mouseY-this.pScrEl.scrollTop, + ) + while (el) { + //if(el.scrollWidth > el.clientWidth) // - This check doesn't work in common case + const esl = el.scrollLeft + el.scrollLeft += dX + const est = el.scrollTop + el.scrollTop += dY + if (esl !== el.scrollLeft || est !== el.scrollTop) { + this.lastScrEl = el + return + } else { + el = el.parentElement + } + } + + // If not scrolled + window.scroll(this.pScrEl.scrollLeft + dX, this.pScrEl.scrollTop + dY) + this.lastScrEl = "window" + } + remove() { if (this.mouse.parentElement) { document.body.removeChild(this.mouse); } + window.removeEventListener("scroll", this.handleWScroll) + window.removeEventListener("resize", this.resetLastScrEl) } } \ No newline at end of file diff --git a/tracker/tracker-assist/src/index.ts b/tracker/tracker-assist/src/index.ts index c1e452a34..74514378a 100644 --- a/tracker/tracker-assist/src/index.ts +++ b/tracker/tracker-assist/src/index.ts @@ -11,11 +11,13 @@ import ConfirmWindow from './ConfirmWindow.js'; import RequestLocalStream from './LocalStream.js'; export interface Options { + onAgentConnect: () => ((() => void) | void), + onCallStart: () => ((() => void) | void), confirmText: string, confirmStyle: Object, // Styles object session_calling_peer_key: string, config: RTCConfiguration, - __messages_per_send?: number, + // __messages_per_send?: number, } enum CallingState { @@ -25,7 +27,7 @@ enum CallingState { }; //@ts-ignore peerjs hack for webpack5 (?!) 
TODO: ES/node modules; -Peer = Peer.default || Peer; +//Peer = Peer.default || Peer; // type IncomeMessages = // "call_end" | @@ -40,6 +42,8 @@ export default function(opts?: Partial) { confirmStyle: {}, session_calling_peer_key: "__openreplay_calling_peer", config: null, + onCallStart: ()=>{}, + onAgentConnect: ()=>{}, }, opts, ); @@ -98,8 +102,11 @@ export default function(opts?: Partial) { log('Connection opened.') assistDemandedRestart = true; app.stop(); - openDataConnections[conn.peer] = new BufferingConnection(conn, options.__messages_per_send) + openDataConnections[conn.peer] = new BufferingConnection(conn) + + const onAgentDisconnect = options.onAgentConnect(); conn.on('close', () => { + onAgentDisconnect && onAgentDisconnect(); log("Connection close: ", conn.peer) delete openDataConnections[conn.peer] // TODO: check if works properly }) @@ -136,8 +143,8 @@ export default function(opts?: Partial) { let confirmAnswer: Promise - const peerOnCall = sessionStorage.getItem(options.session_calling_peer_key) - if (peerOnCall === call.peer) { + const callingPeer = sessionStorage.getItem(options.session_calling_peer_key) + if (callingPeer === call.peer) { confirmAnswer = Promise.resolve(true) } else { setCallingState(CallingState.Requesting); @@ -161,10 +168,13 @@ export default function(opts?: Partial) { return } + const onCallEnd = options.onCallStart() + const mouse = new Mouse() let callUI = new CallWindow() - const onCallEnd = () => { + const handleCallEnd = () => { + onCallEnd && onCallEnd() mouse.remove(); callUI.remove(); setCallingState(CallingState.False); @@ -173,10 +183,10 @@ export default function(opts?: Partial) { log("initiateCallEnd") call.close() notifyCallEnd(); - onCallEnd(); + handleCallEnd(); } RequestLocalStream().then(lStream => { - dataConn.on("close", onCallEnd); // For what case? + dataConn.on("close", handleCallEnd); // For what case? //call.on('close', onClose); // Works from time to time (peerjs bug) const checkConnInterval = setInterval(() => { if (!dataConn.open) { @@ -184,7 +194,7 @@ export default function(opts?: Partial) { clearInterval(checkConnInterval); } if (!call.open) { - onCallEnd(); + handleCallEnd(); clearInterval(checkConnInterval); } }, 3000); @@ -202,46 +212,22 @@ export default function(opts?: Partial) { document.addEventListener("click", onInteraction) }); dataConn.on('data', (data: any) => { + log("Income data: ", data) if (!data) { return } if (data === "call_end") { - log('"call_end" received') - onCallEnd(); - return; + return handleCallEnd(); } if (data.name === 'string') { - log("Name received: ", data) - callUI.setAssistentName(data.name); + return callUI.setAssistentName(data.name); } if (data.type === "scroll" && Array.isArray(data.delta)) { - const scrEl = document.scrollingElement || document.documentElement - const [mouseX, mouseY] = mouse.getPosition() - const [dX, dY] = data.delta; - const el = document.elementFromPoint(mouseX-scrEl.scrollLeft, mouseY-scrEl.scrollTop) - let scrolled = false // what would be the browser-like logic? 
- if (el) { - if(el.scrollWidth > el.clientWidth) { - el.scrollLeft += data.delta[0] - scrolled = true - } - if (el && el.scrollHeight > el.clientHeight) { - el.scrollTop += data.delta[1] - scrolled = true - } - } - if (!scrolled) { - window.scroll(scrEl.scrollLeft + data.delta[0], scrEl.scrollTop + data.delta[1]) - } + return mouse.scroll(data.delta) } if (data.type === "click" && typeof data.x === 'number' && typeof data.y === 'number') { - const el = document.elementFromPoint(data.x, data.y) - if (el instanceof HTMLElement) { - el.click() - el.focus() - } - return + return mouse.click([ data.x, data.y ]) } if (typeof data.x === 'number' && typeof data.y === 'number') { - mouse.move(data); + return mouse.move([ data.x, data.y ]) } }); @@ -262,7 +248,7 @@ export default function(opts?: Partial) { }) .catch(e => { warn("Audio mediadevice request error:", e) - onCallEnd() + handleCallEnd() }); }).catch(); // in case of Confirm.remove() without any confirmation });