Merge branch 'dev' of github.com:openreplay/openreplay into custom-metrics-ui

Shekar Siri committed 2022-01-26 17:05:33 +05:30
commit 96c926db22
560 changed files with 44055 additions and 5184 deletions

View file

@@ -1,51 +0,0 @@
name: S3 Deploy EE
on:
push:
branches:
- dev
paths:
- ee/frontend/**
jobs:
build:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Cache node modules
uses: actions/cache@v1
with:
path: node_modules
key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.OS }}-build-
${{ runner.OS }}-
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Install
run: npm install
- name: Build and deploy
run: |
cd frontend
bash build.sh
cp -arl public frontend
minio_pod=$(kubectl get po -n db -l app.kubernetes.io/name=minio -n db --output custom-columns=name:.metadata.name | tail -n+2)
echo $minio_pod
echo copying frontend to container.
kubectl -n db cp frontend $minio_pod:/data/
rm -rf frontend
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# AWS_REGION: eu-central-1
# AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}

View file

@@ -1,4 +1,4 @@
name: S3 Deploy
name: Frontend FOSS Deployment
on:
push:
branches:
@@ -27,8 +27,8 @@ jobs:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Install
run: npm install
# - name: Install
# run: npm install
- name: Build and deploy
run: |

View file

@@ -1,6 +1,6 @@
MIT License
Copyright (c) 2021 Asayer SAS.
Copyright (c) 2022 Asayer SAS.
Portions of this software are licensed as follows:

View file

@@ -1,67 +0,0 @@
{
"version": "2.0",
"app_name": "parrot",
"environment_variables": {
},
"stages": {
"default-foss": {
"api_gateway_stage": "default-fos",
"manage_iam_role": false,
"iam_role_arn": "",
"autogen_policy": true,
"environment_variables": {
"isFOS": "true",
"isEE": "false",
"stage": "default-foss",
"jwt_issuer": "openreplay-default-foss",
"sentryURL": "",
"pg_host": "postgresql.db.svc.cluster.local",
"pg_port": "5432",
"pg_dbname": "postgres",
"pg_user": "postgres",
"pg_password": "asayerPostgres",
"alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s",
"email_signup": "http://127.0.0.1:8000/async/email_signup/%s",
"email_funnel": "http://127.0.0.1:8000/async/funnel/%s",
"email_basic": "http://127.0.0.1:8000/async/basic/%s",
"assign_link": "http://127.0.0.1:8000/async/email_assignment",
"captcha_server": "",
"captcha_key": "",
"sessions_bucket": "mobs",
"sessions_region": "us-east-1",
"put_S3_TTL": "20",
"sourcemaps_reader": "http://0.0.0.0:9000/sourcemaps",
"sourcemaps_bucket": "sourcemaps",
"js_cache_bucket": "sessions-assets",
"peers": "http://0.0.0.0:9000/assist/peers",
"async_Token": "",
"EMAIL_HOST": "",
"EMAIL_PORT": "587",
"EMAIL_USER": "",
"EMAIL_PASSWORD": "",
"EMAIL_USE_TLS": "true",
"EMAIL_USE_SSL": "false",
"EMAIL_SSL_KEY": "",
"EMAIL_SSL_CERT": "",
"EMAIL_FROM": "OpenReplay<do-not-reply@openreplay.com>",
"SITE_URL": "",
"announcement_url": "",
"jwt_secret": "",
"jwt_algorithm": "HS512",
"jwt_exp_delta_seconds": "2592000",
"S3_HOST": "",
"S3_KEY": "",
"S3_SECRET": "",
"invitation_link": "/api/users/invitation?token=%s",
"change_password_link": "/reset-password?invitation=%s&&pass=%s",
"version_number": "1.3.5"
},
"lambda_timeout": 150,
"lambda_memory_size": 400,
"subnet_ids": [
],
"security_group_ids": [
]
}
}
}

View file

@@ -1,68 +0,0 @@
{
"version": "2.0",
"app_name": "parrot",
"environment_variables": {
},
"stages": {
"default-foss": {
"api_gateway_stage": "default-fos",
"manage_iam_role": false,
"iam_role_arn": "",
"autogen_policy": true,
"environment_variables": {
"isFOS": "true",
"isEE": "false",
"stage": "default-foss",
"jwt_issuer": "openreplay-default-foss",
"sentryURL": "",
"pg_host": "postgresql.db.svc.cluster.local",
"pg_port": "5432",
"pg_dbname": "postgres",
"pg_user": "postgres",
"pg_password": "asayerPostgres",
"alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s",
"email_signup": "http://127.0.0.1:8000/async/email_signup/%s",
"email_funnel": "http://127.0.0.1:8000/async/funnel/%s",
"email_basic": "http://127.0.0.1:8000/async/basic/%s",
"assign_link": "http://127.0.0.1:8000/async/email_assignment",
"captcha_server": "",
"captcha_key": "",
"sessions_bucket": "mobs",
"sessions_region": "us-east-1",
"put_S3_TTL": "20",
"sourcemaps_reader": "http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps",
"sourcemaps_bucket": "sourcemaps",
"js_cache_bucket": "sessions-assets",
"peers": "http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers",
"async_Token": "",
"EMAIL_HOST": "",
"EMAIL_PORT": "587",
"EMAIL_USER": "",
"EMAIL_PASSWORD": "",
"EMAIL_USE_TLS": "true",
"EMAIL_USE_SSL": "false",
"EMAIL_SSL_KEY": "",
"EMAIL_SSL_CERT": "",
"EMAIL_FROM": "OpenReplay<do-not-reply@openreplay.com>",
"SITE_URL": "",
"announcement_url": "",
"jwt_secret": "",
"jwt_algorithm": "HS512",
"jwt_exp_delta_seconds": "2592000",
"S3_HOST": "",
"S3_KEY": "",
"S3_SECRET": "",
"invitation_link": "/api/users/invitation?token=%s",
"change_password_link": "/reset-password?invitation=%s&&pass=%s",
"iosBucket": "openreplay-ios-images",
"version_number": "1.3.6"
},
"lambda_timeout": 150,
"lambda_memory_size": 400,
"subnet_ids": [
],
"security_group_ids": [
]
}
}
}

View file

@@ -34,6 +34,8 @@ pg_host=postgresql.db.svc.cluster.local
pg_password=asayerPostgres
pg_port=5432
pg_user=postgres
pg_timeout=30
pg_minconn=45
put_S3_TTL=20
sentryURL=
sessions_bucket=mobs

api/Dockerfile.alerts Normal file (18 additions)
View file

@@ -0,0 +1,18 @@
FROM python:3.9.7-slim
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
WORKDIR /work
COPY . .
RUN pip install -r requirements.txt
RUN mv .env.default .env && mv app_alerts.py app.py
ENV pg_minconn 2
# Add Tini
# Startup daemon
ENV TINI_VERSION v0.19.0
ARG envarg
ENV ENTERPRISE_BUILD ${envarg}
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--"]
CMD ./entrypoint.sh
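Note: Tini is added as PID 1 (ENTRYPOINT ["/tini", "--"]) so signals are forwarded to the Python process and zombie processes are reaped; the service itself starts via ./entrypoint.sh. The ENV pg_minconn 2 line overrides the pg_minconn=45 default added to the shared .env above, which fits a single-purpose worker that needs far fewer pooled Postgres connections than the main API.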

View file

@@ -1,4 +1,7 @@
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from starlette.responses import StreamingResponse
@@ -60,5 +63,8 @@ Schedule.start()
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
Schedule.add_job(id=job["func"].__name__, **job)
# for job in Schedule.get_jobs():
# print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
for job in Schedule.get_jobs():
print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO))

api/app_alerts.py Normal file (27 additions)
View file

@@ -0,0 +1,27 @@
import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI
from chalicelib.core import alerts_processor
app = FastAPI()
print("============= ALERTS =============")
@app.get("/")
async def root():
return {"status": "Running"}
app.schedule = AsyncIOScheduler()
app.schedule.start()
app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",
"minutes": config("ALERTS_INTERVAL", cast=int, default=5),
"misfire_grace_time": 20})
for job in app.schedule.get_jobs():
print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO))
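A minimal way to exercise this service locally, assuming uvicorn is installed (module path, host and port are illustrative, not part of the commit):

# run_alerts_local.py - hypothetical local runner for the alerts app
import os
import uvicorn

os.environ["ALERTS_INTERVAL"] = "1"  # read by decouple's config(); check alerts every minute
os.environ["LOGLEVEL"] = "DEBUG"
# app_alerts exposes `app`, a FastAPI instance with an AsyncIOScheduler
# already started and an "alerts_processor" interval job registered
uvicorn.run("app_alerts:app", host="127.0.0.1", port=8000)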

View file

@@ -22,7 +22,6 @@ function build_api(){
# Copy enterprise code
[[ $1 == "ee" ]] && {
cp -rf ../ee/api/* ./
cp -rf ../ee/api/.chalice/* ./.chalice/
envarg="default-ee"
tag="ee-"
}
@@ -31,8 +30,9 @@ function build_api(){
docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest
}
}
}
check_prereq
build_api $1
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1
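Note: build.sh now always chains into build_alerts.sh with the same argument and environment, so a single invocation such as IMAGE_TAG=latest DOCKER_REPO=myDockerHubID PUSH_IMAGE=1 bash build.sh ee (values illustrative) builds and pushes both the chalice and the alerts images.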

api/build_alerts.sh Normal file (70 additions)
View file

@@ -0,0 +1,70 @@
#!/bin/bash
# Script to build alerts module
# flags to accept:
# envarg: build for enterprise edition.
# Default will be OSS build.
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
function make_submodule() {
[[ $1 != "ee" ]] && {
# -- this part was generated by modules_lister.py --
mkdir alerts
cp -R ./{app_alerts,schemas}.py ./alerts/
mkdir -p ./alerts/chalicelib/
cp -R ./chalicelib/__init__.py ./alerts/chalicelib/
mkdir -p ./alerts/chalicelib/core/
cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,assist,events_ios,sessions_mobs,errors,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/
mkdir -p ./alerts/chalicelib/utils/
cp -R ./chalicelib/utils/{__init__,TimeUTC,pg_client,helper,event_filter_definition,dev,email_helper,email_handler,smtp,s3,metrics_helper}.py ./alerts/chalicelib/utils/
# -- end of generated part
}
[[ $1 == "ee" ]] && {
# -- this part was generated by modules_lister.py --
mkdir alerts
cp -R ./{app_alerts,schemas,schemas_ee}.py ./alerts/
mkdir -p ./alerts/chalicelib/
cp -R ./chalicelib/__init__.py ./alerts/chalicelib/
mkdir -p ./alerts/chalicelib/core/
cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,roles,assist,events_ios,sessions_mobs,errors,dashboard,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/
mkdir -p ./alerts/chalicelib/utils/
cp -R ./chalicelib/utils/{__init__,TimeUTC,pg_client,helper,event_filter_definition,dev,SAML2_helper,email_helper,email_handler,smtp,s3,args_transformer,ch_client,metrics_helper}.py ./alerts/chalicelib/utils/
# -- end of generated part
}
cp -R ./{Dockerfile.alerts,requirements.txt,.env.default,entrypoint.sh} ./alerts/
cp -R ./chalicelib/utils/html ./alerts/chalicelib/utils/html
}
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
envarg="default-foss"
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit=1
}
[[ $exit -eq 1 ]] && exit 1
}
function build_api(){
tag=""
# Copy enterprise code
[[ $1 == "ee" ]] && {
cp -rf ../ee/api/* ./
envarg="default-ee"
tag="ee-"
}
make_submodule $1
cd alerts
docker build -f ./Dockerfile.alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
cd ..
rm -rf alerts
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/alerts:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
docker push ${DOCKER_REPO:-'local'}/alerts:${tag}latest
}
}
check_prereq
build_api $1
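Note: make_submodule copies only the modules the alerts worker actually imports (the file lists are generated by modules_lister.py) into a throwaway ./alerts tree next to Dockerfile.alerts, requirements.txt, .env.default and entrypoint.sh, so the resulting image carries just that subset of the API codebase rather than all of it.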

View file

@@ -1,14 +1,12 @@
import json
import logging
import time
from fastapi import BackgroundTasks
import schemas
from chalicelib.core import notifications, slack, webhook
from chalicelib.utils import pg_client, helper, email_helper
from chalicelib.utils.TimeUTC import TimeUTC
ALLOW_UPDATE = ["name", "description", "active", "detectionMethod", "query", "options"]
def get(id):
with pg_client.PostgresClient() as cur:
@@ -38,34 +36,6 @@ def get_all(project_id):
return all
SUPPORTED_THRESHOLD = [15, 30, 60, 120, 240, 1440]
def __transform_structure(data):
if data.get("options") is None:
return f"Missing 'options'", None
if data["options"].get("currentPeriod") not in SUPPORTED_THRESHOLD:
return f"Unsupported currentPeriod, please provide one of these values {SUPPORTED_THRESHOLD}", None
if data["options"].get("previousPeriod", 15) not in SUPPORTED_THRESHOLD:
return f"Unsupported previousPeriod, please provide one of these values {SUPPORTED_THRESHOLD}", None
if data["options"].get("renotifyInterval") is None:
data["options"]["renotifyInterval"] = 720
data["query"]["right"] = float(data["query"]["right"])
data["query"] = json.dumps(data["query"])
data["description"] = data["description"] if data.get("description") is not None and len(
data["description"]) > 0 else None
if data.get("options"):
messages = []
for m in data["options"].get("message", []):
if m.get("value") is None:
continue
m["value"] = str(m["value"])
messages.append(m)
data["options"]["message"] = messages
data["options"] = json.dumps(data["options"])
return None, data
def __process_circular(alert):
if alert is None:
return None
@@ -74,15 +44,16 @@ def __process_circular(alert):
return alert
def create(project_id, data):
err, data = __transform_structure(data)
if err is not None:
return {"errors": [err]}
def create(project_id, data: schemas.AlertSchema):
data = data.dict()
data["query"] = json.dumps(data["query"])
data["options"] = json.dumps(data["options"])
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
INSERT INTO public.alerts(project_id, name, description, detection_method, query, options)
VALUES (%(project_id)s, %(name)s, %(description)s, %(detectionMethod)s, %(query)s, %(options)s::jsonb)
INSERT INTO public.alerts(project_id, name, description, detection_method, query, options, series_id)
VALUES (%(project_id)s, %(name)s, %(description)s, %(detection_method)s, %(query)s, %(options)s::jsonb, %(series_id)s)
RETURNING *;""",
{"project_id": project_id, **data})
)
@@ -90,29 +61,30 @@ def create(project_id, data):
return {"data": helper.dict_to_camel_case(__process_circular(a))}
def update(id, changes):
changes = {k: changes[k] for k in changes.keys() if k in ALLOW_UPDATE}
err, changes = __transform_structure(changes)
if err is not None:
return {"errors": [err]}
updateq = []
for k in changes.keys():
updateq.append(f"{helper.key_to_snake_case(k)} = %({k})s")
if len(updateq) == 0:
return {"errors": ["nothing to update"]}
def update(id, data: schemas.AlertSchema):
data = data.dict()
data["query"] = json.dumps(data["query"])
data["options"] = json.dumps(data["options"])
with pg_client.PostgresClient() as cur:
query = cur.mogrify(f"""\
query = cur.mogrify("""\
UPDATE public.alerts
SET {", ".join(updateq)}
SET name = %(name)s,
description = %(description)s,
active = TRUE,
detection_method = %(detection_method)s,
query = %(query)s,
options = %(options)s,
series_id = %(series_id)s
WHERE alert_id =%(id)s AND deleted_at ISNULL
RETURNING *;""",
{"id": id, **changes})
{"id": id, **data})
cur.execute(query=query)
a = helper.dict_to_camel_case(cur.fetchone())
return {"data": __process_circular(a)}
def process_notifications(data, background_tasks: BackgroundTasks):
def process_notifications(data):
full = {}
for n in data:
if "message" in n["options"]:
@@ -133,15 +105,26 @@ def process_notifications(data, background_tasks: BackgroundTasks):
BATCH_SIZE = 200
for t in full.keys():
for i in range(0, len(full[t]), BATCH_SIZE):
# helper.async_post(config('alert_ntf') % t, {"notifications": full[t][i:i + BATCH_SIZE]})
notifications_list = full[t][i:i + BATCH_SIZE]
if t == "slack":
background_tasks.add_task(slack.send_batch, notifications_list=notifications_list)
try:
slack.send_batch(notifications_list=notifications_list)
except Exception as e:
logging.error("!!!Error while sending slack notifications batch")
logging.error(str(e))
elif t == "email":
background_tasks.add_task(send_by_email_batch, notifications_list=notifications_list)
try:
send_by_email_batch(notifications_list=notifications_list)
except Exception as e:
logging.error("!!!Error while sending email notifications batch")
logging.error(str(e))
elif t == "webhook":
background_tasks.add_task(webhook.trigger_batch, data_list=notifications_list)
try:
webhook.trigger_batch(data_list=notifications_list)
except Exception as e:
logging.error("!!!Error while sending webhook notifications batch")
logging.error(str(e))
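Note: notifications are flushed per channel in slices of BATCH_SIZE = 200, and each batch is now sent synchronously in its own try/except instead of through FastAPI BackgroundTasks; with, say, 450 Slack notifications this loop issues three send_batch calls (200, 200 and 50), and a failing batch no longer takes the remaining ones down with it.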
def send_by_email(notification, destination):
@@ -175,3 +158,13 @@ def delete(project_id, alert_id):
{"alert_id": alert_id, "project_id": project_id})
)
return {"data": {"state": "success"}}
def get_predefined_values():
values = [e.value for e in schemas.AlertColumn]
values = [{"name": v, "value": v,
"unit": "count" if v.endswith(".count") else "ms",
"predefined": True,
"metricId": None,
"seriesId": None} for v in values]
return values
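For illustration, assuming the AlertColumn enum's value for the errors__4xx_5xx__count member is the dotted string "errors.4xx_5xx.count" (the form suggested by the double underscores), one element of the returned list would be:

# hypothetical element of get_predefined_values()
{"name": "errors.4xx_5xx.count", "value": "errors.4xx_5xx.count",
 "unit": "count",  # ends with ".count"; every other column is reported in "ms"
 "predefined": True, "metricId": None, "seriesId": None}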

View file

@@ -0,0 +1,27 @@
from chalicelib.utils import pg_client, helper
def get_all_alerts():
with pg_client.PostgresClient(long_query=True) as cur:
query = """SELECT -1 AS tenant_id,
alert_id,
project_id,
detection_method,
query,
options,
(EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
alerts.name,
alerts.series_id,
filter
FROM public.alerts
LEFT JOIN metric_series USING (series_id)
INNER JOIN projects USING (project_id)
WHERE alerts.deleted_at ISNULL
AND alerts.active
AND projects.active
AND projects.deleted_at ISNULL
AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL)
ORDER BY alerts.created_at;"""
cur.execute(query=query)
all_alerts = helper.list_to_camel_case(cur.fetchall())
return all_alerts
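Note: the LEFT JOIN keeps predefined alerts (series_id IS NULL) in the result with a NULL filter, while series-backed alerts carry their saved metric_series filter; tenant_id is hardcoded to -1 in this FOSS build. After list_to_camel_case, a row looks roughly like this (values hypothetical):

{"tenantId": -1, "alertId": 3, "projectId": 1, "detectionMethod": "threshold",
 "query": {"left": "errors.4xx_5xx.count", "operator": ">=", "right": 10.0},
 "options": {"currentPeriod": 15, "previousPeriod": 15, "renotifyInterval": 720},
 "createdAt": 1643184000000, "name": "4xx-5xx spike", "seriesId": None, "filter": None}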

View file

@@ -0,0 +1,250 @@
import decimal
import logging
import schemas
from chalicelib.core import alerts_listener
from chalicelib.core import sessions, alerts
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
LeftToDb = {
schemas.AlertColumn.performance__dom_content_loaded__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
"formula": "COALESCE(AVG(NULLIF(dom_content_loaded_time ,0)),0)"},
schemas.AlertColumn.performance__first_meaningful_paint__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
"formula": "COALESCE(AVG(NULLIF(first_contentful_paint_time,0)),0)"},
schemas.AlertColumn.performance__page_load_time__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(load_time ,0))"},
schemas.AlertColumn.performance__dom_build_time__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(dom_building_time,0))"},
schemas.AlertColumn.performance__speed_index__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)", "formula": "AVG(NULLIF(speed_index,0))"},
schemas.AlertColumn.performance__page_response_time__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(response_time,0))"},
schemas.AlertColumn.performance__ttfb__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(first_paint_time,0))"},
schemas.AlertColumn.performance__time_to_render__average: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(visually_complete,0))"},
schemas.AlertColumn.performance__image_load_time__average: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='img'"},
schemas.AlertColumn.performance__request_load_time__average: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='fetch'"},
schemas.AlertColumn.resources__load_time__average: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(resources.duration,0))"},
schemas.AlertColumn.resources__missing__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE"},
schemas.AlertColumn.errors__4xx_5xx__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
"condition": "status/100!=2"},
schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=4"},
schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=5"},
schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
schemas.AlertColumn.performance__crashes__count: {
"table": "(SELECT *, start_ts AS timestamp FROM public.sessions WHERE errors_count > 0) AS sessions",
"formula": "COUNT(DISTINCT session_id)", "condition": "errors_count > 0"},
schemas.AlertColumn.errors__javascript__count: {
"table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
"formula": "COUNT(DISTINCT session_id)", "condition": "source='js_exception'", "joinSessions": False},
schemas.AlertColumn.errors__backend__count: {
"table": "events.errors INNER JOIN public.errors AS m_errors USING (error_id)",
"formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
}
# This is the frequency of execution for each threshold
TimeInterval = {
15: 3,
30: 5,
60: 10,
120: 20,
240: 30,
1440: 60,
}
def can_check(a) -> bool:
now = TimeUTC.now()
repetitionBase = a["options"]["currentPeriod"] \
if a["detectionMethod"] == schemas.AlertDetectionMethod.change \
and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
else a["options"]["previousPeriod"]
if TimeInterval.get(repetitionBase) is None:
logging.error(f"repetitionBase: {repetitionBase} NOT FOUND")
return False
return (a["options"]["renotifyInterval"] <= 0 or
a["options"].get("lastNotification") is None or
a["options"]["lastNotification"] <= 0 or
((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
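A worked example of the scheduling arithmetic, using a hypothetical alert dict shaped like the rows returned by alerts_listener:

# threshold alert: repetitionBase falls back to previousPeriod = 15,
# and TimeInterval[15] = 3 means "eligible every 3 minutes"
a = {"detectionMethod": "threshold",
     "createdAt": TimeUTC.now() - 6 * 60 * 1000,  # created 6 minutes ago
     "options": {"currentPeriod": 15, "previousPeriod": 15,
                 "renotifyInterval": 720, "lastNotification": 0}}
# the renotify gate passes (lastNotification <= 0) and
# (now - createdAt) % 3 min == 0, which is < 60 s, so can_check(a) is True;
# it is True again roughly 9, 12, 15 ... minutes after creation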
def Build(a):
params = {"project_id": a["projectId"]}
full_args = {}
j_s = True
if a["seriesId"] is not None:
a["filter"]["sort"] = "session_id"
a["filter"]["order"] = "DESC"
a["filter"]["startDate"] = -1
a["filter"]["endDate"] = TimeUTC.now()
full_args, query_part, sort = sessions.search_query_parts(
data=schemas.SessionsSearchPayloadSchema.parse_obj(a["filter"]),
error_status=None, errors_only=False,
favorite_only=False, issue=None, project_id=a["projectId"],
user_id=None)
subQ = f"""SELECT COUNT(session_id) AS value
{query_part}"""
else:
colDef = LeftToDb[a["query"]["left"]]
subQ = f"""SELECT {colDef["formula"]} AS value
FROM {colDef["table"]}
WHERE project_id = %(project_id)s
{"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
j_s = colDef.get("joinSessions", True)
q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""
# if len(colDef.group) > 0 {
# subQ = subQ.Column(colDef.group + " AS group_value")
# subQ = subQ.GroupBy(colDef.group)
# q = q.Column("group_value")
# }
if a["detectionMethod"] == schemas.AlertDetectionMethod.threshold:
if a["seriesId"] is not None:
q += f""" FROM ({subQ}) AS stat"""
else:
q += f""" FROM ({subQ} AND timestamp>=%(startDate)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}) AS stat"""
params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
else:
if a["options"]["change"] == schemas.AlertDetectionChangeType.change:
# if len(colDef.group) > 0:
# subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod * 60))
# sub2, args2, _ := subQ.Where(
# sq.And{
# sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod * 60),
# sq.Expr("timestamp>=$4 ", time.Now().Unix()-2 * a.Options.CurrentPeriod * 60),
# }).ToSql()
# sub1 := sq.Select("group_value", "(stat1.value-stat2.value) AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
# q = q.FromSelect(sub1, "stat")
# else:
if a["seriesId"] is not None:
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
sub1 = f"SELECT (({subQ})-({sub2})) AS value"
q += f" FROM ( {sub1} ) AS stat"
params = {**params, **full_args,
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
"timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
else:
sub1 = f"""{subQ} AND timestamp>=%(startDate)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} AND timestamp<%(startDate)s
AND timestamp>=%(timestamp_sub2)s
{"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
q += f" FROM ( {sub1} ) AS stat"
else:
# if len(colDef.group) >0 {
# subq1 := subQ.Where(sq.Expr("timestamp>=$2 ", time.Now().Unix()-a.Options.CurrentPeriod * 60))
# sub2, args2, _ := subQ.Where(
# sq.And{
# sq.Expr("timestamp<$3 ", time.Now().Unix()-a.Options.CurrentPeriod * 60),
# sq.Expr("timestamp>=$4 ", time.Now().Unix()-a.Options.PreviousPeriod * 60-a.Options.CurrentPeriod * 60),
# }).ToSql()
# sub1 := sq.Select("group_value", "(stat1.value/stat2.value-1)*100 AS value").FromSelect(subq1, "stat1").JoinClause("INNER JOIN ("+sub2+") AS stat2 USING(group_value)", args2...)
# q = q.FromSelect(sub1, "stat")
# } else {
if a["seriesId"] is not None:
sub2 = subQ.replace("%(startDate)s", "%(timestamp_sub2)s").replace("%(endDate)s", "%(startDate)s")
sub1 = f"SELECT (({subQ})/NULLIF(({sub2}),0)-1)*100 AS value"
q += f" FROM ({sub1}) AS stat"
params = {**params, **full_args,
"startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
"timestamp_sub2": TimeUTC.now() \
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
* 60 * 1000}
else:
sub1 = f"""{subQ} AND timestamp>=%(startDate)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} AND timestamp<%(startDate)s
AND timestamp>=%(timestamp_sub2)s
{"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
params["timestamp_sub2"] = TimeUTC.now() \
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"
q += f" FROM ({sub1}) AS stat"
return q, params
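To make the string assembly concrete: a predefined threshold alert whose query selects the errors__4xx__count column with operator >= and right = 10, and with no seriesId, comes out of Build() roughly as follows (whitespace normalized; params carry project_id plus startDate = now minus currentPeriod):

SELECT coalesce(value,0) AS value, coalesce(value,0) >= 10 AS valid
FROM (SELECT COUNT(session_id) AS value
      FROM events.resources INNER JOIN public.sessions USING(session_id)
      WHERE project_id = %(project_id)s AND status/100=4
        AND timestamp>=%(startDate)s
        AND sessions.start_ts >= %(startDate)s) AS stat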
def process():
notifications = []
all_alerts = alerts_listener.get_all_alerts()
with pg_client.PostgresClient() as cur:
for alert in all_alerts:
if can_check(alert):
logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}")
query, params = Build(alert)
query = cur.mogrify(query, params)
logging.debug(alert)
logging.debug(query)
try:
cur.execute(query)
result = cur.fetchone()
if result["valid"]:
logging.info("Valid alert, notifying users")
notifications.append({
"alertId": alert["alertId"],
"tenantId": alert["tenantId"],
"title": alert["name"],
"description": f"has been triggered, {alert['query']['left']} = {round(result['value'], 2)} ({alert['query']['operator']} {alert['query']['right']}).",
"buttonText": "Check metrics for more details",
"buttonUrl": f"/{alert['projectId']}/metrics",
"imageUrl": None,
"options": {"source": "ALERT", "sourceId": alert["alertId"],
"sourceMeta": alert["detectionMethod"],
"message": alert["options"]["message"], "projectId": alert["projectId"],
"data": {"title": alert["name"],
"limitValue": alert["query"]["right"],
"actualValue": float(result["value"]) \
if isinstance(result["value"], decimal.Decimal) \
else result["value"],
"operator": alert["query"]["operator"],
"trigger": alert["query"]["left"],
"alertId": alert["alertId"],
"detectionMethod": alert["detectionMethod"],
"currentPeriod": alert["options"]["currentPeriod"],
"previousPeriod": alert["options"]["previousPeriod"],
"createdAt": TimeUTC.now()}},
})
except Exception as e:
logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
logging.error(str(e))
logging.error(query)
if len(notifications) > 0:
cur.execute(
cur.mogrify(f"""UPDATE public.Alerts
SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
if len(notifications) > 0:
alerts.process_notifications(notifications)
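Note: after the loop, every alert that fired gets options.lastNotification stamped with the current epoch milliseconds through the jsonb concatenation above; that is the timestamp can_check later compares against renotifyInterval before letting the alert fire again.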

View file

@@ -1,3 +1,4 @@
import schemas
from chalicelib.utils import pg_client, helper
from chalicelib.core import projects, sessions, sessions_metas
import requests
@@ -44,7 +45,7 @@ def get_live_sessions(project_id, filters=None):
continue
filter_type = f["type"].upper()
f["value"] = sessions.__get_sql_value_multiple(f["value"])
if filter_type == sessions_metas.meta_type.USERID:
if filter_type == schemas.FilterType.user_id:
op = sessions.__get_sql_operator(f["operator"])
extra_constraints.append(f"user_id {op} %(value_{i})s")
extra_params[f"value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op)

View file

@@ -0,0 +1,232 @@
import json
import schemas
from chalicelib.core import sessions
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
def try_live(project_id, data: schemas.TryCustomMetricsSchema):
results = []
for s in data.series:
s.filter.startDate = data.startDate
s.filter.endDate = data.endDate
results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
view_type=data.viewType))
if data.viewType == schemas.MetricViewType.progress:
r = {"count": results[-1]}
diff = s.filter.endDate - s.filter.startDate
s.filter.endDate = data.startDate
s.filter.startDate = data.startDate - diff
r["previousCount"] = sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
view_type=data.viewType)
r["countProgress"] = helper.__progress(old_val=r["previousCount"], new_val=r["count"])
results[-1] = r
return results
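Note: for the progress view type, each series is re-queried over the window of equal length immediately preceding the requested range, and countProgress is derived from count and previousCount by helper.__progress, presumably as a relative percentage change (so count = 120 against previousCount = 100 would read as +20).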
def make_chart(project_id, user_id, metric_id, data: schemas.CustomMetricChartPayloadSchema):
metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
if metric is None:
return None
metric: schemas.TryCustomMetricsSchema = schemas.TryCustomMetricsSchema.parse_obj({**data.dict(), **metric})
return try_live(project_id=project_id, data=metric)
def create(project_id, user_id, data: schemas.CreateCustomMetricsSchema):
with pg_client.PostgresClient() as cur:
_data = {}
for i, s in enumerate(data.series):
for k in s.dict().keys():
_data[f"{k}_{i}"] = s.__getattribute__(k)
_data[f"index_{i}"] = i
_data[f"filter_{i}"] = s.filter.json()
series_len = len(data.series)
data.series = None
params = {"user_id": user_id, "project_id": project_id, **data.dict(), **_data}
query = cur.mogrify(f"""\
WITH m AS (INSERT INTO metrics (project_id, user_id, name)
VALUES (%(project_id)s, %(user_id)s, %(name)s)
RETURNING *)
INSERT
INTO metric_series(metric_id, index, name, filter)
VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)"
for i in range(series_len)])}
RETURNING metric_id;""", params)
cur.execute(
query
)
r = cur.fetchone()
return {"data": get(metric_id=r["metric_id"], project_id=project_id, user_id=user_id)}
def __get_series_id(metric_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT series_id
FROM metric_series
WHERE metric_series.metric_id = %(metric_id)s
AND metric_series.deleted_at ISNULL;""",
{"metric_id": metric_id}
)
)
rows = cur.fetchall()
return [r["series_id"] for r in rows]
def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSchema):
series_ids = __get_series_id(metric_id)
n_series = []
d_series_ids = []
u_series = []
u_series_ids = []
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
"user_id": user_id, "project_id": project_id}
for i, s in enumerate(data.series):
prefix = "u_"
if s.series_id is None:
n_series.append({"i": i, "s": s})
prefix = "n_"
else:
u_series.append({"i": i, "s": s})
u_series_ids.append(s.series_id)
ns = s.dict()
for k in ns.keys():
if k == "filter":
ns[k] = json.dumps(ns[k])
params[f"{prefix}{k}_{i}"] = ns[k]
for i in series_ids:
if i not in u_series_ids:
d_series_ids.append(i)
params["d_series_ids"] = tuple(d_series_ids)
with pg_client.PostgresClient() as cur:
sub_queries = []
if len(n_series) > 0:
sub_queries.append(f"""\
n AS (INSERT INTO metric_series (metric_id, index, name, filter)
VALUES {",".join([f"(%(metric_id)s, %(n_index_{s['i']})s, %(n_name_{s['i']})s, %(n_filter_{s['i']})s::jsonb)"
for s in n_series])}
RETURNING 1)""")
if len(u_series) > 0:
sub_queries.append(f"""\
u AS (UPDATE metric_series
SET name=series.name,
filter=series.filter,
index=series.index
FROM (VALUES {",".join([f"(%(u_series_id_{s['i']})s,%(u_index_{s['i']})s,%(u_name_{s['i']})s,%(u_filter_{s['i']})s::jsonb)"
for s in u_series])}) AS series(series_id, index, name, filter)
WHERE metric_series.metric_id =%(metric_id)s AND metric_series.series_id=series.series_id
RETURNING 1)""")
if len(d_series_ids) > 0:
sub_queries.append("""\
d AS (DELETE FROM metric_series WHERE metric_id =%(metric_id)s AND series_id IN %(d_series_ids)s
RETURNING 1)""")
query = cur.mogrify(f"""\
{"WITH " if len(sub_queries) > 0 else ""}{",".join(sub_queries)}
UPDATE metrics
SET name = %(name)s, is_public= %(is_public)s
WHERE metric_id = %(metric_id)s
AND project_id = %(project_id)s
AND (user_id = %(user_id)s OR is_public)
RETURNING metric_id;""", params)
cur.execute(
query
)
return get(metric_id=metric_id, project_id=project_id, user_id=user_id)
def get_all(project_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT *
FROM metrics
LEFT JOIN LATERAL (SELECT jsonb_agg(metric_series.* ORDER BY index) AS series
FROM metric_series
WHERE metric_series.metric_id = metrics.metric_id
AND metric_series.deleted_at ISNULL
) AS metric_series ON (TRUE)
WHERE metrics.project_id = %(project_id)s
AND metrics.deleted_at ISNULL
AND (user_id = %(user_id)s OR is_public)
ORDER BY created_at;""",
{"project_id": project_id, "user_id": user_id}
)
)
rows = cur.fetchall()
for r in rows:
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
for s in r["series"]:
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
rows = helper.list_to_camel_case(rows)
return rows
def delete(project_id, metric_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
UPDATE public.metrics
SET deleted_at = timezone('utc'::text, now())
WHERE project_id = %(project_id)s
AND metric_id = %(metric_id)s
AND (user_id = %(user_id)s OR is_public);""",
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
)
return {"state": "success"}
def get(metric_id, project_id, user_id, flatten=True):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT *
FROM metrics
LEFT JOIN LATERAL (SELECT jsonb_agg(metric_series.* ORDER BY index) AS series
FROM metric_series
WHERE metric_series.metric_id = metrics.metric_id
AND metric_series.deleted_at ISNULL
) AS metric_series ON (TRUE)
WHERE metrics.project_id = %(project_id)s
AND metrics.deleted_at ISNULL
AND (metrics.user_id = %(user_id)s OR metrics.is_public)
AND metrics.metric_id = %(metric_id)s
ORDER BY created_at;""",
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
)
)
row = cur.fetchone()
if row is None:
return None
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
if flatten:
for s in row["series"]:
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
return helper.dict_to_camel_case(row)
def get_series_for_alert(project_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT series_id AS value,
metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count' AS name,
'count' AS unit,
FALSE AS predefined,
metric_id,
series_id
FROM metric_series
INNER JOIN metrics USING (metric_id)
WHERE metrics.deleted_at ISNULL
AND metrics.project_id = %(project_id)s
AND (user_id = %(user_id)s OR is_public)
ORDER BY name;""",
{"project_id": project_id, "user_id": user_id}
)
)
rows = cur.fetchall()
return helper.list_to_camel_case(rows)
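Note: get_series_for_alert() mirrors the row shape of alerts.get_predefined_values() (name, value, unit, predefined, metricId, seriesId), so the UI can concatenate both lists into a single alert-trigger picker; custom series come back with predefined FALSE and a synthetic dotted name ending in '.count'.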

View file

@@ -1,3 +1,4 @@
import schemas
from chalicelib.core import metadata
from chalicelib.utils import args_transformer
from chalicelib.utils import helper, dev
@@ -94,25 +95,25 @@ def __get_meta_constraint(project_id, data):
else:
filter_type = f["key"].upper()
filter_type = [filter_type, "USER" + filter_type, filter_type[4:]]
if any(item in [sessions_metas.meta_type.USERBROWSER] \
if any(item in [schemas.FilterType.user_browser] \
for item in filter_type):
constraints.append(f"sessions.user_browser = %({f['key']}_{i})s")
elif any(item in [sessions_metas.meta_type.USEROS, sessions_metas.meta_type.USEROS_IOS] \
elif any(item in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios] \
for item in filter_type):
constraints.append(f"sessions.user_os = %({f['key']}_{i})s")
elif any(item in [sessions_metas.meta_type.USERDEVICE, sessions_metas.meta_type.USERDEVICE_IOS] \
elif any(item in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios] \
for item in filter_type):
constraints.append(f"sessions.user_device = %({f['key']}_{i})s")
elif any(item in [sessions_metas.meta_type.USERCOUNTRY, sessions_metas.meta_type.USERCOUNTRY_IOS] \
elif any(item in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios] \
for item in filter_type):
constraints.append(f"sessions.user_country = %({f['key']}_{i})s")
elif any(item in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS] \
elif any(item in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios] \
for item in filter_type):
constraints.append(f"sessions.user_id = %({f['key']}_{i})s")
elif any(item in [sessions_metas.meta_type.USERANONYMOUSID, sessions_metas.meta_type.USERANONYMOUSID_IOS] \
elif any(item in [schemas.FilterType.user_anonymous_id, schemas.FilterType.user_anonymous_id_ios] \
for item in filter_type):
constraints.append(f"sessions.user_anonymous_id = %({f['key']}_{i})s")
elif any(item in [sessions_metas.meta_type.REVID, sessions_metas.meta_type.REVID_IOS] \
elif any(item in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios] \
for item in filter_type):
constraints.append(f"sessions.rev_id = %({f['key']}_{i})s")
return constraints

View file

@@ -1,6 +1,7 @@
from chalicelib.utils import pg_client, helper
from chalicelib.core import sessions_metas, metadata
import schemas
from chalicelib.core import issues
from chalicelib.core import sessions_metas, metadata
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.event_filter_definition import SupportedFilter, Event
@@ -235,23 +236,23 @@ def __generic_autocomplete(event: Event):
class event_type:
CLICK = Event(ui_type="CLICK", table="events.clicks", column="label")
INPUT = Event(ui_type="INPUT", table="events.inputs", column="label")
LOCATION = Event(ui_type="LOCATION", table="events.pages", column="base_path")
CUSTOM = Event(ui_type="CUSTOM", table="events_common.customs", column="name")
REQUEST = Event(ui_type="REQUEST", table="events_common.requests", column="url")
GRAPHQL = Event(ui_type="GRAPHQL", table="events.graphql", column="name")
STATEACTION = Event(ui_type="STATEACTION", table="events.state_actions", column="name")
ERROR = Event(ui_type="ERROR", table="events.errors",
CLICK = Event(ui_type=schemas.EventType.click, table="events.clicks", column="label")
INPUT = Event(ui_type=schemas.EventType.input, table="events.inputs", column="label")
LOCATION = Event(ui_type=schemas.EventType.location, table="events.pages", column="base_path")
CUSTOM = Event(ui_type=schemas.EventType.custom, table="events_common.customs", column="name")
REQUEST = Event(ui_type=schemas.EventType.request, table="events_common.requests", column="url")
GRAPHQL = Event(ui_type=schemas.EventType.graphql, table="events.graphql", column="name")
STATEACTION = Event(ui_type=schemas.EventType.state_action, table="events.state_actions", column="name")
ERROR = Event(ui_type=schemas.EventType.error, table="events.errors",
column=None) # column=None because errors are searched by name or message
METADATA = Event(ui_type="METADATA", table="public.sessions", column=None)
METADATA = Event(ui_type=schemas.EventType.metadata, table="public.sessions", column=None)
# IOS
CLICK_IOS = Event(ui_type="CLICK_IOS", table="events_ios.clicks", column="label")
INPUT_IOS = Event(ui_type="INPUT_IOS", table="events_ios.inputs", column="label")
VIEW_IOS = Event(ui_type="VIEW_IOS", table="events_ios.views", column="name")
CUSTOM_IOS = Event(ui_type="CUSTOM_IOS", table="events_common.customs", column="name")
REQUEST_IOS = Event(ui_type="REQUEST_IOS", table="events_common.requests", column="url")
ERROR_IOS = Event(ui_type="ERROR_IOS", table="events_ios.crashes",
CLICK_IOS = Event(ui_type=schemas.EventType.click_ios, table="events_ios.clicks", column="label")
INPUT_IOS = Event(ui_type=schemas.EventType.input_ios, table="events_ios.inputs", column="label")
VIEW_IOS = Event(ui_type=schemas.EventType.view_ios, table="events_ios.views", column="name")
CUSTOM_IOS = Event(ui_type=schemas.EventType.custom_ios, table="events_common.customs", column="name")
REQUEST_IOS = Event(ui_type=schemas.EventType.request_ios, table="events_common.requests", column="url")
ERROR_IOS = Event(ui_type=schemas.EventType.error_ios, table="events_ios.crashes",
column=None) # column=None because errors are searched by name or message
@@ -389,18 +390,18 @@ def search_pg2(text, event_type, project_id, source, key):
if not event_type:
return {"data": __get_autocomplete_table(text, project_id)}
if event_type.upper() in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type.upper()].get(project_id=project_id, value=text, key=key, source=source)
if event_type.upper() + "_IOS" in SUPPORTED_TYPES.keys():
rows += SUPPORTED_TYPES[event_type.upper() + "_IOS"].get(project_id=project_id, value=text, key=key,
source=source)
elif event_type.upper() + "_IOS" in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type.upper() + "_IOS"].get(project_id=project_id, value=text, key=key,
source=source)
elif event_type.upper() in sessions_metas.SUPPORTED_TYPES.keys():
if event_type in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
if event_type + "_IOS" in SUPPORTED_TYPES.keys():
rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key,
source=source)
elif event_type + "_IOS" in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key,
source=source)
elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
return sessions_metas.search(text, event_type, project_id)
elif event_type.upper().endswith("_IOS") \
and event_type.upper()[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
elif event_type.endswith("_IOS") \
and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
return sessions_metas.search(text, event_type, project_id)
else:
return {"errors": ["unsupported event"]}

View file

@@ -1,7 +1,8 @@
import json
import chalicelib.utils.helper
from chalicelib.core import events, significance, sessions
import schemas
from chalicelib.core import significance, sessions
from chalicelib.utils import dev
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
@@ -11,23 +12,24 @@ REMOVE_KEYS = ["key", "_key", "startDate", "endDate"]
ALLOW_UPDATE_FOR = ["name", "filter"]
def filter_stages(stages):
ALLOW_TYPES = [events.event_type.CLICK.ui_type, events.event_type.INPUT.ui_type,
events.event_type.LOCATION.ui_type, events.event_type.CUSTOM.ui_type,
events.event_type.CLICK_IOS.ui_type, events.event_type.INPUT_IOS.ui_type,
events.event_type.VIEW_IOS.ui_type, events.event_type.CUSTOM_IOS.ui_type, ]
return [s for s in stages if s["type"] in ALLOW_TYPES and s.get("value") is not None]
# def filter_stages(stages):
# ALLOW_TYPES = [events.event_type.CLICK.ui_type, events.event_type.INPUT.ui_type,
# events.event_type.LOCATION.ui_type, events.event_type.CUSTOM.ui_type,
# events.event_type.CLICK_IOS.ui_type, events.event_type.INPUT_IOS.ui_type,
# events.event_type.VIEW_IOS.ui_type, events.event_type.CUSTOM_IOS.ui_type, ]
# return [s for s in stages if s["type"] in ALLOW_TYPES and s.get("value") is not None]
def create(project_id, user_id, name, filter, is_public):
def create(project_id, user_id, name, filter: schemas.FunnelSearchPayloadSchema, is_public):
helper.delete_keys_from_dict(filter, REMOVE_KEYS)
filter["events"] = filter_stages(stages=filter.get("events", []))
# filter.events = filter_stages(stages=filter.events)
with pg_client.PostgresClient() as cur:
query = cur.mogrify("""\
INSERT INTO public.funnels (project_id, user_id, name, filter,is_public)
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(filter)s::jsonb,%(is_public)s)
RETURNING *;""",
{"user_id": user_id, "project_id": project_id, "name": name, "filter": json.dumps(filter),
{"user_id": user_id, "project_id": project_id, "name": name,
"filter": json.dumps(filter.dict()),
"is_public": is_public})
cur.execute(
@@ -40,7 +42,7 @@ def create(project_id, user_id, name, filter, is_public):
return {"data": r}
def update(funnel_id, user_id, name=None, filter=None, is_public=None):
def update(funnel_id, user_id, project_id, name=None, filter=None, is_public=None):
s_query = []
if filter is not None:
helper.delete_keys_from_dict(filter, REMOVE_KEYS)
@@ -56,9 +58,11 @@ def update(funnel_id, user_id, name=None, filter=None, is_public=None):
UPDATE public.funnels
SET {" , ".join(s_query)}
WHERE funnel_id=%(funnel_id)s
RETURNING *;""",
{"user_id": user_id, "funnel_id": funnel_id, "name": name,
"filter": json.dumps(filter) if filter is not None else None, "is_public": is_public})
AND project_id = %(project_id)s
AND (user_id = %(user_id)s OR is_public)
RETURNING *;""", {"user_id": user_id, "funnel_id": funnel_id, "name": name,
"filter": json.dumps(filter) if filter is not None else None, "is_public": is_public,
"project_id": project_id})
# print("--------------------")
# print(query)
# print("--------------------")
@@ -74,13 +78,12 @@ def update(funnel_id, user_id, name=None, filter=None, is_public=None):
def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date=None, details=False):
with pg_client.PostgresClient() as cur:
team_query = ""
cur.execute(
cur.mogrify(
f"""\
SELECT DISTINCT ON (funnels.funnel_id) funnel_id,project_id, user_id, name, created_at, deleted_at, is_public
SELECT funnel_id, project_id, user_id, name, created_at, deleted_at, is_public
{",filter" if details else ""}
FROM public.funnels {team_query}
FROM public.funnels
WHERE project_id = %(project_id)s
AND funnels.deleted_at IS NULL
AND (funnels.user_id = %(user_id)s OR funnels.is_public);""",
@@ -93,12 +96,14 @@ def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date
for row in rows:
row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
if details:
row["filter"]["events"] = filter_stages(row["filter"]["events"])
# row["filter"]["events"] = filter_stages(row["filter"]["events"])
get_start_end_time(filter_d=row["filter"], range_value=range_value, start_date=start_date,
end_date=end_date)
counts = sessions.search2_pg(data=row["filter"], project_id=project_id, user_id=None, count_only=True)
counts = sessions.search2_pg(data=schemas.SessionsSearchPayloadSchema.parse_obj(row["filter"]),
project_id=project_id, user_id=None, count_only=True)
row["sessionsCount"] = counts["countSessions"]
row["usersCount"] = counts["countUsers"]
filter_clone = dict(row["filter"])
overview = significance.get_overview(filter_d=row["filter"], project_id=project_id)
row["stages"] = overview["stages"]
row.pop("filter")
@@ -107,6 +112,7 @@ def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date
row["criticalIssuesCount"] = overview["criticalIssuesCount"]
row["missedConversions"] = 0 if len(row["stages"]) < 2 \
else row["stages"][0]["sessionsCount"] - row["stages"][-1]["sessionsCount"]
row["filter"] = helper.old_search_payload_to_flat(filter_clone)
return rows
@@ -135,7 +141,8 @@ def delete(project_id, funnel_id, user_id):
UPDATE public.funnels
SET deleted_at = timezone('utc'::text, now())
WHERE project_id = %(project_id)s
AND funnel_id = %(funnel_id)s;""",
AND funnel_id = %(funnel_id)s
AND (user_id = %(user_id)s OR is_public);""",
{"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id})
)
@@ -143,28 +150,29 @@ def delete(project_id, funnel_id, user_id):
def get_sessions(project_id, funnel_id, user_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id)
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id, flatten=False)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date)
return sessions.search2_pg(data=f["filter"], project_id=project_id, user_id=user_id)
return sessions.search2_pg(data=schemas.SessionsSearchPayloadSchema.parse_obj(f["filter"]), project_id=project_id,
user_id=user_id)
def get_sessions_on_the_fly(funnel_id, project_id, user_id, data):
data["events"] = filter_stages(data.get("events", []))
if len(data["events"]) == 0:
f = get(funnel_id=funnel_id, project_id=project_id)
def get_sessions_on_the_fly(funnel_id, project_id, user_id, data: schemas.FunnelSearchPayloadSchema):
# data.events = filter_stages(data.events)
if len(data.events) == 0:
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None),
start_date=data.get('startDate', None),
end_date=data.get('endDate', None))
data = f["filter"]
return sessions.search2_pg(data=data, project_id=project_id, user_id=user_id)
get_start_end_time(filter_d=f["filter"], range_value=data.range_value,
start_date=data.startDate, end_date=data.endDate)
data = schemas.FunnelSearchPayloadSchema.parse_obj(f["filter"])
return sessions.search2_pg(data=data, project_id=project_id,
user_id=user_id)
def get_top_insights(project_id, funnel_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id)
def get_top_insights(project_id, user_id, funnel_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id, flatten=False)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date)
@@ -174,10 +182,10 @@ def get_top_insights(project_id, funnel_id, range_value=None, start_date=None, e
"totalDropDueToIssues": total_drop_due_to_issues}}
def get_top_insights_on_the_fly(funnel_id, project_id, data):
data["events"] = filter_stages(data.get("events", []))
def get_top_insights_on_the_fly(funnel_id, user_id, project_id, data):
# data["events"] = filter_stages(data.get("events", []))
if len(data["events"]) == 0:
f = get(funnel_id=funnel_id, project_id=project_id)
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None),
@@ -191,8 +199,8 @@ def get_top_insights_on_the_fly(funnel_id, project_id, data):
"totalDropDueToIssues": total_drop_due_to_issues}}
def get_issues(project_id, funnel_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id)
def get_issues(project_id, user_id, funnel_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id, flatten=False)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date)
@@ -202,12 +210,12 @@ def get_issues(project_id, funnel_id, range_value=None, start_date=None, end_dat
@dev.timed
def get_issues_on_the_fly(funnel_id, project_id, data):
def get_issues_on_the_fly(funnel_id, user_id, project_id, data):
first_stage = data.get("firstStage")
last_stage = data.get("lastStage")
data["events"] = filter_stages(data.get("events", []))
# data["events"] = filter_stages(data.get("events", []))
if len(data["events"]) == 0:
f = get(funnel_id=funnel_id, project_id=project_id)
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None),
@@ -220,7 +228,7 @@ def get_issues_on_the_fly(funnel_id, project_id, data):
last_stage=last_stage))}
def get(funnel_id, project_id):
def get(funnel_id, project_id, user_id, flatten=True):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
@@ -230,8 +238,9 @@ def get(funnel_id, project_id):
FROM public.funnels
WHERE project_id = %(project_id)s
AND deleted_at IS NULL
AND funnel_id = %(funnel_id)s;""",
{"funnel_id": funnel_id, "project_id": project_id}
AND funnel_id = %(funnel_id)s
AND (user_id = %(user_id)s OR is_public);""",
{"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id}
)
)
@@ -240,22 +249,27 @@ def get(funnel_id, project_id):
return None
f["createdAt"] = TimeUTC.datetime_to_timestamp(f["createdAt"])
f["filter"]["events"] = filter_stages(stages=f["filter"]["events"])
# f["filter"]["events"] = filter_stages(stages=f["filter"]["events"])
if flatten:
f["filter"] = helper.old_search_payload_to_flat(f["filter"])
return f
@dev.timed
def search_by_issue(user_id, project_id, funnel_id, issue_id, data, range_value=None, start_date=None, end_date=None):
if len(data.get("events", [])) == 0:
f = get(funnel_id=funnel_id, project_id=project_id)
def search_by_issue(user_id, project_id, funnel_id, issue_id, data: schemas.FunnelSearchPayloadSchema, range_value=None,
start_date=None, end_date=None):
if len(data.events) == 0:
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=data.get('startDate', start_date),
end_date=data.get('endDate', end_date))
data = f["filter"]
data.startDate = data.startDate if data.startDate is not None else start_date
data.endDate = data.endDate if data.endDate is not None else end_date
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=data.startDate,
end_date=data.endDate)
data = schemas.FunnelSearchPayloadSchema.parse_obj(f["filter"])
# insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id)
issues = get_issues_on_the_fly(funnel_id=funnel_id, project_id=project_id, data=data).get("issues", {})
issues = get_issues_on_the_fly(funnel_id=funnel_id, user_id=user_id, project_id=project_id, data=data.dict()) \
.get("issues", {})
issues = issues.get("significant", []) + issues.get("insignificant", [])
issue = None
for i in issues:

View file

@@ -1,3 +1,4 @@
import schemas
from chalicelib.core import sessions_metas
from chalicelib.utils import helper, dev
from chalicelib.utils import pg_client
@@ -45,7 +46,7 @@ def journey(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=
elif f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
event_table = JOURNEY_TYPES[f["value"]]["table"]
event_column = JOURNEY_TYPES[f["value"]]["column"]
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query_subset.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
@@ -300,7 +301,7 @@ def feature_retention(project_id, startTimestamp=TimeUTC.now(delta_days=-70), en
elif f["type"] == "EVENT_VALUE":
event_value = f["value"]
default = False
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
event_table = JOURNEY_TYPES[event_type]["table"]
@ -390,7 +391,7 @@ def feature_acquisition(project_id, startTimestamp=TimeUTC.now(delta_days=-70),
elif f["type"] == "EVENT_VALUE":
event_value = f["value"]
default = False
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
event_table = JOURNEY_TYPES[event_type]["table"]
@ -477,7 +478,7 @@ def feature_popularity_frequency(project_id, startTimestamp=TimeUTC.now(delta_da
if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
event_table = JOURNEY_TYPES[f["value"]]["table"]
event_column = JOURNEY_TYPES[f["value"]]["column"]
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
@ -543,7 +544,7 @@ def feature_adoption(project_id, startTimestamp=TimeUTC.now(delta_days=-70), end
elif f["type"] == "EVENT_VALUE":
event_value = f["value"]
default = False
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
event_table = JOURNEY_TYPES[event_type]["table"]
@ -613,7 +614,7 @@ def feature_adoption_top_users(project_id, startTimestamp=TimeUTC.now(delta_days
elif f["type"] == "EVENT_VALUE":
event_value = f["value"]
default = False
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
event_table = JOURNEY_TYPES[event_type]["table"]
@ -674,7 +675,7 @@ def feature_adoption_daily_usage(project_id, startTimestamp=TimeUTC.now(delta_da
elif f["type"] == "EVENT_VALUE":
event_value = f["value"]
default = False
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query_chart.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
event_table = JOURNEY_TYPES[event_type]["table"]
@ -737,7 +738,7 @@ def feature_intensity(project_id, startTimestamp=TimeUTC.now(delta_days=-70), en
if f["type"] == "EVENT_TYPE" and JOURNEY_TYPES.get(f["value"]):
event_table = JOURNEY_TYPES[f["value"]]["table"]
event_column = JOURNEY_TYPES[f["value"]]["column"]
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
pg_sub_query.append(f"length({event_column})>2")
@ -772,7 +773,7 @@ def users_active(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTime
for f in filters:
if f["type"] == "PERIOD" and f["value"] in ["DAY", "WEEK"]:
period = f["value"]
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query_chart.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
@ -844,7 +845,7 @@ def users_slipping(project_id, startTimestamp=TimeUTC.now(delta_days=-70), endTi
elif f["type"] == "EVENT_VALUE":
event_value = f["value"]
default = False
elif f["type"] in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
elif f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
pg_sub_query.append(f"sessions.user_id = %(user_id)s")
extra_values["user_id"] = f["value"]
event_table = JOURNEY_TYPES[event_type]["table"]
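
Every hunk in this file makes the same substitution: the string constants from sessions_metas.meta_type give way to schemas.FilterType, so each traversal guards the user-id filter identically. A hedged distillation of the recurring branch (pg_sub_query and extra_values stand in for the enclosing function's locals):

import schemas  # repo-local module, as imported at the top of this file

pg_sub_query, extra_values = [], {}
f = {"type": schemas.FilterType.user_id, "value": "jane@acme.io"}  # hypothetical filter
if f["type"] in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
    pg_sub_query.append("sessions.user_id = %(user_id)s")
    extra_values["user_id"] = f["value"]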

View file

@ -0,0 +1,15 @@
import schemas
def get_col(perf: schemas.PerformanceEventType):
return {
schemas.PerformanceEventType.location_dom_complete: {"column": "dom_building_time", "extraJoin": None},
schemas.PerformanceEventType.location_ttfb: {"column": "ttfb", "extraJoin": None},
schemas.PerformanceEventType.location_avg_cpu_load: {"column": "avg_cpu", "extraJoin": "events.performance"},
schemas.PerformanceEventType.location_avg_memory_usage: {"column": "avg_used_js_heap_size",
"extraJoin": "events.performance"},
schemas.PerformanceEventType.fetch_failed: {"column": "success", "extraJoin": None},
# schemas.PerformanceEventType.fetch_duration: {"column": "duration", "extraJoin": None},
schemas.PerformanceEventType.location_largest_contentful_paint_time: {"column": "first_contentful_paint_time",
"extraJoin": None}
}.get(perf)
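
A hedged usage sketch of get_col: callers resolve the column and the optional extra join before assembling a performance filter (the join snippet below is illustrative, not taken from this diff):

import schemas  # repo-local module

col = get_col(schemas.PerformanceEventType.location_avg_cpu_load)
# col == {"column": "avg_cpu", "extraJoin": "events.performance"}
if col is not None and col["extraJoin"] is not None:
    extra_join = f"INNER JOIN {col['extraJoin']} USING (session_id)"  # assumed shape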

View file

@ -0,0 +1,122 @@
import json
import schemas
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
def create(project_id, user_id, data: schemas.SavedSearchSchema):
with pg_client.PostgresClient() as cur:
data = data.dict()
data["filter"] = json.dumps(data["filter"])
query = cur.mogrify("""\
INSERT INTO public.searches (project_id, user_id, name, filter,is_public)
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(filter)s::jsonb,%(is_public)s)
RETURNING *;""", {"user_id": user_id, "project_id": project_id, **data})
cur.execute(
query
)
r = cur.fetchone()
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
r["filter"] = helper.old_search_payload_to_flat(r["filter"])
r = helper.dict_to_camel_case(r)
return {"data": r}
def update(search_id, project_id, user_id, data: schemas.SavedSearchSchema):
with pg_client.PostgresClient() as cur:
data = data.dict()
data["filter"] = json.dumps(data["filter"])
query = cur.mogrify(f"""\
UPDATE public.searches
SET name = %(name)s,
filter = %(filter)s,
is_public = %(is_public)s
WHERE search_id=%(search_id)s
AND project_id= %(project_id)s
AND (user_id = %(user_id)s OR is_public)
RETURNING *;""", {"search_id": search_id, "project_id": project_id, "user_id": user_id, **data})
cur.execute(
query
)
r = cur.fetchone()
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
r["filter"] = helper.old_search_payload_to_flat(r["filter"])
r = helper.dict_to_camel_case(r)
# r["filter"]["startDate"], r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"])
return r
def get_all(project_id, user_id, details=False):
with pg_client.PostgresClient() as cur:
print(cur.mogrify(
f"""\
SELECT search_id, project_id, user_id, name, created_at, deleted_at, is_public
{",filter" if details else ""}
FROM public.searches
WHERE project_id = %(project_id)s
AND deleted_at IS NULL
AND (user_id = %(user_id)s OR is_public);""",
{"project_id": project_id, "user_id": user_id}
))
cur.execute(
cur.mogrify(
f"""\
SELECT search_id, project_id, user_id, name, created_at, deleted_at, is_public
{",filter" if details else ""}
FROM public.searches
WHERE project_id = %(project_id)s
AND deleted_at IS NULL
AND (user_id = %(user_id)s OR is_public);""",
{"project_id": project_id, "user_id": user_id}
)
)
rows = cur.fetchall()
rows = helper.list_to_camel_case(rows)
for row in rows:
row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
if details:
if isinstance(row["filter"], list) and len(row["filter"]) == 0:
row["filter"] = {}
row["filter"] = helper.old_search_payload_to_flat(row["filter"])
return rows
def delete(project_id, search_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
UPDATE public.searches
SET deleted_at = timezone('utc'::text, now())
WHERE project_id = %(project_id)s
AND search_id = %(search_id)s
AND (user_id = %(user_id)s OR is_public);""",
{"search_id": search_id, "project_id": project_id, "user_id": user_id})
)
return {"state": "success"}
def get(search_id, project_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT
*
FROM public.searches
WHERE project_id = %(project_id)s
AND deleted_at IS NULL
AND search_id = %(search_id)s
AND (user_id = %(user_id)s OR is_public);""",
{"search_id": search_id, "project_id": project_id, "user_id": user_id}
)
)
f = helper.dict_to_camel_case(cur.fetchone())
if f is None:
return None
f["createdAt"] = TimeUTC.datetime_to_timestamp(f["createdAt"])
f["filter"] = helper.old_search_payload_to_flat(f["filter"])
return f
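
Taken together, the module gives saved searches the same owner-or-public visibility as funnels. A hedged round-trip sketch (payload is an assumed schemas.SavedSearchSchema instance; ids are hypothetical):

created = create(project_id=1, user_id=7, data=payload)   # camelCase row, flattened filter
visible = get_all(project_id=1, user_id=7, details=True)  # own + public searches
one = get(search_id=created["data"]["searchId"], project_id=1, user_id=7)
delete(project_id=1, search_id=one["searchId"], user_id=7)  # soft delete via deleted_at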

File diff suppressed because it is too large

View file

@ -1,3 +1,4 @@
import schemas
from chalicelib.utils import pg_client, helper
from chalicelib.utils.event_filter_definition import SupportedFilter
@ -8,40 +9,47 @@ def get_key_values(project_id):
cur.mogrify(
f"""\
SELECT ARRAY_AGG(DISTINCT s.user_os
ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='web') AS {meta_type.USEROS},
ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='web') AS {schemas.FilterType.user_os},
ARRAY_AGG(DISTINCT s.user_browser
ORDER BY s.user_browser)
FILTER ( WHERE s.user_browser IS NOT NULL AND s.platform='web') AS {meta_type.USERBROWSER},
FILTER ( WHERE s.user_browser IS NOT NULL AND s.platform='web') AS {schemas.FilterType.user_browser},
ARRAY_AGG(DISTINCT s.user_device
ORDER BY s.user_device)
FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='web') AS {meta_type.USERDEVICE},
FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='web') AS {schemas.FilterType.user_device},
ARRAY_AGG(DISTINCT s.user_country
ORDER BY s.user_country)
FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='web')::text[] AS {meta_type.USERCOUNTRY},
FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='web')::text[] AS {schemas.FilterType.user_country},
ARRAY_AGG(DISTINCT s.user_id
ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='web') AS {meta_type.USERID},
ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='web') AS {schemas.FilterType.user_id},
ARRAY_AGG(DISTINCT s.user_anonymous_id
ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='web') AS {meta_type.USERANONYMOUSID},
ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='web') AS {schemas.FilterType.user_anonymous_id},
ARRAY_AGG(DISTINCT s.rev_id
ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='web') AS {meta_type.REVID},
ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='web') AS {schemas.FilterType.rev_id},
ARRAY_AGG(DISTINCT p.referrer
ORDER BY p.referrer)
FILTER ( WHERE p.referrer != '' ) AS {meta_type.REFERRER},
FILTER ( WHERE p.referrer != '' ) AS {schemas.FilterType.referrer},
ARRAY_AGG(DISTINCT s.utm_source
ORDER BY s.utm_source) FILTER ( WHERE s.utm_source IS NOT NULL AND s.utm_source != 'none' AND s.utm_source != '') AS {schemas.FilterType.utm_source},
ARRAY_AGG(DISTINCT s.utm_medium
ORDER BY s.utm_medium) FILTER ( WHERE s.utm_medium IS NOT NULL AND s.utm_medium != 'none' AND s.utm_medium != '') AS {schemas.FilterType.utm_medium},
ARRAY_AGG(DISTINCT s.utm_campaign
ORDER BY s.utm_campaign) FILTER ( WHERE s.utm_campaign IS NOT NULL AND s.utm_campaign != 'none' AND s.utm_campaign != '') AS {schemas.FilterType.utm_campaign},
ARRAY_AGG(DISTINCT s.user_os
ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='ios' ) AS {meta_type.USEROS_IOS},
ORDER BY s.user_os) FILTER ( WHERE s.user_os IS NOT NULL AND s.platform='ios' ) AS {schemas.FilterType.user_os_ios},
ARRAY_AGG(DISTINCT s.user_device
ORDER BY s.user_device)
FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='ios') AS {meta_type.USERDEVICE},
FILTER ( WHERE s.user_device IS NOT NULL AND s.user_device != '' AND s.platform='ios') AS {schemas.FilterType.user_device_ios},
ARRAY_AGG(DISTINCT s.user_country
ORDER BY s.user_country)
FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='ios')::text[] AS {meta_type.USERCOUNTRY_IOS},
FILTER ( WHERE s.user_country IS NOT NULL AND s.platform='ios')::text[] AS {schemas.FilterType.user_country_ios},
ARRAY_AGG(DISTINCT s.user_id
ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='ios') AS {meta_type.USERID_IOS},
ORDER BY s.user_id) FILTER ( WHERE s.user_id IS NOT NULL AND s.user_id != 'none' AND s.user_id != '' AND s.platform='ios') AS {schemas.FilterType.user_id_ios},
ARRAY_AGG(DISTINCT s.user_anonymous_id
ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='ios') AS {meta_type.USERANONYMOUSID_IOS},
ORDER BY s.user_anonymous_id) FILTER ( WHERE s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != 'none' AND s.user_anonymous_id != '' AND s.platform='ios') AS {schemas.FilterType.user_anonymous_id_ios},
ARRAY_AGG(DISTINCT s.rev_id
ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='ios') AS {meta_type.REVID_IOS}
ORDER BY s.rev_id) FILTER ( WHERE s.rev_id IS NOT NULL AND s.platform='ios') AS {schemas.FilterType.rev_id_ios}
FROM public.sessions AS s
LEFT JOIN events.pages AS p USING (session_id)
WHERE s.project_id = %(site_id)s;""",
@ -108,119 +116,137 @@ def __generic_autocomplete(typename):
return f
class meta_type:
USEROS = "USEROS"
USERBROWSER = "USERBROWSER"
USERDEVICE = "USERDEVICE"
USERCOUNTRY = "USERCOUNTRY"
USERID = "USERID"
USERANONYMOUSID = "USERANONYMOUSID"
REFERRER = "REFERRER"
REVID = "REVID"
# IOS
USEROS_IOS = "USEROS_IOS"
USERDEVICE_IOS = "USERDEVICE_IOS"
USERCOUNTRY_IOS = "USERCOUNTRY_IOS"
USERID_IOS = "USERID_IOS"
USERANONYMOUSID_IOS = "USERANONYMOUSID_IOS"
REVID_IOS = "REVID_IOS"
SUPPORTED_TYPES = {
meta_type.USEROS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USEROS),
query=__generic_query(typename=meta_type.USEROS),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
meta_type.USERBROWSER: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERBROWSER),
query=__generic_query(typename=meta_type.USERBROWSER),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
meta_type.USERDEVICE: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERDEVICE),
query=__generic_query(typename=meta_type.USERDEVICE),
value_limit=3,
starts_with="",
starts_limit=3,
ignore_if_starts_with=["/"]),
meta_type.USERCOUNTRY: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERCOUNTRY),
query=__generic_query(typename=meta_type.USERCOUNTRY),
value_limit=2,
starts_with="",
starts_limit=2,
ignore_if_starts_with=["/"]),
meta_type.USERID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERID),
query=__generic_query(typename=meta_type.USERID),
value_limit=2,
starts_with="",
starts_limit=2,
ignore_if_starts_with=["/"]),
meta_type.USERANONYMOUSID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERANONYMOUSID),
query=__generic_query(typename=meta_type.USERANONYMOUSID),
value_limit=3,
starts_with="",
starts_limit=3,
ignore_if_starts_with=["/"]),
meta_type.REVID: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REVID),
query=__generic_query(typename=meta_type.REVID),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
meta_type.REFERRER: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REFERRER),
query=__generic_query(typename=meta_type.REFERRER),
value_limit=5,
starts_with="/",
starts_limit=5,
ignore_if_starts_with=[]),
schemas.FilterType.user_os: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_os),
query=__generic_query(typename=schemas.FilterType.user_os),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_browser: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_browser),
query=__generic_query(typename=schemas.FilterType.user_browser),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_device: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_device),
query=__generic_query(typename=schemas.FilterType.user_device),
value_limit=3,
starts_with="",
starts_limit=3,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_country: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_country),
query=__generic_query(typename=schemas.FilterType.user_country),
value_limit=2,
starts_with="",
starts_limit=2,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_id: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_id),
query=__generic_query(typename=schemas.FilterType.user_id),
value_limit=2,
starts_with="",
starts_limit=2,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_anonymous_id: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_anonymous_id),
query=__generic_query(typename=schemas.FilterType.user_anonymous_id),
value_limit=3,
starts_with="",
starts_limit=3,
ignore_if_starts_with=["/"]),
schemas.FilterType.rev_id: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.rev_id),
query=__generic_query(typename=schemas.FilterType.rev_id),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
schemas.FilterType.referrer: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.referrer),
query=__generic_query(typename=schemas.FilterType.referrer),
value_limit=5,
starts_with="/",
starts_limit=5,
ignore_if_starts_with=[]),
schemas.FilterType.utm_campaign: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.utm_campaign),
query=__generic_query(typename=schemas.FilterType.utm_campaign),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
schemas.FilterType.utm_medium: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.utm_medium),
query=__generic_query(typename=schemas.FilterType.utm_medium),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
schemas.FilterType.utm_source: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.utm_source),
query=__generic_query(typename=schemas.FilterType.utm_source),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
# IOS
meta_type.USEROS_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USEROS_IOS),
query=__generic_query(typename=meta_type.USEROS_IOS),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
meta_type.USERDEVICE_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERDEVICE_IOS),
query=__generic_query(typename=meta_type.USERDEVICE_IOS),
value_limit=3,
starts_with="",
starts_limit=3,
ignore_if_starts_with=["/"]),
meta_type.USERCOUNTRY_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERCOUNTRY_IOS),
query=__generic_query(typename=meta_type.USERCOUNTRY_IOS),
value_limit=2,
starts_with="",
starts_limit=2,
ignore_if_starts_with=["/"]),
meta_type.USERID_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERID_IOS),
query=__generic_query(typename=meta_type.USERID_IOS),
value_limit=2,
starts_with="",
starts_limit=2,
ignore_if_starts_with=["/"]),
meta_type.USERANONYMOUSID_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.USERANONYMOUSID_IOS),
query=__generic_query(typename=meta_type.USERANONYMOUSID_IOS),
value_limit=3,
starts_with="",
starts_limit=3,
ignore_if_starts_with=["/"]),
meta_type.REVID_IOS: SupportedFilter(get=__generic_autocomplete(typename=meta_type.REVID_IOS),
query=__generic_query(typename=meta_type.REVID_IOS),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_os_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_os_ios),
query=__generic_query(typename=schemas.FilterType.user_os_ios),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_device_ios: SupportedFilter(
get=__generic_autocomplete(
typename=schemas.FilterType.user_device_ios),
query=__generic_query(typename=schemas.FilterType.user_device_ios),
value_limit=3,
starts_with="",
starts_limit=3,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_country_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_country_ios),
query=__generic_query(typename=schemas.FilterType.user_country_ios),
value_limit=2,
starts_with="",
starts_limit=2,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_id_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_id_ios),
query=__generic_query(typename=schemas.FilterType.user_id_ios),
value_limit=2,
starts_with="",
starts_limit=2,
ignore_if_starts_with=["/"]),
schemas.FilterType.user_anonymous_id_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.user_anonymous_id_ios),
query=__generic_query(typename=schemas.FilterType.user_anonymous_id_ios),
value_limit=3,
starts_with="",
starts_limit=3,
ignore_if_starts_with=["/"]),
schemas.FilterType.rev_id_ios: SupportedFilter(
get=__generic_autocomplete(typename=schemas.FilterType.rev_id_ios),
query=__generic_query(typename=schemas.FilterType.rev_id_ios),
value_limit=0,
starts_with="",
starts_limit=0,
ignore_if_starts_with=["/"]),
}
def search(text, meta_type, project_id):
rows = []
if meta_type.upper() not in list(SUPPORTED_TYPES.keys()):
if meta_type not in list(SUPPORTED_TYPES.keys()):
return {"errors": ["unsupported type"]}
rows += SUPPORTED_TYPES[meta_type.upper()].get(project_id=project_id, text=text)
if meta_type.upper() + "_IOS" in list(SUPPORTED_TYPES.keys()):
rows += SUPPORTED_TYPES[meta_type.upper() + "_IOS"].get(project_id=project_id, text=text)
rows += SUPPORTED_TYPES[meta_type].get(project_id=project_id, text=text)
if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()):
rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text)
return {"data": rows}

View file

@ -1,7 +1,8 @@
__author__ = "AZNAUROV David"
__maintainer__ = "KRAIEM Taha Yassine"
from chalicelib.core import events, sessions_metas, metadata, sessions
import schemas
from chalicelib.core import events, metadata, sessions
from chalicelib.utils import dev
"""
@ -30,87 +31,109 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
:param filter_d: dict contains events&filters&...
:return:
"""
stages = filter_d["events"]
filters = filter_d.get("filters", [])
stages: [dict] = filter_d["events"]
filters: [dict] = filter_d.get("filters", [])
filter_issues = filter_d.get("issueTypes")
if filter_issues is None or len(filter_issues) == 0:
filter_issues = []
stage_constraints = ["main.timestamp <= %(endTimestamp)s"]
first_stage_extra_constraints = ["s.project_id=%(project_id)s", "s.start_ts >= %(startTimestamp)s",
"s.start_ts <= %(endTimestamp)s"]
extra_from = ""
filter_extra_from = []
n_stages_query = []
values = {}
if len(filters) > 0:
meta_keys = metadata.get(project_id=project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
meta_keys = None
for i, f in enumerate(filters):
if not isinstance(f.get("value"), list):
if isinstance(f.get("value"), tuple):
f["value"] = list(f.get("value"))
else:
f["value"] = [f.get("value")]
if len(f["value"]) == 0 or f["value"][0] is None:
if not isinstance(f["value"], list):
            f["value"] = [f["value"]]
if len(f["value"]) == 0 or f["value"] is None:
continue
filter_type = f["type"].upper()
values[f"f_value_{i}"] = sessions.__get_sql_value_multiple(f["value"])
if filter_type == sessions_metas.meta_type.USERBROWSER:
op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(f's.user_browser {op} %({f"f_value_{i}"})s')
f["value"] = helper.values_for_operator(value=f["value"], op=f["operator"])
# filter_args = _multiple_values(f["value"])
op = sessions.__get_sql_operator(f["operator"])
elif filter_type in [sessions_metas.meta_type.USEROS, sessions_metas.meta_type.USEROS_IOS]:
op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(f's.user_os {op} %({f"f_value_{i}"})s')
filter_type = f["type"]
# values[f_k] = sessions.__get_sql_value_multiple(f["value"])
f_k = f"f_value{i}"
values = {**values,
**sessions._multiple_values(helper.values_for_operator(value=f["value"], op=f["operator"]),
value_key=f_k)}
if filter_type == schemas.FilterType.user_browser:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sessions._multiple_conditions(f's.user_browser {op} %({f_k})s', f["value"], value_key=f_k))
elif filter_type in [sessions_metas.meta_type.USERDEVICE, sessions_metas.meta_type.USERDEVICE_IOS]:
op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(f's.user_device {op} %({f"f_value_{i}"})s')
elif filter_type in [schemas.FilterType.user_os, schemas.FilterType.user_os_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sessions._multiple_conditions(f's.user_os {op} %({f_k})s', f["value"], value_key=f_k))
elif filter_type in [sessions_metas.meta_type.USERCOUNTRY, sessions_metas.meta_type.USERCOUNTRY_IOS]:
op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(f's.user_country {op} %({f"f_value_{i}"})s')
elif filter_type == "duration".upper():
elif filter_type in [schemas.FilterType.user_device, schemas.FilterType.user_device_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sessions._multiple_conditions(f's.user_device {op} %({f_k})s', f["value"], value_key=f_k))
elif filter_type in [schemas.FilterType.user_country, schemas.FilterType.user_country_ios]:
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sessions._multiple_conditions(f's.user_country {op} %({f_k})s', f["value"], value_key=f_k))
elif filter_type == schemas.FilterType.duration:
if len(f["value"]) > 0 and f["value"][0] is not None:
first_stage_extra_constraints.append(f's.duration >= %({f"f_value_{i}"})s')
values[f"f_value_{i}"] = f["value"][0]
if len(f["value"]) > 1 and f["value"][1] is not None and f["value"][1] > 0:
first_stage_extra_constraints.append('s.duration <= %({f"f_value_{i}"})s')
values[f"f_value_{i}"] = f["value"][1]
elif filter_type == sessions_metas.meta_type.REFERRER:
first_stage_extra_constraints.append(f's.duration >= %(minDuration)s')
values["minDuration"] = f["value"][0]
if len(f["value"]) > 1 and f["value"][1] is not None and int(f["value"][1]) > 0:
first_stage_extra_constraints.append('s.duration <= %(maxDuration)s')
values["maxDuration"] = f["value"][1]
elif filter_type == schemas.FilterType.referrer:
# events_query_part = events_query_part + f"INNER JOIN events.pages AS p USING(session_id)"
extra_from += f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"
op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(f"p.base_referrer {op} %(referrer)s")
filter_extra_from = [f"INNER JOIN {events.event_type.LOCATION.table} AS p USING(session_id)"]
# op = sessions.__get_sql_operator_multiple(f["operator"])
first_stage_extra_constraints.append(
sessions._multiple_conditions(f"p.base_referrer {op} %({f_k})s", f["value"], value_key=f_k))
elif filter_type == events.event_type.METADATA.ui_type:
op = sessions.__get_sql_operator(f["operator"])
if meta_keys is None:
meta_keys = metadata.get(project_id=project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
# op = sessions.__get_sql_operator(f["operator"])
if f.get("key") in meta_keys.keys():
first_stage_extra_constraints.append(
f's.{metadata.index_to_colname(meta_keys[f["key"]])} {op} %({f"f_value_{i}"})s')
values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [sessions_metas.meta_type.USERID, sessions_metas.meta_type.USERID_IOS]:
op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(f's.user_id {op} %({f"f_value_{i}"})s')
values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [sessions_metas.meta_type.USERANONYMOUSID,
sessions_metas.meta_type.USERANONYMOUSID_IOS]:
op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(f's.user_anonymous_id {op} %({f"f_value_{i}"})s')
values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [sessions_metas.meta_type.REVID, sessions_metas.meta_type.REVID_IOS]:
op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(f's.rev_id {op} %({f"f_value_{i}"})s')
values[f"f_value_{i}"] = helper.string_to_sql_like_with_op(f["value"][0], op)
sessions._multiple_conditions(
f's.{metadata.index_to_colname(meta_keys[f["key"]])} {op} %({f_k})s', f["value"],
value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.user_id, schemas.FilterType.user_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sessions._multiple_conditions(f's.user_id {op} %({f_k})s', f["value"], value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.user_anonymous_id,
schemas.FilterType.user_anonymous_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sessions._multiple_conditions(f's.user_anonymous_id {op} %({f_k})s', f["value"], value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
elif filter_type in [schemas.FilterType.rev_id, schemas.FilterType.rev_id_ios]:
# op = sessions.__get_sql_operator(f["operator"])
first_stage_extra_constraints.append(
sessions._multiple_conditions(f's.rev_id {op} %({f_k})s', f["value"], value_key=f_k))
# values[f_k] = helper.string_to_sql_like_with_op(f["value"][0], op)
for i, s in enumerate(stages):
if i == 0:
extra_from = ["INNER JOIN public.sessions AS s USING (session_id)"]
extra_from = filter_extra_from + ["INNER JOIN public.sessions AS s USING (session_id)"]
else:
extra_from = []
if s.get("operator") is None:
s["operator"] = "is"
if not isinstance(s["value"], list):
s["value"] = [s["value"]]
is_any = sessions._isAny_opreator(s["operator"])
if not is_any and isinstance(s["value"], list) and len(s["value"]) == 0:
continue
op = sessions.__get_sql_operator(s["operator"])
event_type = s["type"].upper()
next_label = s["value"]
if event_type == events.event_type.CLICK.ui_type:
next_table = events.event_type.CLICK.table
next_col_name = events.event_type.CLICK.column
@ -140,7 +163,8 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
print("=================UNDEFINED")
continue
values[f"value{i + 1}"] = helper.string_to_sql_like_with_op(next_label, op)
values = {**values, **sessions._multiple_values(helper.values_for_operator(value=s["value"], op=s["operator"]),
value_key=f"value{i + 1}")}
if sessions.__is_negation_operator(op) and i > 0:
op = sessions.__reverse_sql_operator(op)
main_condition = "left_not.session_id ISNULL"
@ -150,7 +174,11 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
AND s_main.timestamp >= T{i}.stage{i}_timestamp
AND s_main.session_id = T1.session_id) AS left_not ON (TRUE)""")
else:
main_condition = f"""main.{next_col_name} {op} %(value{i + 1})s"""
if is_any:
main_condition = "TRUE"
else:
main_condition = sessions._multiple_conditions(f"main.{next_col_name} {op} %(value{i + 1})s",
values=s["value"], value_key=f"value{i + 1}")
n_stages_query.append(f"""
(SELECT main.session_id,
{"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp,
@ -535,7 +563,8 @@ def get_top_insights(filter_d, project_id):
"dropDueToIssues": 0
}]
counts = sessions.search2_pg(data=filter_d, project_id=project_id, user_id=None, count_only=True)
counts = sessions.search2_pg(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d), project_id=project_id,
user_id=None, count_only=True)
output[0]["sessionsCount"] = counts["countSessions"]
output[0]["usersCount"] = counts["countUsers"]
return output, 0
@ -590,6 +619,15 @@ def get_overview(filter_d, project_id, first_stage=None, last_stage=None):
# The result of the multi-stage query
rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
if len(rows) == 0:
# PS: not sure what to return if rows are empty
output["stages"] = [{
"type": stages[0]["type"],
"value": stages[0]["value"],
"sessionsCount": None,
"dropPercentage": None,
"usersCount": None
}]
output['criticalIssuesCount'] = 0
return output
# Obtain the first part of the output
stages_list = get_stages(stages, rows)

View file

@ -1,6 +1,9 @@
import logging
import requests
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
import requests
def get_by_id(webhook_id):
@ -76,12 +79,6 @@ def update(tenant_id, webhook_id, changes, replace_none=False):
allow_update = ["name", "index", "authHeader", "endpoint"]
with pg_client.PostgresClient() as cur:
sub_query = [f"{helper.key_to_snake_case(k)} = %({k})s" for k in changes.keys() if k in allow_update]
print(cur.mogrify(f"""\
UPDATE public.webhooks
SET {','.join(sub_query)}
WHERE webhook_id =%(id)s AND deleted_at ISNULL
RETURNING webhook_id AS integration_id, webhook_id AS id,*;""",
{"id": webhook_id, **changes}))
cur.execute(
cur.mogrify(f"""\
UPDATE public.webhooks
@ -150,28 +147,24 @@ def trigger_batch(data_list):
for w in data_list:
if w["destination"] not in webhooks_map:
webhooks_map[w["destination"]] = get_by_id(webhook_id=w["destination"])
__trigger(hook=webhooks_map[w["destination"]], data=w["data"])
if webhooks_map[w["destination"]] is None:
logging.error(f"!!Error webhook not found: webhook_id={w['destination']}")
else:
__trigger(hook=webhooks_map[w["destination"]], data=w["data"])
def __trigger(hook, data):
if hook["type"] == 'webhook':
if hook is not None and hook["type"] == 'webhook':
headers = {}
if hook["authHeader"] is not None and len(hook["authHeader"]) > 0:
headers = {"Authorization": hook["authHeader"]}
# body = {
# "webhookId": hook["id"],
# "createdAt": TimeUTC.now(),
# "event": event,
# "data": data
# }
r = requests.post(url=hook["endpoint"], json=data, headers=headers)
if r.status_code != 200:
print("=======> webhook: something went wrong")
print(r)
print(r.status_code)
print(r.text)
logging.error("=======> webhook: something went wrong")
logging.error(r)
logging.error(r.status_code)
logging.error(r.text)
return
response = None
try:
@ -180,5 +173,5 @@ def __trigger(hook, data):
try:
response = r.text
except:
print("no response found")
logging.info("no response found")
return response
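
trigger_batch now logs and skips dangling webhook ids instead of handing None to __trigger. A hedged sketch of the new behavior (ids and payloads are hypothetical):

trigger_batch([
    {"destination": 12, "data": {"event": "alert", "title": "CPU spike"}},   # delivered
    {"destination": 999, "data": {"event": "alert", "title": "CPU spike"}},  # missing: logged, skipped
])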

View file

@ -29,7 +29,7 @@ def edit_config(user_id, weekly_report):
def cron():
with pg_client.PostgresClient() as cur:
with pg_client.PostgresClient(long_query=True) as cur:
params = {"3_days_ago": TimeUTC.midnight(delta_days=-3),
"1_week_ago": TimeUTC.midnight(delta_days=-7),
"2_week_ago": TimeUTC.midnight(delta_days=-14),

View file

@ -1,6 +1,7 @@
import random
import re
import string
from typing import Union
import math
import requests
@ -168,39 +169,56 @@ def string_to_sql_like(value):
def string_to_sql_like_with_op(value, op):
if isinstance(value, list) and len(value) > 0:
_value = value[0]
if isinstance(value, list):
r = []
for v in value:
r.append(string_to_sql_like_with_op(v, op))
return r
else:
_value = value
if _value is None:
return _value
if op.upper() != 'ILIKE':
if _value is None:
return _value
if op.upper() != 'ILIKE':
return _value.replace("%", "%%")
_value = _value.replace("*", "%")
if _value.startswith("^"):
_value = _value[1:]
elif not _value.startswith("%"):
_value = '%' + _value
if _value.endswith("$"):
_value = _value[:-1]
elif not _value.endswith("%"):
_value = _value + '%'
return _value.replace("%", "%%")
_value = _value.replace("*", "%")
if _value.startswith("^"):
_value = _value[1:]
elif not _value.startswith("%"):
_value = '%' + _value
if _value.endswith("$"):
_value = _value[:-1]
elif not _value.endswith("%"):
_value = _value + '%'
return _value.replace("%", "%%")
def string_to_op(value: str, op: schemas.SearchEventOperator):
if isinstance(value, list) and len(value) > 0:
_value = value[0]
likable_operators = [schemas.SearchEventOperator._starts_with, schemas.SearchEventOperator._ends_with,
schemas.SearchEventOperator._contains, schemas.SearchEventOperator._not_contains]
def is_likable(op: schemas.SearchEventOperator):
return op in likable_operators
def values_for_operator(value: Union[str, list], op: schemas.SearchEventOperator):
if not is_likable(op):
return value
if isinstance(value, list):
r = []
for v in value:
r.append(values_for_operator(v, op))
return r
else:
_value = value
if _value is None:
return _value
if op == schemas.SearchEventOperator._starts_with:
_value = '^' + _value
elif op == schemas.SearchEventOperator._ends_with:
_value = _value + '$'
return _value
if value is None:
return value
if op == schemas.SearchEventOperator._starts_with:
return value + '%'
elif op == schemas.SearchEventOperator._ends_with:
return '%' + value
elif op == schemas.SearchEventOperator._contains:
return '%' + value + '%'
return value
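
values_for_operator maps the likable operators onto plain SQL LIKE patterns, replacing the old ^/$ anchoring. Hedged examples, assuming the usual SearchEventOperator members:

values_for_operator("login", schemas.SearchEventOperator._starts_with)   # 'login%'
values_for_operator("login", schemas.SearchEventOperator._ends_with)     # '%login'
values_for_operator(["a", "b"], schemas.SearchEventOperator._contains)   # ['%a%', '%b%']
values_for_operator("login", schemas.SearchEventOperator._is)            # 'login' (not likable)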
def is_valid_email(email):
@ -348,3 +366,14 @@ def has_smtp():
def get_edition():
return "ee" if "ee" in config("ENTERPRISE_BUILD", default="").lower() else "foss"
def old_search_payload_to_flat(values):
# in case the old search body was passed
if values.get("events") is not None:
for v in values["events"]:
v["isEvent"] = True
for v in values.get("filters", []):
v["isEvent"] = False
values["filters"] = values.pop("events") + values.get("filters", [])
return values
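
A hedged before/after sketch of the flattening (field values are hypothetical):

old = {"events": [{"type": "CLICK", "value": ["Sign up"]}],
       "filters": [{"type": "userBrowser", "value": ["Chrome"]}]}
flat = old_search_payload_to_flat(old)
# flat == {"filters": [{"type": "CLICK", "value": ["Sign up"], "isEvent": True},
#                      {"type": "userBrowser", "value": ["Chrome"], "isEvent": False}]}
# the "events" key is consumed by pop().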

View file

@ -38,7 +38,7 @@
<div style="border-top:1px dotted rgba(0,0,0,0.2); display: block; margin-top: 20px"></div>
<center>
<p style="font-size: 12px; font-family: -apple-system,BlinkMacSystemFont,'Segoe UI',Roboto,Oxygen-Sans,Ubuntu,Cantarell,'Helvetica Neue',sans-serif; color: #6c757d">
Sent with &#9825; from OpenReplay &copy; 2021 - All rights reserved.<br><br>
Sent with &#9825; from OpenReplay &copy; 2022 - All rights reserved.<br><br>
<a href="https://openreplay.com" target="_blank"
style="text-decoration: none; color: #6c757d">https://openreplay.com/</a>
</p>

View file

@ -1,15 +1,17 @@
from threading import Semaphore
import psycopg2
import psycopg2.extras
from decouple import config
from psycopg2 import pool
PG_CONFIG = {"host": config("pg_host"),
"database": config("pg_dbname"),
"user": config("pg_user"),
"password": config("pg_password"),
"port": config("pg_port", cast=int)}
from psycopg2 import pool
from threading import Semaphore
if config("pg_timeout", cast=int, default=0) > 0:
PG_CONFIG["options"] = f"-c statement_timeout={config('pg_timeout', cast=int) * 1000}"
class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
@ -19,28 +21,51 @@ class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
def getconn(self, *args, **kwargs):
self._semaphore.acquire()
return super().getconn(*args, **kwargs)
try:
return super().getconn(*args, **kwargs)
except psycopg2.pool.PoolError as e:
if str(e) == "connection pool is closed":
make_pool()
raise e
def putconn(self, *args, **kwargs):
super().putconn(*args, **kwargs)
self._semaphore.release()
try:
postgreSQL_pool = ORThreadedConnectionPool(50, 100, **PG_CONFIG)
if (postgreSQL_pool):
print("Connection pool created successfully")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
raise error
postgreSQL_pool: ORThreadedConnectionPool = None
def make_pool():
global postgreSQL_pool
if postgreSQL_pool is not None:
try:
postgreSQL_pool.closeall()
except (Exception, psycopg2.DatabaseError) as error:
print("Error while closing all connexions to PostgreSQL", error)
try:
postgreSQL_pool = ORThreadedConnectionPool(config("pg_minconn", cast=int, default=20), 100, **PG_CONFIG)
if (postgreSQL_pool):
print("Connection pool created successfully")
except (Exception, psycopg2.DatabaseError) as error:
print("Error while connecting to PostgreSQL", error)
raise error
make_pool()
class PostgresClient:
connection = None
cursor = None
long_query = False
def __init__(self):
self.connection = postgreSQL_pool.getconn()
def __init__(self, long_query=False):
self.long_query = long_query
if long_query:
self.connection = psycopg2.connect(**PG_CONFIG)
else:
self.connection = postgreSQL_pool.getconn()
def __enter__(self):
if self.cursor is None:
@ -51,11 +76,18 @@ class PostgresClient:
try:
self.connection.commit()
self.cursor.close()
if self.long_query:
self.connection.close()
except Exception as error:
print("Error while committing/closing PG-connection", error)
raise error
if str(error) == "connection already closed":
print("Recreating the connexion pool")
make_pool()
else:
raise error
finally:
postgreSQL_pool.putconn(self.connection)
if not self.long_query:
postgreSQL_pool.putconn(self.connection)
def close():
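
Long-running jobs now borrow a dedicated connection instead of a pooled one, so a slow cron cannot starve the pool, and a closed pool is rebuilt on demand. A hedged usage sketch matching the weekly_report change above:

# Default: pooled connection, returned via putconn on __exit__.
with PostgresClient() as cur:
    cur.execute("SELECT 1;")

# long_query=True: private psycopg2 connection, closed on __exit__
# instead of being returned to the pool.
with PostgresClient(long_query=True) as cur:
    cur.execute("SELECT pg_sleep(60);")  # stand-in for a slow statement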

View file

@ -1,2 +1,2 @@
#!/bin/bash
uvicorn app:app --host 0.0.0.0
uvicorn app:app --host 0.0.0.0 --reload

View file

@ -1,4 +1,4 @@
from typing import Union, Optional
from typing import Union
from decouple import config
from fastapi import Depends, Body
@ -10,7 +10,8 @@ from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assig
log_tool_stackdriver, reset_password, sessions_favorite_viewed, \
log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, errors, sessions, \
log_tool_newrelic, announcements, log_tool_bugsnag, weekly_report, integration_jira_cloud, integration_github, \
assist, heatmaps, mobile, signup, tenants, errors_favorite_viewed, boarding, notifications, webhook, slack, users
assist, heatmaps, mobile, signup, tenants, errors_favorite_viewed, boarding, notifications, webhook, users, \
custom_metrics, saved_search
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import email_helper
from chalicelib.utils.TimeUTC import TimeUTC
@ -98,17 +99,16 @@ def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schem
@app.get('/{projectId}/events/search', tags=["events"])
def events_search(projectId: int, q: str, type: str = None, key: str = None, source: str = None,
context: schemas.CurrentContext = Depends(OR_context)):
def events_search(projectId: int, q: str, type: Union[schemas.FilterType, schemas.EventType] = None, key: str = None,
source: str = None, context: schemas.CurrentContext = Depends(OR_context)):
if len(q) == 0:
return {"data": []}
result = events.search_pg2(text=q, event_type=type, project_id=projectId, source=source,
key=key)
result = events.search_pg2(text=q, event_type=type, project_id=projectId, source=source, key=key)
return result
@app.post('/{projectId}/sessions/search2', tags=["sessions"])
def sessions_search2(projectId: int, data: schemas.SessionsSearchPayloadSchema = Body(...),
def sessions_search2(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = sessions.search2_pg(data, projectId, user_id=context.user_id)
return {'data': data}
@ -613,7 +613,7 @@ def errors_merge(context: schemas.CurrentContext = Depends(OR_context)):
@app.put('/{projectId}/alerts', tags=["alerts"])
def create_alert(projectId: int, data: schemas.AlertSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return alerts.create(projectId, data.dict())
return alerts.create(projectId, data)
@app.get('/{projectId}/alerts', tags=["alerts"])
@ -621,6 +621,12 @@ def get_all_alerts(projectId: int, context: schemas.CurrentContext = Depends(OR_
return {"data": alerts.get_all(projectId)}
@app.get('/{projectId}/alerts/triggers', tags=["alerts", "customMetrics"])
def get_alerts_triggers(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": alerts.get_predefined_values() \
+ custom_metrics.get_series_for_alert(project_id=projectId, user_id=context.user_id)}
@app.get('/{projectId}/alerts/{alertId}', tags=["alerts"])
def get_alert(projectId: int, alertId: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": alerts.get(alertId)}
@ -630,7 +636,7 @@ def get_alert(projectId: int, alertId: int, context: schemas.CurrentContext = De
@app.put('/{projectId}/alerts/{alertId}', tags=["alerts"])
def update_alert(projectId: int, alertId: int, data: schemas.AlertSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return alerts.update(alertId, data.dict())
return alerts.update(alertId, data)
@app.delete('/{projectId}/alerts/{alertId}', tags=["alerts"])
@ -645,7 +651,7 @@ def add_funnel(projectId: int, data: schemas.FunnelSchema = Body(...),
return funnels.create(project_id=projectId,
user_id=context.user_id,
name=data.name,
filter=data.filter.dict(),
filter=data.filter,
is_public=data.is_public)
@ -678,32 +684,31 @@ def get_possible_issue_types(projectId: int, context: schemas.CurrentContext = D
@app.get('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"])
def get_funnel_insights(projectId: int, funnelId: int, rangeValue: str = None, startDate: int = None,
endDate: int = None, context: schemas.CurrentContext = Depends(OR_context)):
return funnels.get_top_insights(funnel_id=funnelId, project_id=projectId,
range_value=rangeValue,
start_date=startDate,
end_date=endDate)
return funnels.get_top_insights(funnel_id=funnelId, user_id=context.user_id, project_id=projectId,
range_value=rangeValue, start_date=startDate, end_date=endDate)
@app.post('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"])
@app.put('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"])
def get_funnel_insights_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelInsightsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return funnels.get_top_insights_on_the_fly(funnel_id=funnelId, project_id=projectId, data=data.dict())
return funnels.get_top_insights_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId,
data=data.dict())
@app.get('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"])
def get_funnel_issues(projectId: int, funnelId, rangeValue: str = None, startDate: int = None, endDate: int = None,
context: schemas.CurrentContext = Depends(OR_context)):
return funnels.get_issues(funnel_id=funnelId, project_id=projectId,
range_value=rangeValue,
start_date=startDate, end_date=endDate)
return funnels.get_issues(funnel_id=funnelId, user_id=context.user_id, project_id=projectId,
range_value=rangeValue, start_date=startDate, end_date=endDate)
@app.post('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"])
@app.put('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"])
def get_funnel_issues_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": funnels.get_issues_on_the_fly(funnel_id=funnelId, project_id=projectId, data=data.dict())}
return {"data": funnels.get_issues_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId,
data=data.dict())}
@app.get('/{projectId}/funnels/{funnelId}/sessions', tags=["funnels"])
@ -720,7 +725,7 @@ def get_funnel_sessions(projectId: int, funnelId: int, rangeValue: str = None, s
def get_funnel_sessions_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": funnels.get_sessions_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId,
data=data.dict())}
data=data)}
@app.get('/{projectId}/funnels/issues/{issueId}/sessions', tags=["funnels"])
@ -740,7 +745,7 @@ def get_funnel_issue_sessions(projectId: int, funnelId: int, issueId: str,
data: schemas.FunnelSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = funnels.search_by_issue(project_id=projectId, user_id=context.user_id, issue_id=issueId,
funnel_id=funnelId, data=data.dict())
funnel_id=funnelId, data=data)
if "errors" in data:
return data
if data.get("issue") is None:
@ -752,7 +757,7 @@ def get_funnel_issue_sessions(projectId: int, funnelId: int, issueId: str,
@app.get('/{projectId}/funnels/{funnelId}', tags=["funnels"])
def get_funnel(projectId: int, funnelId: int, context: schemas.CurrentContext = Depends(OR_context)):
data = funnels.get(funnel_id=funnelId, project_id=projectId)
data = funnels.get(funnel_id=funnelId, project_id=projectId, user_id=context.user_id)
if data is None:
return {"errors": ["funnel not found"]}
return {"data": data}
@ -766,7 +771,8 @@ def edit_funnel(projectId: int, funnelId: int, data: schemas.UpdateFunnelSchema
user_id=context.user_id,
name=data.name,
filter=data.filter.dict(),
is_public=data.is_public)
is_public=data.is_public,
project_id=projectId)
@app.delete('/{projectId}/funnels/{funnelId}', tags=["funnels"])
@ -838,13 +844,6 @@ def signup_handler(data: schemas.UserSignupSchema = Body(...)):
return signup.create_step1(data)
@app.get('/projects', tags=['projects'])
def get_projects(last_tracker_version: Optional[str] = None, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True,
stack_integrations=True, version=True,
last_tracker_version=last_tracker_version)}
@app.post('/projects', tags=['projects'])
@app.put('/projects', tags=['projects'])
def create_project(data: schemas.CreateProjectSchema = Body(...),
@ -864,18 +863,6 @@ def delete_project(projectId, context: schemas.CurrentContext = Depends(OR_conte
return projects.delete(tenant_id=context.tenant_id, user_id=context.user_id, project_id=projectId)
@app.get('/client', tags=['projects'])
def get_client(context: schemas.CurrentContext = Depends(OR_context)):
r = tenants.get_by_tenant_id(context.tenant_id)
if r is not None:
r.pop("createdAt")
r["projects"] = projects.get_projects(tenant_id=context.tenant_id, recording_state=True, recorded=True,
stack_integrations=True, version=True)
return {
'data': r
}
@app.get('/client/new_api_key', tags=['client'])
def generate_new_tenant_token(context: schemas.CurrentContext = Depends(OR_context)):
return {
@ -953,19 +940,6 @@ def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDa
return {"errors": ["undefined action"]}
@public_app.post('/async/alerts/notifications/{step}', tags=["async", "alerts"])
@public_app.put('/async/alerts/notifications/{step}', tags=["async", "alerts"])
def send_alerts_notification_async(step: str, data: schemas.AlertNotificationSchema = Body(...)):
if data.auth != config("async_Token"):
return {"errors": ["missing auth"]}
if step == "slack":
slack.send_batch(notifications_list=data.notifications)
elif step == "email":
alerts.send_by_email_batch(notifications_list=data.notifications)
elif step == "webhook":
webhook.trigger_batch(data_list=data.notifications)
@app.get('/notifications', tags=['notifications'])
def get_notifications(context: schemas.CurrentContext = Depends(OR_context)):
return {"data": notifications.get_all(tenant_id=context.tenant_id, user_id=context.user_id)}
@ -1087,3 +1061,84 @@ def change_client_password(data: schemas.EditUserPasswordSchema = Body(...),
return users.change_password(email=context.email, old_password=data.old_password,
new_password=data.new_password, tenant_id=context.tenant_id,
user_id=context.user_id)
@app.post('/{projectId}/custom_metrics/try', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/try', tags=["customMetrics"])
def try_custom_metric(projectId: int, data: schemas.TryCustomMetricsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": custom_metrics.try_live(project_id=projectId, data=data)}
@app.post('/{projectId}/custom_metrics/chart', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/chart', tags=["customMetrics"])
def get_custom_metric_chart(projectId: int, data: schemas.CustomMetricChartPayloadSchema2 = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": custom_metrics.make_chart(project_id=projectId, user_id=context.user_id, metric_id=data.metric_id,
data=data)}
@app.post('/{projectId}/custom_metrics', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics', tags=["customMetrics"])
def add_custom_metric(projectId: int, data: schemas.CreateCustomMetricsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return custom_metrics.create(project_id=projectId, user_id=context.user_id, data=data)
@app.get('/{projectId}/custom_metrics', tags=["customMetrics"])
def get_custom_metrics(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)}
@app.get('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def get_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": custom_metrics.get(project_id=projectId, user_id=context.user_id, metric_id=metric_id)}
@app.post('/{projectId}/custom_metrics/{metric_id}/chart', tags=["customMetrics"])
def get_custom_metric_chart(projectId: int, metric_id: int, data: schemas.CustomMetricChartPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": custom_metrics.make_chart(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
data=data)}
@app.post('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
@app.put('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def update_custom_metric(projectId: int, metric_id: int, data: schemas.UpdateCustomMetricsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {
"data": custom_metrics.update(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)}
@app.delete('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
def delete_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": custom_metrics.delete(project_id=projectId, user_id=context.user_id, metric_id=metric_id)}
@app.post('/{projectId}/saved_search', tags=["savedSearch"])
@app.put('/{projectId}/saved_search', tags=["savedSearch"])
def add_saved_search(projectId: int, data: schemas.SavedSearchSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return saved_search.create(project_id=projectId, user_id=context.user_id, data=data)
@app.get('/{projectId}/saved_search', tags=["savedSearch"])
def get_saved_searches(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": saved_search.get_all(project_id=projectId, user_id=context.user_id, details=True)}
@app.get('/{projectId}/saved_search/{search_id}', tags=["savedSearch"])
def get_saved_search(projectId: int, search_id: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": saved_search.get(project_id=projectId, search_id=search_id, user_id=context.user_id)}
@app.post('/{projectId}/saved_search/{search_id}', tags=["savedSearch"])
@app.put('/{projectId}/saved_search/{search_id}', tags=["savedSearch"])
def update_saved_search(projectId: int, search_id: int, data: schemas.SavedSearchSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": saved_search.update(user_id=context.user_id, search_id=search_id, data=data, project_id=projectId)}
@app.delete('/{projectId}/saved_search/{search_id}', tags=["savedSearch"])
def delete_saved_search(projectId: int, search_id: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": saved_search.delete(project_id=projectId, user_id=context.user_id, search_id=search_id)}

View file

@ -8,7 +8,7 @@ import schemas
from chalicelib.core import assist
from chalicelib.core import integrations_manager
from chalicelib.core import sessions
from chalicelib.core import tenants, users, metadata, projects, license, alerts
from chalicelib.core import tenants, users, metadata, projects, license
from chalicelib.core import webhook
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import captcha
@ -209,13 +209,25 @@ def get_current_plan(context: schemas.CurrentContext = Depends(OR_context)):
}
@public_app.post('/alerts/notifications', tags=["alerts"])
@public_app.put('/alerts/notifications', tags=["alerts"])
def send_alerts_notifications(background_tasks: BackgroundTasks, data: schemas.AlertNotificationSchema = Body(...)):
# TODO: validate token
return {"data": alerts.process_notifications(data.notifications, background_tasks=background_tasks)}
@public_app.get('/general_stats', tags=["private"], include_in_schema=False)
def get_general_stats():
return {"data": {"sessions:": sessions.count_all()}}
@app.get('/client', tags=['projects'])
def get_client(context: schemas.CurrentContext = Depends(OR_context)):
r = tenants.get_by_tenant_id(context.tenant_id)
if r is not None:
r.pop("createdAt")
r["projects"] = projects.get_projects(tenant_id=context.tenant_id, recording_state=True, recorded=True,
stack_integrations=True, version=True)
return {
'data': r
}
@app.get('/projects', tags=['projects'])
def get_projects(last_tracker_version: Optional[str] = None, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True,
stack_integrations=True, version=True,
last_tracker_version=last_tracker_version)}
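For example, a client can pass the optional tracker-version check as a query parameter (sketch; host, token and version are placeholders):

import requests

r = requests.get("http://localhost:8000/projects",
                 params={"last_tracker_version": "3.5.0"},  # optional, omit to skip the check
                 headers={"Authorization": "Bearer <JWT>"})
projects = r.json()["data"]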

View file

@@ -1,7 +1,7 @@
from enum import Enum
from typing import Optional, List, Literal, Union
from typing import Optional, List, Union, Literal
from pydantic import BaseModel, Field, EmailStr, HttpUrl
from pydantic import BaseModel, Field, EmailStr, HttpUrl, root_validator
from chalicelib.utils.TimeUTC import TimeUTC
@@ -88,16 +88,6 @@ class SearchErrorsSchema(BaseModel):
order: Optional[str] = Field(None)
class EmailNotificationSchema(BaseModel):
notification: str = Field(...)
destination: str = Field(...)
class AlertNotificationSchema(BaseModel):
auth: str = Field(...)
notifications: List[EmailNotificationSchema] = Field(...)
class CreateNotificationSchema(BaseModel):
token: str = Field(...)
notifications: List = Field(...)
@@ -276,12 +266,18 @@ class _AlertMessageSchema(BaseModel):
value: str = Field(...)
class AlertDetectionChangeType(str, Enum):
percent = "percent"
change = "change"
class _AlertOptionSchema(BaseModel):
message: List[_AlertMessageSchema] = Field([])
currentPeriod: int = Field(...)
previousPeriod: int = Field(...)
currentPeriod: Literal[15, 30, 60, 120, 240, 1440] = Field(...)
previousPeriod: Literal[15, 30, 60, 120, 240, 1440] = Field(15)
lastNotification: Optional[int] = Field(None)
renotifyInterval: Optional[int] = Field(720)
change: Optional[AlertDetectionChangeType] = Field(None)
class AlertColumn(str, Enum):
@@ -304,35 +300,133 @@ class AlertColumn(str, Enum):
performance__crashes__count = "performance.crashes.count"
errors__javascript__count = "errors.javascript.count"
errors__backend__count = "errors.backend.count"
custom = "CUSTOM"
class MathOperator(str, Enum):
_equal = "="
_less = "<"
_greater = ">"
_less_eq = "<="
_greater_eq = ">="
class _AlertQuerySchema(BaseModel):
left: AlertColumn = Field(...)
right: float = Field(...)
operator: Literal["<", ">", "<=", ">="] = Field(...)
# operator: Literal["<", ">", "<=", ">="] = Field(...)
operator: MathOperator = Field(...)
class AlertDetectionMethod(str, Enum):
threshold = "threshold"
change = "change"
class AlertSchema(BaseModel):
name: str = Field(...)
detectionMethod: str = Field(...)
detection_method: AlertDetectionMethod = Field(...)
description: Optional[str] = Field(None)
options: _AlertOptionSchema = Field(...)
query: _AlertQuerySchema = Field(...)
series_id: Optional[int] = Field(None)
@root_validator
def alert_validator(cls, values):
if values.get("query") is not None and values["query"].left == AlertColumn.custom:
assert values.get("series_id") is not None, "series_id should not be null for CUSTOM alert"
if values.get("detectionMethod") is not None \
and values["detectionMethod"] == AlertDetectionMethod.change \
and values.get("options") is not None:
assert values["options"].change is not None, \
"options.change should not be null for detection method 'change'"
return values
class Config:
alias_generator = attribute_to_camel_case
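The cross-field checks above are the standard pydantic root_validator pattern; a minimal standalone sketch of the same idea (simplified stand-in fields, not the schema above):

from typing import Optional
from pydantic import BaseModel, root_validator

class MiniAlert(BaseModel):
    left: str
    series_id: Optional[int] = None

    @root_validator
    def check_custom(cls, values):
        # mirrors the rule above: CUSTOM alerts must reference a series
        if values.get("left") == "CUSTOM":
            assert values.get("series_id") is not None, "series_id required for CUSTOM"
        return values

MiniAlert(left="CUSTOM", series_id=3)  # ok
# MiniAlert(left="CUSTOM")             # raises ValidationError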
class SourcemapUploadPayloadSchema(BaseModel):
urls: List[str] = Field(..., alias="URL")
class ErrorSource(str, Enum):
js_exception = "js_exception"
bugsnag = "bugsnag"
cloudwatch = "cloudwatch"
datadog = "datadog"
newrelic = "newrelic"
rollbar = "rollbar"
sentry = "sentry"
stackdriver = "stackdriver"
sumologic = "sumologic"
class EventType(str, Enum):
click = "CLICK"
input = "INPUT"
location = "LOCATION"
custom = "CUSTOM"
request = "REQUEST"
graphql = "GRAPHQL"
state_action = "STATEACTION"
error = "ERROR"
metadata = "METADATA"
click_ios = "CLICK_IOS"
input_ios = "INPUT_IOS"
view_ios = "VIEW_IOS"
custom_ios = "CUSTOM_IOS"
request_ios = "REQUEST_IOS"
error_ios = "ERROR_IOS"
class PerformanceEventType(str, Enum):
location_dom_complete = "DOM_COMPLETE"
location_largest_contentful_paint_time = "LARGEST_CONTENTFUL_PAINT_TIME"
time_between_events = "TIME_BETWEEN_EVENTS"
location_ttfb = "TTFB"
location_avg_cpu_load = "AVG_CPU_LOAD"
location_avg_memory_usage = "AVG_MEMORY_USAGE"
fetch_failed = "FETCH_FAILED"
# fetch_duration = "FETCH_DURATION"
class FilterType(str, Enum):
user_os = "USEROS"
user_browser = "USERBROWSER"
user_device = "USERDEVICE"
user_country = "USERCOUNTRY"
user_id = "USERID"
user_anonymous_id = "USERANONYMOUSID"
referrer = "REFERRER"
rev_id = "REVID"
# IOS
user_os_ios = "USEROS_IOS"
user_device_ios = "USERDEVICE_IOS"
user_country_ios = "USERCOUNTRY_IOS"
user_id_ios = "USERID_IOS"
user_anonymous_id_ios = "USERANONYMOUSID_IOS"
rev_id_ios = "REVID_IOS"
#
duration = "DURATION"
platform = "PLATFORM"
metadata = "METADATA"
issue = "ISSUE"
events_count = "EVENTS_COUNT"
utm_source = "UTM_SOURCE"
utm_medium = "UTM_MEDIUM"
utm_campaign = "UTM_CAMPAIGN"
class SearchEventOperator(str, Enum):
_is = "is"
_is_any = "isAny"
_on = "on"
_on_any = "onAny"
_isnot = "isNot"
_noton = "notOn"
_is_not = "isNot"
_not_on = "notOn"
_contains = "contains"
_notcontains = "notContains"
_not_contains = "notContains"
_starts_with = "startsWith"
_ends_with = "endsWith"
@@ -340,34 +434,146 @@ class SearchEventOperator(str, Enum):
class PlatformType(str, Enum):
mobile = "mobile"
desktop = "desktop"
tablet = "tablet"
class _SessionSearchEventSchema(BaseModel):
custom: Optional[str] = Field(None)
key: Optional[str] = Field(None)
value: Union[Optional[str], Optional[List[str]]] = Field(...)
type: str = Field(...)
class SearchEventOrder(str, Enum):
_then = "then"
_or = "or"
_and = "and"
class IssueType(str, Enum):
click_rage = 'click_rage'
dead_click = 'dead_click'
excessive_scrolling = 'excessive_scrolling'
bad_request = 'bad_request'
missing_resource = 'missing_resource'
memory = 'memory'
cpu = 'cpu'
slow_resource = 'slow_resource'
slow_page_load = 'slow_page_load'
crash = 'crash'
custom = 'custom'
js_exception = 'js_exception'
class __MixedSearchFilter(BaseModel):
is_event: bool = Field(...)
class Config:
alias_generator = attribute_to_camel_case
class _SessionSearchEventRaw(__MixedSearchFilter):
is_event: bool = Field(True, const=True)
value: Union[str, List[str]] = Field(...)
type: Union[EventType, PerformanceEventType] = Field(...)
operator: SearchEventOperator = Field(...)
source: Optional[str] = Field(...)
source: Optional[Union[ErrorSource, List[Union[int, str]]]] = Field(default=ErrorSource.js_exception)
sourceOperator: Optional[MathOperator] = Field(None)
@root_validator
def event_validator(cls, values):
if isinstance(values.get("type"), PerformanceEventType):
if values.get("type") == PerformanceEventType.fetch_failed:
return values
assert values.get("source") is not None, "source should not be null for PerformanceEventType"
assert values.get("sourceOperator") is not None \
, "sourceOperator should not be null for PerformanceEventType"
if values["type"] == PerformanceEventType.time_between_events:
assert len(values.get("value", [])) == 2, \
f"must provide 2 Events as value for {PerformanceEventType.time_between_events}"
assert isinstance(values["value"][0], _SessionSearchEventRaw) \
and isinstance(values["value"][1], _SessionSearchEventRaw) \
, f"event should be of type _SessionSearchEventRaw for {PerformanceEventType.time_between_events}"
else:
for c in values["source"]:
assert isinstance(c, int), f"source value should be of type int for {values.get('type')}"
return values
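Concretely, a performance event that satisfies this validator could look like (values illustrative):

perf_event = {
    "isEvent": True,
    "type": "AVG_CPU_LOAD",   # a PerformanceEventType
    "value": ["/checkout"],   # page(s) to match
    "source": [70],           # ints required for non-FETCH_FAILED performance events
    "sourceOperator": ">=",   # a MathOperator
    "operator": "is",
}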
class _SessionSearchFilterSchema(_SessionSearchEventSchema):
value: List[str] = Field(...)
class _SessionSearchEventSchema(_SessionSearchEventRaw):
value: Union[List[_SessionSearchEventRaw], str, List[str]] = Field(...)
class _SessionSearchFilterSchema(__MixedSearchFilter):
is_event: bool = Field(False, const=False)
value: Union[Optional[Union[IssueType, PlatformType, int, str]],
Optional[List[Union[IssueType, PlatformType, int, str]]]] = Field(...)
type: FilterType = Field(...)
operator: Union[SearchEventOperator, MathOperator] = Field(...)
source: Optional[Union[ErrorSource, str]] = Field(default=ErrorSource.js_exception)
@root_validator
def filter_validator(cls, values):
if values.get("type") == FilterType.metadata:
assert values.get("source") is not None and len(values["source"]) > 0, \
"must specify a valid 'source' for metadata filter"
elif values.get("type") == FilterType.issue:
for v in values.get("value"):
assert isinstance(v, IssueType), f"value should be of type IssueType for {values.get('type')} filter"
elif values.get("type") == FilterType.platform:
for v in values.get("value"):
assert isinstance(v, PlatformType), \
f"value should be of type PlatformType for {values.get('type')} filter"
elif values.get("type") == FilterType.events_count:
assert isinstance(values.get("operator"), MathOperator), \
f"operator should be of type MathOperator for {values.get('type')} filter"
for v in values.get("value"):
assert isinstance(v, int), f"value should be of type int for {values.get('type')} filter"
else:
assert isinstance(values.get("operator"), SearchEventOperator), \
f"operator should be of type SearchEventOperator for {values.get('type')} filter"
return values
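Matching filter payloads, one per branch of the validator (values illustrative):

filters = [
    {"isEvent": False, "type": "METADATA", "source": "plan", "operator": "is", "value": ["premium"]},
    {"isEvent": False, "type": "ISSUE", "operator": "is", "value": ["dead_click", "memory"]},
    {"isEvent": False, "type": "EVENTS_COUNT", "operator": ">", "value": [5]},
]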
class SessionsSearchPayloadSchema(BaseModel):
events: List[_SessionSearchEventSchema] = Field([])
filters: List[_SessionSearchFilterSchema] = Field([])
# custom:dict=Field(...)
# rangeValue:str=Field(...)
startDate: int = Field(None)
endDate: int = Field(None)
sort: str = Field(...)
sort: str = Field(default="startTs")
order: str = Field(default="DESC")
platform: Optional[PlatformType] = Field(None)
events_order: Optional[SearchEventOrder] = Field(default=SearchEventOrder._then)
class Config:
alias_generator = attribute_to_camel_case
class FunnelSearchPayloadSchema(SessionsSearchPayloadSchema):
class FlatSessionsSearchPayloadSchema(SessionsSearchPayloadSchema):
events: Optional[List[_SessionSearchEventSchema]] = Field([])
filters: List[Union[_SessionSearchFilterSchema, _SessionSearchEventSchema]] = Field([])
@root_validator(pre=True)
def flat_to_original(cls, values):
# in case the old search body was passed
if len(values.get("events", [])) > 0:
for v in values["events"]:
v["isEvent"] = True
for v in values.get("filters", []):
v["isEvent"] = False
else:
n_filters = []
n_events = []
for v in values.get("filters", []):
if v.get("isEvent"):
n_events.append(v)
else:
n_filters.append(v)
values["events"] = n_events
values["filters"] = n_filters
return values
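So both body shapes are accepted; a sketch of the two (values illustrative):

# new flat shape: one list, discriminated by isEvent
flat = {"filters": [
    {"isEvent": True, "type": "CLICK", "operator": "on", "value": ["#buy"]},
    {"isEvent": False, "type": "USERBROWSER", "operator": "is", "value": ["Chrome"]},
]}

# old shape: separate lists; the validator just tags each entry with isEvent
legacy = {"events": [{"type": "CLICK", "operator": "on", "value": ["#buy"]}],
          "filters": [{"type": "USERBROWSER", "operator": "is", "value": ["Chrome"]}]}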
class SessionsSearchCountSchema(FlatSessionsSearchPayloadSchema):
# class SessionsSearchCountSchema(SessionsSearchPayloadSchema):
sort: Optional[str] = Field(default=None)
order: Optional[str] = Field(default=None)
class FunnelSearchPayloadSchema(FlatSessionsSearchPayloadSchema):
# class FunnelSearchPayloadSchema(SessionsSearchPayloadSchema):
range_value: Optional[str] = Field(None)
sort: Optional[str] = Field(None)
order: Optional[str] = Field(None)
@@ -391,7 +597,8 @@ class UpdateFunnelSchema(FunnelSchema):
is_public: Optional[bool] = Field(None)
class FunnelInsightsPayloadSchema(SessionsSearchPayloadSchema):
class FunnelInsightsPayloadSchema(FlatSessionsSearchPayloadSchema):
# class FunnelInsightsPayloadSchema(SessionsSearchPayloadSchema):
sort: Optional[str] = Field(None)
order: Optional[str] = Field(None)
@@ -419,3 +626,64 @@ class SentrySchema(BaseModel):
class MobileSignPayloadSchema(BaseModel):
keys: List[str] = Field(...)
class CustomMetricSeriesFilterSchema(FlatSessionsSearchPayloadSchema):
# class CustomMetricSeriesFilterSchema(SessionsSearchPayloadSchema):
startDate: Optional[int] = Field(None)
endDate: Optional[int] = Field(None)
sort: Optional[str] = Field(None)
order: Optional[str] = Field(None)
class CustomMetricCreateSeriesSchema(BaseModel):
name: Optional[str] = Field(None)
index: Optional[int] = Field(None)
filter: Optional[CustomMetricSeriesFilterSchema] = Field([])
class CreateCustomMetricsSchema(BaseModel):
name: str = Field(...)
series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1)
is_public: Optional[bool] = Field(False)
class Config:
alias_generator = attribute_to_camel_case
class MetricViewType(str, Enum):
line_chart = "lineChart"
progress = "progress"
class CustomMetricChartPayloadSchema(BaseModel):
startDate: int = Field(TimeUTC.now(-7))
endDate: int = Field(TimeUTC.now())
density: int = Field(7)
viewType: MetricViewType = Field(MetricViewType.line_chart)
class Config:
alias_generator = attribute_to_camel_case
class CustomMetricChartPayloadSchema2(CustomMetricChartPayloadSchema):
metric_id: int = Field(...)
class TryCustomMetricsSchema(CreateCustomMetricsSchema, CustomMetricChartPayloadSchema):
name: Optional[str] = Field(None)
class CustomMetricUpdateSeriesSchema(CustomMetricCreateSeriesSchema):
series_id: Optional[int] = Field(None)
class Config:
alias_generator = attribute_to_camel_case
class UpdateCustomMetricsSchema(CreateCustomMetricsSchema):
series: List[CustomMetricUpdateSeriesSchema] = Field(..., min_items=1)
class SavedSearchSchema(FunnelSchema):
pass
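Putting the pieces together, a creation body for CreateCustomMetricsSchema could look like (values illustrative):

create_metric = {
    "name": "Checkout errors",
    "isPublic": False,
    "series": [{
        "name": "JS errors on /checkout",
        "index": 0,
        "filter": {  # a CustomMetricSeriesFilterSchema, i.e. a flat search body
            "filters": [
                {"isEvent": True, "type": "LOCATION", "operator": "is", "value": ["/checkout"]},
                {"isEvent": True, "type": "ERROR", "operator": "contains", "value": ["TypeError"]},
            ],
            "eventsOrder": "and",
        },
    }],
}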

View file

@@ -38,7 +38,7 @@ func (c *PGCache) InsertUserID(sessionID uint64, userID *IOSUserID) error {
if err != nil {
return err
}
session.UserID = &userID.Value
session.UserID = userID.Value
return nil
}

View file

@@ -30,6 +30,7 @@ func (c *PGCache) InsertWebSessionStart(sessionID uint64, s *SessionStart) error
UserDeviceType: s.UserDeviceType,
UserDeviceMemorySize: s.UserDeviceMemorySize,
UserDeviceHeapSize: s.UserDeviceHeapSize,
UserID: s.UserID,
}
if err := c.Conn.InsertSessionStart(sessionID, c.sessions[ sessionID ]); err != nil {
c.sessions[ sessionID ] = nil

View file

@@ -47,7 +47,8 @@ func (conn *Conn) InsertSessionStart(sessionID uint64, s *types.Session) error {
rev_id,
tracker_version, issue_score,
platform,
user_agent, user_browser, user_browser_version, user_device_memory_size, user_device_heap_size
user_agent, user_browser, user_browser_version, user_device_memory_size, user_device_heap_size,
user_id
) VALUES (
$1, $2, $3,
$4, $5, $6, $7,
@@ -55,7 +56,8 @@ func (conn *Conn) InsertSessionStart(sessionID uint64, s *types.Session) error {
NULLIF($10, ''),
$11, $12,
$13,
NULLIF($14, ''), NULLIF($15, ''), NULLIF($16, ''), NULLIF($17, 0), NULLIF($18, 0::bigint)
NULLIF($14, ''), NULLIF($15, ''), NULLIF($16, ''), NULLIF($17, 0), NULLIF($18, 0::bigint),
NULLIF($19, '')
)`,
sessionID, s.ProjectID, s.Timestamp,
s.UserUUID, s.UserDevice, s.UserDeviceType, s.UserCountry,
@@ -64,6 +66,7 @@ func (conn *Conn) InsertSessionStart(sessionID uint64, s *types.Session) error {
s.TrackerVersion, s.Timestamp/1000,
s.Platform,
s.UserAgent, s.UserBrowser, s.UserBrowserVersion, s.UserDeviceMemorySize, s.UserDeviceHeapSize,
s.UserID,
); err != nil {
return err;
}

View file

@@ -16,7 +16,7 @@ type Session struct {
PagesCount int
EventsCount int
ErrorsCount int
UserID *string
UserID string // pointer??
UserAnonymousID *string
Metadata1 *string
Metadata2 *string

View file

@@ -63,9 +63,10 @@ UserDeviceType string
UserDeviceMemorySize uint64
UserDeviceHeapSize uint64
UserCountry string
UserID string
}
func (msg *SessionStart) Encode() []byte{
buf := make([]byte, 151 + len(msg.TrackerVersion)+ len(msg.RevID)+ len(msg.UserUUID)+ len(msg.UserAgent)+ len(msg.UserOS)+ len(msg.UserOSVersion)+ len(msg.UserBrowser)+ len(msg.UserBrowserVersion)+ len(msg.UserDevice)+ len(msg.UserDeviceType)+ len(msg.UserCountry))
buf := make([]byte, 161 + len(msg.TrackerVersion)+ len(msg.RevID)+ len(msg.UserUUID)+ len(msg.UserAgent)+ len(msg.UserOS)+ len(msg.UserOSVersion)+ len(msg.UserBrowser)+ len(msg.UserBrowserVersion)+ len(msg.UserDevice)+ len(msg.UserDeviceType)+ len(msg.UserCountry)+ len(msg.UserID))
buf[0] = 1
p := 1
p = WriteUint(msg.Timestamp, buf, p)
@@ -83,6 +84,7 @@ p = WriteString(msg.UserDeviceType, buf, p)
p = WriteUint(msg.UserDeviceMemorySize, buf, p)
p = WriteUint(msg.UserDeviceHeapSize, buf, p)
p = WriteString(msg.UserCountry, buf, p)
p = WriteString(msg.UserID, buf, p)
return buf[:p]
}
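The headroom grows from 151 to 161 bytes: len(msg.UserID) covers the payload, and the extra 10 bytes cover the worst-case variable-length integer that prefixes each string (an assumption based on the WriteUint/WriteString pairing). A rough Python sketch of that length-prefixed layout:

def write_uint(n: int, buf: bytearray) -> None:
    # unsigned varint: 7 bits per byte, high bit set while more bytes follow
    # (at most 10 bytes for a 64-bit value)
    while True:
        b = n & 0x7F
        n >>= 7
        buf.append(b | (0x80 if n else 0))
        if not n:
            break

def write_string(s: str, buf: bytearray) -> None:
    data = s.encode("utf-8")
    write_uint(len(data), buf)  # length prefix
    buf.extend(data)            # raw bytes

buf = bytearray()
write_string("user-42", buf)    # 1 length byte + 7 payload bytes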

View file

@@ -42,6 +42,7 @@ if msg.UserDeviceType, err = ReadString(reader); err != nil { return nil, err }
if msg.UserDeviceMemorySize, err = ReadUint(reader); err != nil { return nil, err }
if msg.UserDeviceHeapSize, err = ReadUint(reader); err != nil { return nil, err }
if msg.UserCountry, err = ReadString(reader); err != nil { return nil, err }
if msg.UserID, err = ReadString(reader); err != nil { return nil, err }
return msg, nil
case 2:

View file

@@ -1,86 +0,0 @@
package main
import (
"database/sql"
"log"
"os"
"os/signal"
"syscall"
"time"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/env"
_ "github.com/lib/pq"
)
func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
POSTGRES_STRING := env.String("POSTGRES_STRING")
NOTIFICATIONS_STRING := env.String("ALERT_NOTIFICATION_STRING")
log.Printf("Notifications: %s \nPG: %s\n", NOTIFICATIONS_STRING, POSTGRES_STRING)
pg := postgres.NewConn(POSTGRES_STRING)
defer pg.Close()
pgs, err := sql.Open("postgres", POSTGRES_STRING+ "?sslmode=disable")
if err != nil {
log.Fatal(err)
}
defer pgs.Close()
manager := NewManager(NOTIFICATIONS_STRING, POSTGRES_STRING, pgs, pg)
if err := pg.IterateAlerts(func(a *postgres.Alert, err error) {
if err != nil {
log.Printf("Postgres error: %v\n", err)
return
}
log.Printf("Alert initialization: %+v\n", *a)
//log.Printf("CreatedAt: %s\n", *a.CreatedAt)
err = manager.Update(a)
if err != nil {
log.Printf("Alert parse error: %v | Alert: %+v\n", err, *a)
return
}
}); err != nil {
log.Fatalf("Postgres error: %v\n", err)
}
listener, err := postgres.NewAlertsListener(POSTGRES_STRING)
if err != nil {
log.Fatalf("Postgres listener error: %v\n", err)
}
defer listener.Close()
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
tickAlert := time.Tick(1 * time.Minute)
log.Printf("Alert service started\n")
manager.RequestAll()
//return
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
listener.Close()
pg.Close()
os.Exit(0)
case <-tickAlert:
log.Printf("Requesting all...%d alerts\n", manager.Length())
manager.RequestAll()
case iPointer := <-listener.Alerts:
log.Printf("Alert update: %+v\n", *iPointer)
//log.Printf("CreatedAt: %s\n", *iPointer.CreatedAt)
//log.Printf("Notification received for AlertId: %d\n", iPointer.AlertID)
err := manager.Update(iPointer)
if err != nil {
log.Printf("Alert parse error: %+v | Alert: %v\n", err, *iPointer)
}
case err := <-listener.Errors:
log.Printf("listener error: %v\n", err)
if err.Error() == "conn closed" {
panic("Listener conn lost")
}
}
}
}

View file

@@ -1,171 +0,0 @@
package main
import (
"database/sql"
"fmt"
"log"
"sync"
"sync/atomic"
"time"
"openreplay/backend/pkg/db/postgres"
)
const PGParallelLimit = 2
var pgCount int64
type manager struct {
postgresString string
notificationsUrl string
alertsCache map[uint32]*postgres.Alert
cacheMutex sync.Mutex
pgParallel chan bool
pgs *sql.DB
pg *postgres.Conn
pgMutex sync.Mutex
notifications map[uint32]*postgres.TenantNotification
notificationsGo *sync.WaitGroup
notificationsMutex sync.Mutex
}
func NewManager(notificationsUrl string, postgresString string, pgs *sql.DB, pg *postgres.Conn) *manager {
return &manager{
postgresString: postgresString,
notificationsUrl: notificationsUrl,
alertsCache: make(map[uint32]*postgres.Alert),
cacheMutex: sync.Mutex{},
pgParallel: make(chan bool, PGParallelLimit),
pgs: pgs,
pg: pg,
pgMutex: sync.Mutex{},
notifications: make(map[uint32]*postgres.TenantNotification),
notificationsGo: &sync.WaitGroup{},
notificationsMutex: sync.Mutex{},
}
}
func (m *manager) Length() int {
return len(m.alertsCache)
}
func (m *manager) Update(a *postgres.Alert) error {
m.cacheMutex.Lock()
defer m.cacheMutex.Unlock()
_, exists := m.alertsCache[a.AlertID]
if exists && a.DeletedAt != nil {
log.Println("deleting alert from memory")
delete(m.alertsCache, a.AlertID)
return nil
} else {
m.alertsCache[a.AlertID] = a
}
return nil
}
func (m *manager) processAlert(a *postgres.Alert) {
defer func() {
defer m.notificationsGo.Done()
<-m.pgParallel
}()
if !a.CanCheck() {
log.Printf("cannot check %s", a.Name)
return
}
//log.Printf("checking %+v", a)
log.Printf("quering %s", a.Name)
//--- For stats:
atomic.AddInt64(&pgCount, 1)
q, err := a.Build()
if err != nil {
log.Println(err)
return
}
rows, err := q.RunWith(m.pgs).Query()
if err != nil {
log.Println(err)
return
}
defer rows.Close()
for rows.Next() {
var (
value sql.NullFloat64
valid bool
)
if err := rows.Scan(&value, &valid); err != nil {
log.Println(err)
continue
}
if valid && value.Valid {
log.Printf("%s: valid", a.Name)
m.notificationsMutex.Lock()
m.notifications[a.AlertID] = &postgres.TenantNotification{
TenantId: a.TenantId,
Title: a.Name,
Description: fmt.Sprintf("has been triggered, %s = %.0f (%s %.0f).", a.Query.Left, value.Float64, a.Query.Operator, a.Query.Right),
ButtonText: "Check metrics for more details",
ButtonUrl: fmt.Sprintf("/%d/metrics", a.ProjectID),
ImageUrl: nil,
Options: map[string]interface{}{"source": "ALERT", "sourceId": a.AlertID, "sourceMeta": a.DetectionMethod, "message": a.Options.Message, "projectId": a.ProjectID, "data": map[string]interface{}{"title": a.Name, "limitValue": a.Query.Right, "actualValue": value.Float64, "operator": a.Query.Operator, "trigger": a.Query.Left, "alertId": a.AlertID, "detectionMethod": a.DetectionMethod, "currentPeriod": a.Options.CurrentPeriod, "previousPeriod": a.Options.PreviousPeriod, "createdAt": time.Now().Unix() * 1000}},
}
m.notificationsMutex.Unlock()
}
}
}
func (m *manager) RequestAll() {
now := time.Now().Unix()
m.cacheMutex.Lock()
for _, a := range m.alertsCache {
m.pgParallel <- true
m.notificationsGo.Add(1)
go m.processAlert(a)
//m.processAlert(a)
}
//log.Println("releasing cache")
m.cacheMutex.Unlock()
//log.Println("waiting for all alerts to finish")
m.notificationsGo.Wait()
log.Printf("done %d PG queries in: %ds", pgCount, time.Now().Unix()-now)
pgCount = 0
//log.Printf("Processing %d Notifications", len(m.notifications))
m.notificationsMutex.Lock()
go m.ProcessNotifications(m.notifications)
m.notificationsMutex.Unlock()
m.notifications = make(map[uint32]*postgres.TenantNotification)
//log.Printf("Notifications purged: %d", len(m.notifications))
}
func (m *manager) ProcessNotifications(allNotifications map[uint32]*postgres.TenantNotification) {
if len(allNotifications) == 0 {
log.Println("No notifications to process")
return
}
log.Printf("sending %d notifications", len(allNotifications))
allIds := make([]uint32, 0, len(allNotifications))
toSend := postgres.Notifications{
Notifications: []*postgres.TenantNotification{},
}
for k, n := range allNotifications {
//log.Printf("notification for %d", k)
allIds = append(allIds, k)
toSend.Notifications = append(toSend.Notifications, n)
}
toSend.Send(m.notificationsUrl)
if err := m.pg.SaveLastNotification(allIds); err != nil {
log.Printf("Error saving LastNotification time: %v", err)
if err.Error() == "conn closed" {
m.pg = postgres.NewConn(m.postgresString)
//if err != nil {
// panic(fmt.Sprintf("Postgres renew notifications connection error: %v\n", err))
//}
if err := m.pg.SaveLastNotification(allIds); err != nil {
panic(fmt.Sprintf("Error saving LastNotification time, suicide: %v", err))
}
}
}
}

View file

@@ -27,6 +27,7 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
JsHeapSizeLimit uint64 `json:"jsHeapSizeLimit"`
ProjectKey *string `json:"projectKey"`
Reset bool `json:"reset"`
UserID string `json:"userID"`
}
type response struct {
Timestamp int64 `json:"timestamp"`
@@ -101,6 +102,7 @@ func startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
UserCountry: country,
UserDeviceMemorySize: req.DeviceMemory,
UserDeviceHeapSize: req.JsHeapSizeLimit,
UserID: req.UserID,
}))
}
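On the wire, the start-session request simply gains a userID field; an illustrative body restricted to the fields shown above (the project key is a placeholder):

start_body = {
    "projectKey": "<PROJECT_KEY>",
    "jsHeapSizeLimit": 4294705152,
    "reset": False,
    "userID": "user-42",  # newly forwarded into SessionStart
}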

View file

@@ -1,36 +1,4 @@
The OpenReplay Enterprise license (the “Enterprise License”)
Copyright (c) 2021 Asayer SAS.
Copyright (c) 2022 Asayer SAS.
With regard to the OpenReplay Software:
This software and associated documentation files (the "Software") may only be
used in production, if you (and any entity that you represent) have agreed to,
and are in compliance with, the OpenReplay Subscription Terms of Service, available
at https://openreplay.com/terms.html (the “Enterprise Edition”), or other
agreement governing the use of the Software, as agreed by you and OpenReplay,
and otherwise have a valid OpenReplay Enterprise license for the
correct usage. Subject to the foregoing sentence, you are free to
modify this Software and publish patches to the Software. You agree that OpenReplay
and/or its licensors (as applicable) retain all right, title and interest in and
to all such modifications and/or patches, and all such modifications and/or
patches may only be used, copied, modified, displayed, distributed, or otherwise
exploited with a valid OpenReplay Enterprise license for the correct
number of user seats and profiles. Notwithstanding the foregoing, you may copy and modify
the Software for development and testing purposes, without requiring a
subscription. You agree that OpenReplay and/or its licensors (as applicable) retain
all right, title and interest in and to all such modifications. You are not
granted any other rights beyond what is expressly stated herein. Subject to the
foregoing, it is forbidden to copy, merge, publish, distribute, sublicense,
and/or sell the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
For all third party components incorporated into the OpenReplay Software, those
components are licensed under the original license provided by the owner of the
applicable component.
To license the Enterprise Edition of OpenReplay, and take advantage of its additional features, functionality and support, you must agree to the terms of the OpenReplay Enterprise License Agreement. Please contact OpenReplay at [sales@openreplay.com](mailto:sales@openreplay.com).

View file

@@ -1,81 +0,0 @@
{
"version": "2.0",
"app_name": "parrot",
"environment_variables": {
},
"stages": {
"default-ee": {
"api_gateway_stage": "default-ee",
"manage_iam_role": false,
"iam_role_arn": "",
"autogen_policy": true,
"environment_variables": {
"isFOS": "false",
"isEE": "true",
"stage": "default-ee",
"jwt_issuer": "openreplay-default-ee",
"sentryURL": "",
"pg_host": "127.0.0.1",
"pg_port": "9202",
"pg_dbname": "app",
"pg_user": "",
"pg_password": "",
"ch_host": "",
"ch_port": "",
"alert_ntf": "http://127.0.0.1:8000/async/alerts/notifications/%s",
"email_signup": "http://127.0.0.1:8000/async/email_signup/%s",
"email_funnel": "http://127.0.0.1:8000/async/funnel/%s",
"email_plans": "http://127.0.0.1:8000/async/plans/%s",
"email_basic": "http://127.0.0.1:8000/async/basic/%s",
"assign_link": "http://127.0.0.1:8000/async/email_assignment",
"captcha_server": "",
"captcha_key": "",
"sessions_bucket": "mobs",
"sessions_region": "us-east-1",
"put_S3_TTL": "20",
"sourcemaps_reader": "http://utilities-openreplay.app.svc.cluster.local:9000/sourcemaps",
"sourcemaps_bucket": "sourcemaps",
"peers": "http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers",
"js_cache_bucket": "sessions-assets",
"async_Token": "",
"EMAIL_HOST": "",
"EMAIL_PORT": "587",
"EMAIL_USER": "",
"EMAIL_PASSWORD": "",
"EMAIL_USE_TLS": "true",
"EMAIL_USE_SSL": "false",
"EMAIL_SSL_KEY": "",
"EMAIL_SSL_CERT": "",
"EMAIL_FROM": "OpenReplay<do-not-reply@openreplay.com>",
"SITE_URL": "",
"announcement_url": "",
"jwt_secret": "SET A RANDOM STRING HERE",
"jwt_algorithm": "HS512",
"jwt_exp_delta_seconds": "2592000",
"S3_HOST": "",
"S3_KEY": "",
"S3_SECRET": "",
"LICENSE_KEY": "",
"SAML2_MD_URL": "",
"idp_entityId": "",
"idp_sso_url": "",
"idp_x509cert": "",
"idp_sls_url": "",
"idp_name": "",
"sso_exp_delta_seconds": "172800",
"sso_landing": "/login?jwt=%s",
"invitation_link": "/api/users/invitation?token=%s",
"change_password_link": "/reset-password?invitation=%s&&pass=%s",
"iosBucket": "openreplay-ios-images",
"version_number": "1.3.6",
"assist_secret": ""
},
"lambda_timeout": 150,
"lambda_memory_size": 400,
"subnet_ids": [
],
"security_group_ids": [
]
}
}
}

View file

@@ -38,11 +38,13 @@ jwt_exp_delta_seconds=2592000
jwt_issuer=openreplay-default-ee
jwt_secret="SET A RANDOM STRING HERE"
peers=http://utilities-openreplay.app.svc.cluster.local:9000/assist/%s/peers
pg_dbname=app
pg_host=127.0.0.1
pg_password=
pg_port=9202
pg_user=
pg_dbname=postgres
pg_host=postgresql.db.svc.cluster.local
pg_password=asayerPostgres
pg_port=5432
pg_user=postgres
pg_timeout=30
pg_minconn=45
put_S3_TTL=20
sentryURL=
sessions_bucket=mobs

ee/api/.gitignore vendored
View file

@@ -178,6 +178,7 @@ README/*
Pipfile
/chalicelib/core/alerts.py
/chalicelib/core/alerts_processor.py
/chalicelib/core/announcements.py
/chalicelib/blueprints/bp_app_api.py
/chalicelib/blueprints/bp_core.py
@@ -186,6 +187,7 @@ Pipfile
/chalicelib/core/errors_favorite_viewed.py
/chalicelib/core/events.py
/chalicelib/core/events_ios.py
/chalicelib/core/funnels.py
/chalicelib/core/integration_base.py
/chalicelib/core/integration_base_issue.py
/chalicelib/core/integration_github.py
@@ -251,9 +253,14 @@ Pipfile
/db_changes.sql
/Dockerfile.bundle
/entrypoint.bundle.sh
/entrypoint.sh
#/entrypoint.sh
/chalicelib/core/heatmaps.py
/routers/subs/insights.py
/schemas.py
/chalicelib/blueprints/app/v1_api.py
/routers/app/v1_api.py
/chalicelib/core/custom_metrics.py
/chalicelib/core/performance_event.py
/chalicelib/core/saved_search.py
/app_alerts.py
/build_alerts.sh

ee/api/Dockerfile.alerts Normal file
View file

@@ -0,0 +1,19 @@
FROM python:3.9.7-slim
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apt-get update && apt-get install -y pkg-config libxmlsec1-dev gcc && rm -rf /var/lib/apt/lists/*
WORKDIR /work
COPY . .
RUN pip install -r requirements.txt
RUN mv .env.default .env && mv app_alerts.py app.py
ENV pg_minconn 2
# Add Tini
# Startup daemon
ENV TINI_VERSION v0.19.0
ARG envarg
ENV ENTERPRISE_BUILD ${envarg}
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--"]
CMD ./entrypoint.sh

View file

@@ -1,6 +1,8 @@
import logging
import queue
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI, Request
from fastapi.middleware.cors import CORSMiddleware
from starlette import status
@@ -75,7 +77,10 @@ for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
app.schedule.add_job(id=job["func"].__name__, **job)
from chalicelib.core import traces
app.schedule.add_job(id="trace_worker",**traces.cron_jobs[0])
app.schedule.add_job(id="trace_worker", **traces.cron_jobs[0])
for job in app.schedule.get_jobs():
print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO))
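Each entry in cron_jobs is expected to be a dict of APScheduler add_job keyword arguments plus the callable; a minimal sketch of that shape (the job itself is hypothetical):

from apscheduler.schedulers.asyncio import AsyncIOScheduler

def health_check():  # hypothetical job
    print("still alive")

scheduler = AsyncIOScheduler()
job = {"func": health_check, "trigger": "interval", "minutes": 5}
scheduler.add_job(id=job["func"].__name__, **job)  # same call shape as above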

View file

@@ -16,10 +16,12 @@ class ProjectAuthorizer:
return
current_user: schemas.CurrentContext = await OR_context(request)
project_identifier = request.path_params[self.project_identifier]
user_id = current_user.user_id if request.state.authorizer_identity == "jwt" else None
if (self.project_identifier == "projectId" \
and not projects.is_authorized(project_id=project_identifier, tenant_id=current_user.tenant_id)) \
and not projects.is_authorized(project_id=project_identifier, tenant_id=current_user.tenant_id,
user_id=user_id)) \
or (self.project_identifier.lower() == "projectKey" \
and not projects.is_authorized(project_id=projects.get_internal_project_id(project_identifier),
tenant_id=current_user.tenant_id)):
tenant_id=current_user.tenant_id, user_id=user_id)):
print("unauthorized project")
raise HTTPException(status_code=status.HTTP_401_UNAUTHORIZED, detail="unauthorized project.")

View file

@@ -0,0 +1,27 @@
from chalicelib.utils import pg_client, helper
def get_all_alerts():
with pg_client.PostgresClient(long_query=True) as cur:
query = """SELECT tenant_id,
alert_id,
project_id,
detection_method,
query,
options,
(EXTRACT(EPOCH FROM alerts.created_at) * 1000)::BIGINT AS created_at,
alerts.name,
alerts.series_id,
filter
FROM public.alerts
LEFT JOIN metric_series USING (series_id)
INNER JOIN projects USING (project_id)
WHERE alerts.deleted_at ISNULL
AND alerts.active
AND projects.active
AND projects.deleted_at ISNULL
AND (alerts.series_id ISNULL OR metric_series.deleted_at ISNULL)
ORDER BY alerts.created_at;"""
cur.execute(query=query)
all_alerts = helper.list_to_camel_case(cur.fetchall())
return all_alerts
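The rows come back camelCased, so consumers read the keys directly; a usage sketch:

for alert in get_all_alerts():
    # keys were camelCased by helper.list_to_camel_case
    print(alert["alertId"], alert["detectionMethod"], alert["createdAt"])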

View file

@@ -1,275 +0,0 @@
import chalicelib.utils.helper
from chalicelib.core import events, significance, sessions
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils import helper, pg_client
from chalicelib.utils import dev
import json
REMOVE_KEYS = ["key", "_key", "startDate", "endDate"]
ALLOW_UPDATE_FOR = ["name", "filter"]
def filter_stages(stages):
ALLOW_TYPES = [events.event_type.CLICK.ui_type, events.event_type.INPUT.ui_type,
events.event_type.LOCATION.ui_type, events.event_type.CUSTOM.ui_type,
events.event_type.CLICK_IOS.ui_type, events.event_type.INPUT_IOS.ui_type,
events.event_type.VIEW_IOS.ui_type, events.event_type.CUSTOM_IOS.ui_type, ]
return [s for s in stages if s["type"] in ALLOW_TYPES and s.get("value") is not None]
def create(project_id, user_id, name, filter, is_public):
helper.delete_keys_from_dict(filter, REMOVE_KEYS)
filter["events"] = filter_stages(stages=filter.get("events", []))
with pg_client.PostgresClient() as cur:
query = cur.mogrify("""\
INSERT INTO public.funnels (project_id, user_id, name, filter,is_public)
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(filter)s::jsonb,%(is_public)s)
RETURNING *;""",
{"user_id": user_id, "project_id": project_id, "name": name, "filter": json.dumps(filter),
"is_public": is_public})
cur.execute(
query
)
r = cur.fetchone()
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
r = helper.dict_to_camel_case(r)
r["filter"]["startDate"], r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"])
return {"data": r}
def update(funnel_id, user_id, name=None, filter=None, is_public=None):
s_query = []
if filter is not None:
helper.delete_keys_from_dict(filter, REMOVE_KEYS)
s_query.append("filter = %(filter)s::jsonb")
if name is not None and len(name) > 0:
s_query.append("name = %(name)s")
if is_public is not None:
s_query.append("is_public = %(is_public)s")
if len(s_query) == 0:
return {"errors": ["Nothing to update"]}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(f"""\
UPDATE public.funnels
SET {" , ".join(s_query)}
WHERE funnel_id=%(funnel_id)s
RETURNING *;""",
{"user_id": user_id, "funnel_id": funnel_id, "name": name,
"filter": json.dumps(filter) if filter is not None else None, "is_public": is_public})
# print("--------------------")
# print(query)
# print("--------------------")
cur.execute(
query
)
r = cur.fetchone()
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
r = helper.dict_to_camel_case(r)
r["filter"]["startDate"], r["filter"]["endDate"] = TimeUTC.get_start_end_from_range(r["filter"]["rangeValue"])
return {"data": r}
def get_by_user(project_id, user_id, range_value=None, start_date=None, end_date=None, details=False):
with pg_client.PostgresClient() as cur:
team_query = """INNER JOIN
(
SELECT collaborators.user_id
FROM public.users AS creator
INNER JOIN public.users AS collaborators USING (tenant_id)
WHERE creator.user_id=%(user_id)s
) AS team USING (user_id)"""
cur.execute(
cur.mogrify(
f"""\
SELECT DISTINCT ON (funnels.funnel_id) funnel_id,project_id, user_id, name, created_at, deleted_at, is_public
{",filter" if details else ""}
FROM public.funnels {team_query}
WHERE project_id = %(project_id)s
AND funnels.deleted_at IS NULL
AND (funnels.user_id = %(user_id)s OR funnels.is_public);""",
{"project_id": project_id, "user_id": user_id}
)
)
rows = cur.fetchall()
rows = helper.list_to_camel_case(rows)
for row in rows:
row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
if details:
row["filter"]["events"] = filter_stages(row["filter"]["events"])
get_start_end_time(filter_d=row["filter"], range_value=range_value, start_date=start_date,
end_date=end_date)
counts = sessions.search2_pg(data=row["filter"], project_id=project_id, user_id=None, count_only=True)
row["sessionsCount"] = counts["countSessions"]
row["usersCount"] = counts["countUsers"]
overview = significance.get_overview(filter_d=row["filter"], project_id=project_id)
row["stages"] = overview["stages"]
row.pop("filter")
row["stagesCount"] = len(row["stages"])
# TODO: ask david to count it alone
row["criticalIssuesCount"] = overview["criticalIssuesCount"]
row["missedConversions"] = 0 if len(row["stages"]) < 2 \
else row["stages"][0]["sessionsCount"] - row["stages"][-1]["sessionsCount"]
return rows
def get_possible_issue_types(project_id):
return [{"type": t, "title": chalicelib.utils.helper.get_issue_title(t)} for t in
['click_rage', 'dead_click', 'excessive_scrolling',
'bad_request', 'missing_resource', 'memory', 'cpu',
'slow_resource', 'slow_page_load', 'crash', 'custom_event_error',
'js_error']]
def get_start_end_time(filter_d, range_value, start_date, end_date):
if start_date is not None and end_date is not None:
filter_d["startDate"], filter_d["endDate"] = start_date, end_date
elif range_value is not None and len(range_value) > 0:
filter_d["rangeValue"] = range_value
filter_d["startDate"], filter_d["endDate"] = TimeUTC.get_start_end_from_range(range_value)
else:
filter_d["startDate"], filter_d["endDate"] = TimeUTC.get_start_end_from_range(filter_d["rangeValue"])
def delete(project_id, funnel_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
UPDATE public.funnels
SET deleted_at = timezone('utc'::text, now())
WHERE project_id = %(project_id)s
AND funnel_id = %(funnel_id)s;""",
{"funnel_id": funnel_id, "project_id": project_id, "user_id": user_id})
)
return {"data": {"state": "success"}}
def get_sessions(project_id, funnel_id, user_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date)
return sessions.search2_pg(data=f["filter"], project_id=project_id, user_id=user_id)
def get_sessions_on_the_fly(funnel_id, project_id, user_id, data):
data["events"] = filter_stages(data.get("events", []))
if len(data["events"]) == 0:
f = get(funnel_id=funnel_id, project_id=project_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None),
start_date=data.get('startDate', None),
end_date=data.get('endDate', None))
data = f["filter"]
return sessions.search2_pg(data=data, project_id=project_id, user_id=user_id)
def get_top_insights(project_id, funnel_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date)
insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=f["filter"], project_id=project_id)
insights[-1]["dropDueToIssues"] = total_drop_due_to_issues
return {"data": {"stages": helper.list_to_camel_case(insights),
"totalDropDueToIssues": total_drop_due_to_issues}}
def get_top_insights_on_the_fly(funnel_id, project_id, data):
data["events"] = filter_stages(data.get("events", []))
if len(data["events"]) == 0:
f = get(funnel_id=funnel_id, project_id=project_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None),
start_date=data.get('startDate', None),
end_date=data.get('endDate', None))
data = f["filter"]
insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id)
if len(insights) > 0:
insights[-1]["dropDueToIssues"] = total_drop_due_to_issues
return {"data": {"stages": helper.list_to_camel_case(insights),
"totalDropDueToIssues": total_drop_due_to_issues}}
def get_issues(project_id, funnel_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=start_date, end_date=end_date)
return {"data": {
"issues": helper.dict_to_camel_case(significance.get_issues_list(filter_d=f["filter"], project_id=project_id))
}}
@dev.timed
def get_issues_on_the_fly(funnel_id, project_id, data):
first_stage = data.get("firstStage")
last_stage = data.get("lastStage")
data["events"] = filter_stages(data.get("events", []))
if len(data["events"]) == 0:
f = get(funnel_id=funnel_id, project_id=project_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=data.get("rangeValue", None),
start_date=data.get('startDate', None),
end_date=data.get('endDate', None))
data = f["filter"]
return {
"issues": helper.dict_to_camel_case(
significance.get_issues_list(filter_d=data, project_id=project_id, first_stage=first_stage,
last_stage=last_stage))}
def get(funnel_id, project_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""\
SELECT
*
FROM public.funnels
WHERE project_id = %(project_id)s
AND deleted_at IS NULL
AND funnel_id = %(funnel_id)s;""",
{"funnel_id": funnel_id, "project_id": project_id}
)
)
f = helper.dict_to_camel_case(cur.fetchone())
if f is None:
return None
f["createdAt"] = TimeUTC.datetime_to_timestamp(f["createdAt"])
f["filter"]["events"] = filter_stages(stages=f["filter"]["events"])
return f
@dev.timed
def search_by_issue(user_id, project_id, funnel_id, issue_id, data, range_value=None, start_date=None, end_date=None):
if len(data.get("events", [])) == 0:
f = get(funnel_id=funnel_id, project_id=project_id)
if f is None:
return {"errors": ["funnel not found"]}
get_start_end_time(filter_d=f["filter"], range_value=range_value, start_date=data.get('startDate', start_date),
end_date=data.get('endDate', end_date))
data = f["filter"]
# insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data, project_id=project_id)
issues = get_issues_on_the_fly(funnel_id=funnel_id, project_id=project_id, data=data).get("issues", {})
issues = issues.get("significant", []) + issues.get("insignificant", [])
issue = None
for i in issues:
if i.get("issueId", "") == issue_id:
issue = i
break
return {"sessions": sessions.search2_pg(user_id=user_id, project_id=project_id, issue=issue,
data=data) if issue is not None else {"total": 0, "sessions": []},
# "stages": helper.list_to_camel_case(insights),
# "totalDropDueToIssues": total_drop_due_to_issues,
"issue": issue}

View file

@@ -1,5 +1,6 @@
import json
import schemas
from chalicelib.core import users
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
@@ -41,7 +42,7 @@ def __create(tenant_id, name):
def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, stack_integrations=False, version=False,
last_tracker_version=None):
last_tracker_version=None, user_id=None):
with pg_client.PostgresClient() as cur:
tracker_query = ""
if last_tracker_version is not None and len(last_tracker_version) > 0:
@@ -53,6 +54,15 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
elif version:
tracker_query = ",(SELECT tracker_version FROM public.sessions WHERE sessions.project_id = s.project_id ORDER BY start_ts DESC LIMIT 1) AS tracker_version"
role_query = """INNER JOIN LATERAL (SELECT 1
FROM users
INNER JOIN roles USING (role_id)
LEFT JOIN roles_projects USING (role_id)
WHERE users.user_id = %(user_id)s
AND users.deleted_at ISNULL
AND users.tenant_id = %(tenant_id)s
AND (roles.all_projects OR roles_projects.project_id = s.project_id)
) AS role_project ON (TRUE)"""
cur.execute(
cur.mogrify(f"""\
SELECT
@@ -63,10 +73,11 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
{tracker_query}
FROM public.projects AS s
{'LEFT JOIN LATERAL (SELECT COUNT(*) AS count FROM public.integrations WHERE s.project_id = integrations.project_id LIMIT 1) AS stack_integrations ON TRUE' if stack_integrations else ''}
{role_query if user_id is not None else ""}
WHERE s.tenant_id =%(tenant_id)s
AND s.deleted_at IS NULL
ORDER BY s.project_id;""",
{"tenant_id": tenant_id})
{"tenant_id": tenant_id, "user_id": user_id})
)
rows = cur.fetchall()
if recording_state:
@@ -104,8 +115,8 @@ def get_project(tenant_id, project_id, include_last_session=False, include_gdpr=
query = cur.mogrify(f"""\
SELECT
s.project_id,
s.name,
s.project_key
s.project_key,
s.name
{",(SELECT max(ss.start_ts) FROM public.sessions AS ss WHERE ss.project_id = %(project_id)s) AS last_recorded_session_at" if include_last_session else ""}
{',s.gdpr' if include_gdpr else ''}
{tracker_query}
@@ -123,26 +134,52 @@ def get_project(tenant_id, project_id, include_last_session=False, include_gdpr=
return helper.dict_to_camel_case(row)
def is_authorized(project_id, tenant_id):
def is_authorized(project_id, tenant_id, user_id=None):
if project_id is None or not str(project_id).isdigit():
return False
return get_project(tenant_id=tenant_id, project_id=project_id) is not None
with pg_client.PostgresClient() as cur:
role_query = """INNER JOIN LATERAL (SELECT 1
FROM users
INNER JOIN roles USING (role_id)
LEFT JOIN roles_projects USING (role_id)
WHERE users.user_id = %(user_id)s
AND users.deleted_at ISNULL
AND users.tenant_id = %(tenant_id)s
AND (roles.all_projects OR roles_projects.project_id = %(project_id)s)
) AS role_project ON (TRUE)"""
query = cur.mogrify(f"""\
SELECT project_id
FROM public.projects AS s
{role_query if user_id is not None else ""}
where s.tenant_id =%(tenant_id)s
AND s.project_id =%(project_id)s
AND s.deleted_at IS NULL
LIMIT 1;""",
{"tenant_id": tenant_id, "project_id": project_id, "user_id": user_id})
cur.execute(
query=query
)
row = cur.fetchone()
return row is not None
def create(tenant_id, user_id, data, skip_authorization=False):
def create(tenant_id, user_id, data: schemas.CreateProjectSchema, skip_authorization=False):
if not skip_authorization:
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"errors": ["unauthorized"]}
return {"data": __create(tenant_id=tenant_id, name=data.get("name", "my first project"))}
if admin["roleId"] is not None and not admin["allProjects"]:
return {"errors": ["unauthorized: you need allProjects permission to create a new project"]}
return {"data": __create(tenant_id=tenant_id, name=data.name)}
def edit(tenant_id, user_id, project_id, data):
def edit(tenant_id, user_id, project_id, data: schemas.CreateProjectSchema):
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"errors": ["unauthorized"]}
return {"data": __update(tenant_id=tenant_id, project_id=project_id,
changes={"name": data.get("name", "my first project")})}
changes={"name": data.name})}
def delete(tenant_id, user_id, project_id):
@@ -152,8 +189,7 @@ def delete(tenant_id, user_id, project_id):
return {"errors": ["unauthorized"]}
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
UPDATE public.projects
cur.mogrify("""UPDATE public.projects
SET
deleted_at = timezone('utc'::text, now()),
active = FALSE
@@ -274,3 +310,22 @@ def get_project_by_key(tenant_id, project_key, include_last_session=False, inclu
)
row = cur.fetchone()
return helper.dict_to_camel_case(row)
def is_authorized_batch(project_ids, tenant_id):
if project_ids is None or not len(project_ids):
return False
with pg_client.PostgresClient() as cur:
query = cur.mogrify("""\
SELECT project_id
FROM public.projects
where tenant_id =%(tenant_id)s
AND project_id IN %(project_ids)s
AND deleted_at IS NULL;""",
{"tenant_id": tenant_id, "project_ids": tuple(project_ids)})
cur.execute(
query=query
)
rows = cur.fetchall()
return [r["project_id"] for r in rows]

View file

@@ -1,64 +1,111 @@
from chalicelib.core import users
import schemas_ee
from chalicelib.core import users, projects
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
def update(tenant_id, user_id, role_id, changes):
def update(tenant_id, user_id, role_id, data: schemas_ee.RolePayloadSchema):
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"errors": ["unauthorized"]}
if len(changes.keys()) == 0:
return None
ALLOW_EDIT = ["name", "description", "permissions"]
sub_query = []
for key in changes.keys():
if key in ALLOW_EDIT:
sub_query.append(f"{helper.key_to_snake_case(key)} = %({key})s")
if not data.all_projects and (data.projects is None or len(data.projects) == 0):
return {"errors": ["must specify a project or all projects"]}
if data.projects is not None and len(data.projects) > 0 and not data.all_projects:
data.projects = projects.is_authorized_batch(project_ids=data.projects, tenant_id=tenant_id)
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""\
cur.mogrify("""SELECT 1
FROM public.roles
WHERE role_id = %(role_id)s
AND tenant_id = %(tenant_id)s
AND protected = TRUE
LIMIT 1;""",
{"tenant_id": tenant_id, "role_id": role_id})
)
if cur.fetchone() is not None:
return {"errors": ["this role is protected"]}
cur.execute(
cur.mogrify("""\
UPDATE public.roles
SET {" ,".join(sub_query)}
SET name= %(name)s,
description= %(description)s,
permissions= %(permissions)s,
all_projects= %(all_projects)s
WHERE role_id = %(role_id)s
AND tenant_id = %(tenant_id)s
AND deleted_at ISNULL
AND protected = FALSE
RETURNING *;""",
{"tenant_id": tenant_id, "role_id": role_id, **changes})
RETURNING *, COALESCE((SELECT ARRAY_AGG(project_id)
FROM roles_projects WHERE roles_projects.role_id=%(role_id)s),'{}') AS projects;""",
{"tenant_id": tenant_id, "role_id": role_id, **data.dict()})
)
row = cur.fetchone()
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
if not data.all_projects:
d_projects = [i for i in row["projects"] if i not in data.projects]
if len(d_projects) > 0:
cur.execute(
cur.mogrify(
"DELETE FROM roles_projects WHERE role_id=%(role_id)s AND project_id IN %(project_ids)s",
{"role_id": role_id, "project_ids": tuple(d_projects)})
)
n_projects = [i for i in data.projects if i not in row["projects"]]
if len(n_projects) > 0:
cur.execute(
cur.mogrify(
f"""INSERT INTO roles_projects(role_id, project_id)
VALUES {",".join([f"(%(role_id)s,%(project_id_{i})s)" for i in range(len(n_projects))])}""",
{"role_id": role_id, **{f"project_id_{i}": p for i, p in enumerate(n_projects)}})
)
row["projects"] = data.projects
return helper.dict_to_camel_case(row)
def create(tenant_id, user_id, name, description, permissions):
def create(tenant_id, user_id, data: schemas_ee.RolePayloadSchema):
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"errors": ["unauthorized"]}
if not data.all_projects and (data.projects is None or len(data.projects) == 0):
return {"errors": ["must specify a project or all projects"]}
if data.projects is not None and len(data.projects) > 0 and not data.all_projects:
data.projects = projects.is_authorized_batch(project_ids=data.projects, tenant_id=tenant_id)
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""INSERT INTO roles(tenant_id, name, description, permissions)
VALUES (%(tenant_id)s, %(name)s, %(description)s, %(permissions)s::text[])
cur.mogrify("""INSERT INTO roles(tenant_id, name, description, permissions, all_projects)
VALUES (%(tenant_id)s, %(name)s, %(description)s, %(permissions)s::text[], %(all_projects)s)
RETURNING *;""",
{"tenant_id": tenant_id, "name": name, "description": description, "permissions": permissions})
{"tenant_id": tenant_id, "name": data.name, "description": data.description,
"permissions": data.permissions, "all_projects": data.all_projects})
)
row = cur.fetchone()
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
if not data.all_projects:
role_id = row["role_id"]
cur.execute(
cur.mogrify(f"""INSERT INTO roles_projects(role_id, project_id)
VALUES {",".join(f"(%(role_id)s,%(project_id_{i})s)" for i in range(len(data.projects)))};""",
{"role_id": role_id, **{f"project_id_{i}": p for i, p in enumerate(data.projects)}})
)
return helper.dict_to_camel_case(row)
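A RolePayloadSchema body for this path might look like the following (fields inferred from the usages above, assuming the camelCase alias convention used by the other schemas; permission names are illustrative):

role_payload = {
    "name": "Support",
    "description": "Read-only access to two projects",
    "permissions": ["SESSION_REPLAY", "METRICS"],  # illustrative permission names
    "allProjects": False,
    "projects": [1, 2],
}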
def get_roles(tenant_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""SELECT *
FROM public.roles
where tenant_id =%(tenant_id)s
AND deleted_at IS NULL
ORDER BY role_id;""",
cur.mogrify("""SELECT roles.*, COALESCE(projects, '{}') AS projects
FROM public.roles
LEFT JOIN LATERAL (SELECT array_agg(project_id) AS projects
FROM roles_projects
INNER JOIN projects USING (project_id)
WHERE roles_projects.role_id = roles.role_id
AND projects.deleted_at ISNULL ) AS role_projects ON (TRUE)
WHERE tenant_id =%(tenant_id)s
AND deleted_at IS NULL
ORDER BY role_id;""",
{"tenant_id": tenant_id})
)
rows = cur.fetchall()
@@ -71,11 +118,10 @@ def get_role_by_name(tenant_id, name):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""SELECT *
FROM public.roles
where tenant_id =%(tenant_id)s
AND deleted_at IS NULL
AND name ILIKE %(name)s
;""",
FROM public.roles
where tenant_id =%(tenant_id)s
AND deleted_at IS NULL
AND name ILIKE %(name)s;""",
{"tenant_id": tenant_id, "name": name})
)
row = cur.fetchone()
@@ -92,11 +138,11 @@ def delete(tenant_id, user_id, role_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""SELECT 1
FROM public.roles
WHERE role_id = %(role_id)s
AND tenant_id = %(tenant_id)s
AND protected = TRUE
LIMIT 1;""",
FROM public.roles
WHERE role_id = %(role_id)s
AND tenant_id = %(tenant_id)s
AND protected = TRUE
LIMIT 1;""",
{"tenant_id": tenant_id, "role_id": role_id})
)
if cur.fetchone() is not None:

View file

@@ -274,6 +274,7 @@ def get(user_id, tenant_id):
role_id,
roles.name AS role_name,
roles.permissions,
roles.all_projects,
basic_authentication.password IS NOT NULL AS has_password
FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id
LEFT JOIN public.roles USING (role_id)
@@ -482,7 +483,7 @@ def change_password(tenant_id, user_id, email, old_password, new_password):
c = tenants.get_by_tenant_id(tenant_id)
c.pop("createdAt")
c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True,
stack_integrations=True)
stack_integrations=True, user_id=user_id)
c["smtp"] = helper.has_smtp()
c["iceServers"] = assist.get_ice_servers()
return {
@@ -510,7 +511,7 @@ def set_password_invitation(tenant_id, user_id, new_password):
c = tenants.get_by_tenant_id(tenant_id)
c.pop("createdAt")
c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True,
stack_integrations=True)
stack_integrations=True, user_id=user_id)
c["smtp"] = helper.has_smtp()
c["iceServers"] = assist.get_ice_servers()
return {
@@ -735,3 +736,57 @@ def create_sso_user(tenant_id, email, admin, name, origin, role_id, internal_id=
query
)
return helper.dict_to_camel_case(cur.fetchone())
def restore_sso_user(user_id, tenant_id, email, admin, name, origin, role_id, internal_id=None):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(f"""\
WITH u AS (
UPDATE public.users
SET tenant_id= %(tenantId)s,
role= %(role)s,
name= %(name)s,
data= %(data)s,
origin= %(origin)s,
internal_id= %(internal_id)s,
role_id= %(role_id)s,
deleted_at= NULL,
created_at= default,
api_key= default,
jwt_iat= NULL,
appearance= default,
weekly_report= default
WHERE user_id = %(user_id)s
RETURNING *
),
au AS (
UPDATE public.basic_authentication
SET password= default,
generated_password= default,
invitation_token= default,
invited_at= default,
change_pwd_token= default,
change_pwd_expire_at= default,
changed_at= NULL
WHERE user_id = %(user_id)s
RETURNING user_id
)
SELECT u.user_id AS id,
u.email,
u.role,
u.name,
TRUE AS change_password,
(CASE WHEN u.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN u.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN u.role = 'member' THEN TRUE ELSE FALSE END) AS member,
u.appearance,
origin
FROM u;""",
{"tenantId": tenant_id, "email": email, "internal_id": internal_id,
"role": "admin" if admin else "member", "name": name, "origin": origin,
"role_id": role_id, "data": json.dumps({"lastAnnouncementView": TimeUTC.now()}),
"user_id": user_id})
cur.execute(
query
)
return helper.dict_to_camel_case(cur.fetchone())
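Note on restore_sso_user: both tables are reset in a single statement via data-modifying CTEs, and the final SELECT reads the restored row straight from the first CTE's RETURNING set. A reduced sketch of the same pattern (columns trimmed for illustration):

restore_sketch = """
WITH u AS (UPDATE public.users
           SET deleted_at = NULL
           WHERE user_id = %(user_id)s
           RETURNING user_id, email),
     au AS (UPDATE public.basic_authentication
            SET password = DEFAULT
            WHERE user_id = %(user_id)s
            RETURNING user_id)
SELECT u.user_id, u.email
FROM u;
"""
# In PostgreSQL every data-modifying CTE runs exactly once, even when (like au)
# its output is never read, so both resets commit or roll back together.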

View file

@ -12,11 +12,11 @@ SAML2 = {
"sp": {
"entityId": config("SITE_URL") + "/api/sso/saml2/metadata/",
"assertionConsumerService": {
"url": config("SITE_URL") + "/api/sso/saml2/acs",
"url": config("SITE_URL") + "/api/sso/saml2/acs/",
"binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-POST"
},
"singleLogoutService": {
"url": config("SITE_URL") + "/api/sso/saml2/sls",
"url": config("SITE_URL") + "/api/sso/saml2/sls/",
"binding": "urn:oasis:names:tc:SAML:2.0:bindings:HTTP-Redirect"
},
"NameIDFormat": "urn:oasis:names:tc:SAML:1.1:nameid-format:emailAddress",
@ -25,6 +25,12 @@ SAML2 = {
},
"idp": None
}
# in case tenantKey is included in the URL
sp_acs = config("idp_tenantKey", default="")
if sp_acs is not None and len(sp_acs) > 0:
SAML2["sp"]["assertionConsumerService"]["url"] += sp_acs + "/"
idp = None
# SAML2 config handler
if config("SAML2_MD_URL", default=None) is not None and len(config("SAML2_MD_URL")) > 0:
@ -60,12 +66,9 @@ else:
def init_saml_auth(req):
# auth = OneLogin_Saml2_Auth(req, custom_base_path=environ['SAML_PATH'])
if idp is None:
raise Exception("No SAML2 config provided")
auth = OneLogin_Saml2_Auth(req, old_settings=SAML2)
return auth
return OneLogin_Saml2_Auth(req, old_settings=SAML2)
async def prepare_request(request: Request):
@ -86,12 +89,20 @@ async def prepare_request(request: Request):
session = {}
# If the server is behind proxies or load balancers, use the HTTP_X_FORWARDED fields
headers = request.headers
url_data = urlparse('%s://%s' % (headers.get('x-forwarded-proto', 'http'), headers['host']))
proto = headers.get('x-forwarded-proto', 'http')
if headers.get('x-forwarded-proto') is not None:
print(f"x-forwarded-proto: {proto}")
url_data = urlparse('%s://%s' % (proto, headers['host']))
path = request.url.path
# ensure the path ends with a trailing slash (e.g. /acs -> /acs/)
if not path.endswith("/"):
path = path + '/'
return {
'https': 'on' if request.headers.get('x-forwarded-proto', 'http') == 'https' else 'off',
'https': 'on' if proto == 'https' else 'off',
'http_host': request.headers['host'],
'server_port': url_data.port,
'script_name': "/api" + request.url.path,
'script_name': "/api" + path,
'get_data': request.args.copy(),
# Uncomment if using ADFS as IdP, https://github.com/onelogin/python-saml/pull/144
# 'lowercase_urlencoding': True,
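Note on prepare_request: it builds the dict python3-saml expects, deriving the scheme from x-forwarded-proto and forcing a trailing slash on the path so the computed URL matches the assertionConsumerService URL declared in the SAML2 settings above. A hedged example, assuming a TLS-terminating proxy and a hypothetical host:

# headers: {"host": "openreplay.example.com", "x-forwarded-proto": "https"}
# request.url.path: "/sso/saml2/acs"
# result:
# {
#     "https": "on",
#     "http_host": "openreplay.example.com",
#     "server_port": None,                   # no explicit port in the Host header
#     "script_name": "/api/sso/saml2/acs/",  # trailing slash appended
#     ...
# }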

ee/api/entrypoint.sh Executable file (+2)
View file

@ -0,0 +1,2 @@
#!/bin/bash
uvicorn app:app --host 0.0.0.0 --reload
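Note: --reload watches the source tree and restarts on change, so this entrypoint suits development images; production replicas would usually drop it. For reference, a programmatic equivalent (port 8000 is uvicorn's default and an assumption here):

import uvicorn

if __name__ == "__main__":
    # Same as `uvicorn app:app --host 0.0.0.0 --reload`
    uvicorn.run("app:app", host="0.0.0.0", port=8000, reload=True)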

View file

@ -8,7 +8,7 @@ import schemas
import schemas_ee
from chalicelib.core import integrations_manager
from chalicelib.core import sessions
from chalicelib.core import tenants, users, metadata, projects, license, alerts, assist
from chalicelib.core import tenants, users, metadata, projects, license, assist
from chalicelib.core import webhook
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import captcha, SAML2_helper
@ -52,7 +52,7 @@ def login(data: schemas.UserLoginSchema = Body(...)):
c = tenants.get_by_tenant_id(tenant_id)
c.pop("createdAt")
c["projects"] = projects.get_projects(tenant_id=tenant_id, recording_state=True, recorded=True,
stack_integrations=True, version=True)
stack_integrations=True, version=True, user_id=r["id"])
c["smtp"] = helper.has_smtp()
c["iceServers"] = assist.get_ice_servers()
r["smtp"] = c["smtp"]
@ -195,7 +195,8 @@ def search_sessions_by_metadata(key: str, value: str, projectId: Optional[int] =
if key is None or value is None or len(value) == 0 and len(key) == 0:
return {"errors": ["please provide a key&value for search"]}
if projectId is not None and not projects.is_authorized(project_id=projectId, tenant_id=context.tenant_id):
if projectId is not None and not projects.is_authorized(project_id=projectId, tenant_id=context.tenant_id,
user_id=context.user_id):
return {"errors": ["unauthorized project"]}
if len(value) == 0:
return {"errors": ["please provide a value for search"]}
@ -213,13 +214,25 @@ def get_current_plan(context: schemas.CurrentContext = Depends(OR_context)):
}
@public_app.post('/alerts/notifications', tags=["alerts"])
@public_app.put('/alerts/notifications', tags=["alerts"])
def send_alerts_notifications(background_tasks: BackgroundTasks, data: schemas.AlertNotificationSchema = Body(...)):
# TODO: validate token
return {"data": alerts.process_notifications(data.notifications, background_tasks=background_tasks)}
@public_app.get('/general_stats', tags=["private"], include_in_schema=False)
def get_general_stats():
return {"data": {"sessions:": sessions.count_all()}}
@app.get('/client', tags=['projects'])
def get_client(context: schemas.CurrentContext = Depends(OR_context)):
r = tenants.get_by_tenant_id(context.tenant_id)
if r is not None:
r.pop("createdAt")
r["projects"] = projects.get_projects(tenant_id=context.tenant_id, recording_state=True, recorded=True,
stack_integrations=True, version=True, user_id=context.user_id)
return {
'data': r
}
@app.get('/projects', tags=['projects'])
def get_projects(last_tracker_version: Optional[str] = None, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True,
stack_integrations=True, version=True,
last_tracker_version=last_tracker_version, user_id=context.user_id)}

View file

@ -23,7 +23,7 @@ def get_roles(context: schemas.CurrentContext = Depends(OR_context)):
@app.post('/client/roles', tags=["client", "roles"])
@app.put('/client/roles', tags=["client", "roles"])
def add_role(data: schemas_ee.RolePayloadSchema = Body(...), context: schemas.CurrentContext = Depends(OR_context)):
data = roles.create(tenant_id=context.tenant_id, user_id=context.user_id, **data.dict())
data = roles.create(tenant_id=context.tenant_id, user_id=context.user_id, data=data)
if "errors" in data:
return data
@ -36,7 +36,7 @@ def add_role(data: schemas_ee.RolePayloadSchema = Body(...), context: schemas.Cu
@app.put('/client/roles/{roleId}', tags=["client", "roles"])
def edit_role(roleId: int, data: schemas_ee.RolePayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = roles.update(tenant_id=context.tenant_id, user_id=context.user_id, role_id=roleId, changes=data.dict())
data = roles.update(tenant_id=context.tenant_id, user_id=context.user_id, role_id=roleId, data=data)
if "errors" in data:
return data

View file

@ -16,6 +16,7 @@ from starlette import status
@public_app.get("/sso/saml2", tags=["saml2"])
@public_app.get("/sso/saml2/", tags=["saml2"])
async def start_sso(request: Request):
request.path = ''
req = await prepare_request(request=request)
@ -24,8 +25,8 @@ async def start_sso(request: Request):
return RedirectResponse(url=sso_built_url)
# @public_app.post('/sso/saml2/acs', tags=["saml2"], content_types=['application/x-www-form-urlencoded'])
@public_app.post('/sso/saml2/acs', tags=["saml2"])
@public_app.post('/sso/saml2/acs/', tags=["saml2"])
async def process_sso_assertion(request: Request):
req = await prepare_request(request=request)
session = req["cookie"]["session"]
@ -44,6 +45,8 @@ async def process_sso_assertion(request: Request):
user_data = auth.get_attributes()
elif auth.get_settings().is_debug_active():
error_reason = auth.get_last_error_reason()
print("SAML2 error:")
print(error_reason)
return {"errors": [error_reason]}
email = auth.get_nameid()
@ -77,11 +80,102 @@ async def process_sso_assertion(request: Request):
or admin_privileges[0].lower() == "false")
if existing is None:
print("== new user ==")
users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=admin_privileges,
origin=SAML2_helper.get_saml2_provider(),
name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])),
internal_id=internal_id, role_id=role["roleId"])
deleted = users.get_deleted_user_by_email(auth.get_nameid())
if deleted is not None:
print("== restore deleted user ==")
users.restore_sso_user(user_id=deleted["userId"], tenant_id=t['tenantId'], email=email,
admin=admin_privileges, origin=SAML2_helper.get_saml2_provider(),
name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])),
internal_id=internal_id, role_id=role["roleId"])
else:
print("== new user ==")
users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=admin_privileges,
origin=SAML2_helper.get_saml2_provider(),
name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])),
internal_id=internal_id, role_id=role["roleId"])
else:
if t['tenantId'] != existing["tenantId"]:
print("user exists for a different tenant")
return {"errors": ["user exists for a different tenant"]}
if existing.get("origin") is None:
print(f"== migrating user to {SAML2_helper.get_saml2_provider()} ==")
users.update(tenant_id=t['tenantId'], user_id=existing["id"],
changes={"origin": SAML2_helper.get_saml2_provider(), "internal_id": internal_id})
expiration = auth.get_session_expiration()
expiration = expiration if expiration is not None and expiration > 10 * 60 \
else int(config("sso_exp_delta_seconds", cast=int, default=24 * 60 * 60))
jwt = users.authenticate_sso(email=email, internal_id=internal_id, exp=expiration)
if jwt is None:
return {"errors": ["null JWT"]}
return Response(
status_code=status.HTTP_302_FOUND,
headers={'Location': SAML2_helper.get_landing_URL(jwt)})
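Note: this handler and the tenantKey variant below follow the same decision tree; a condensed outline of the shared flow (reconstructed from the code above, not a separate implementation):

# 1. prepare_request() -> init_saml_auth() -> auth.process_response()
# 2. email = auth.get_nameid(); existing = users.get_by_email_only(email)
# 3. if existing is None:
#        restore_sso_user(...)   # a soft-deleted row matches the email
#        or create_sso_user(...) # genuinely new user
#    else:
#        users.update(...)       # migrate origin/internal_id for pre-SSO users
# 4. users.authenticate_sso(...) -> JWT -> 302 redirect to the landing URL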
@public_app.post('/sso/saml2/acs/{tenantKey}', tags=["saml2"])
@public_app.post('/sso/saml2/acs/{tenantKey}/', tags=["saml2"])
async def process_sso_assertion_tk(tenantKey: str, request: Request):
req = await prepare_request(request=request)
session = req["cookie"]["session"]
auth = init_saml_auth(req)
request_id = None
if 'AuthNRequestID' in session:
request_id = session['AuthNRequestID']
auth.process_response(request_id=request_id)
errors = auth.get_errors()
user_data = {}
if len(errors) == 0:
if 'AuthNRequestID' in session:
del session['AuthNRequestID']
user_data = auth.get_attributes()
elif auth.get_settings().is_debug_active():
error_reason = auth.get_last_error_reason()
print("SAML2 error:")
print(error_reason)
return {"errors": [error_reason]}
email = auth.get_nameid()
print("received nameId:")
print(email)
existing = users.get_by_email_only(auth.get_nameid())
internal_id = next(iter(user_data.get("internalId", [])), None)
t = tenants.get_by_tenant_key(tenantKey)
if t is None:
print("invalid tenantKey, please copy the correct value from Preferences > Account")
return {"errors": ["invalid tenantKey, please copy the correct value from Preferences > Account"]}
print(user_data)
role_name = user_data.get("role", [])
if len(role_name) == 0:
print("No role specified, setting role to member")
role_name = ["member"]
role_name = role_name[0]
role = roles.get_role_by_name(tenant_id=t['tenantId'], name=role_name)
if role is None:
return {"errors": [f"role {role_name} not found, please create it in openreplay first"]}
admin_privileges = user_data.get("adminPrivileges", [])
admin_privileges = not (len(admin_privileges) == 0
or admin_privileges[0] is None
or admin_privileges[0].lower() == "false")
if existing is None:
deleted = users.get_deleted_user_by_email(auth.get_nameid())
if deleted is not None:
print("== restore deleted user ==")
users.restore_sso_user(user_id=deleted["userId"], tenant_id=t['tenantId'], email=email,
admin=admin_privileges, origin=SAML2_helper.get_saml2_provider(),
name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])),
internal_id=internal_id, role_id=role["roleId"])
else:
print("== new user ==")
users.create_sso_user(tenant_id=t['tenantId'], email=email, admin=admin_privileges,
origin=SAML2_helper.get_saml2_provider(),
name=" ".join(user_data.get("firstName", []) + user_data.get("lastName", [])),
internal_id=internal_id, role_id=role["roleId"])
else:
if t['tenantId'] != existing["tenantId"]:
print("user exists for a different tenant")
@ -102,6 +196,7 @@ async def process_sso_assertion(request: Request):
@public_app.get('/sso/saml2/sls', tags=["saml2"])
@public_app.get('/sso/saml2/sls/', tags=["saml2"])
async def process_sls_assertion(request: Request):
req = await prepare_request(request=request)
session = req["cookie"]["session"]
@ -137,6 +232,7 @@ async def process_sls_assertion(request: Request):
@public_app.get('/sso/saml2/metadata', tags=["saml2"])
@public_app.get('/sso/saml2/metadata/', tags=["saml2"])
async def saml2_metadata(request: Request):
req = await prepare_request(request=request)
auth = init_saml_auth(req)

View file

@ -9,6 +9,11 @@ class RolePayloadSchema(BaseModel):
name: str = Field(...)
description: Optional[str] = Field(None)
permissions: List[str] = Field(...)
all_projects: bool = Field(True)
projects: List[int] = Field([])
class Config:
alias_generator = schemas.attribute_to_camel_case
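Note on the Config above: alias_generator lets the API accept camelCase payloads while the model keeps snake_case attributes. A self-contained sketch (pydantic v1 semantics; the local attribute_to_camel_case is a stand-in for the project's helper and an assumption):

from typing import List
from pydantic import BaseModel, Field

def attribute_to_camel_case(snake: str) -> str:
    # stand-in for schemas.attribute_to_camel_case
    first, *rest = snake.split("_")
    return first + "".join(word.title() for word in rest)

class RoleExample(BaseModel):
    name: str = Field(...)
    all_projects: bool = Field(True)
    projects: List[int] = Field([])

    class Config:
        alias_generator = attribute_to_camel_case

print(RoleExample.parse_obj({"name": "QA", "allProjects": False, "projects": [7]}))
# -> name='QA' all_projects=False projects=[7]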
class CreateMemberSchema(schemas.CreateMemberSchema):

View file

@ -0,0 +1,4 @@
ALTER TABLE sessions
ADD COLUMN IF NOT EXISTS utm_source Nullable(String),
ADD COLUMN IF NOT EXISTS utm_medium Nullable(String),
ADD COLUMN IF NOT EXISTS utm_campaign Nullable(String);
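Note: ADD COLUMN IF NOT EXISTS makes this ClickHouse migration idempotent, and Nullable(String) means existing rows simply read NULL for the new UTM columns. A minimal sketch of applying it from Python, assuming clickhouse-driver (connection settings hypothetical):

from clickhouse_driver import Client

client = Client(host="localhost")  # hypothetical connection settings
client.execute(
    "ALTER TABLE sessions "
    "ADD COLUMN IF NOT EXISTS utm_source Nullable(String), "
    "ADD COLUMN IF NOT EXISTS utm_medium Nullable(String), "
    "ADD COLUMN IF NOT EXISTS utm_campaign Nullable(String)"
)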

View file

@ -1,21 +1,21 @@
CREATE TABLE clicks
CREATE TABLE IF NOT EXISTS clicks
(
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
label String,
hesitation_time Nullable(UInt32)
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
label String,
hesitation_time Nullable(UInt32)
) ENGINE = MergeTree
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;
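Note on the recreated tables: MergeTree with PARTITION BY toDate(datetime) keeps one part set per day, ORDER BY (project_id, datetime) clusters rows for the typical per-project time-range scan, and the TTL clause drops parts older than a month without an external cleanup job. A query shaped like the sort key (names beyond the schema are hypothetical):

weekly_clicks = """
SELECT label, count() AS clicks
FROM clicks
WHERE project_id = %(project_id)s
  AND datetime >= now() - INTERVAL 7 DAY
GROUP BY label
ORDER BY clicks DESC
"""
# The date predicate prunes whole daily partitions; the project_id predicate
# seeks within the remaining parts via the (project_id, datetime) sort key.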

View file

@ -1,4 +1,4 @@
CREATE TABLE customs
CREATE TABLE IF NOT EXISTS customs
(
session_id UInt64,
project_id UInt32,

View file

@ -1,22 +1,23 @@
CREATE TABLE errors (
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
source Enum8('js_exception'=0, 'bugsnag'=1, 'cloudwatch'=2, 'datadog'=3, 'elasticsearch'=4, 'newrelic'=5, 'rollbar'=6, 'sentry'=7, 'stackdriver'=8, 'sumologic'=9),
name Nullable(String),
message String,
error_id String
CREATE TABLE IF NOT EXISTS errors
(
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
source Enum8('js_exception'=0, 'bugsnag'=1, 'cloudwatch'=2, 'datadog'=3, 'elasticsearch'=4, 'newrelic'=5, 'rollbar'=6, 'sentry'=7, 'stackdriver'=8, 'sumologic'=9),
name Nullable(String),
message String,
error_id String
) ENGINE = MergeTree
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;

View file

@ -1,20 +1,20 @@
CREATE TABLE inputs
CREATE TABLE IF NOT EXISTS inputs
(
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
label String
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
label String
) ENGINE = MergeTree
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;

View file

@ -1,26 +1,26 @@
CREATE TABLE longtasks
CREATE TABLE IF NOT EXISTS longtasks
(
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
duration UInt16,
context Enum8('unknown'=0, 'self'=1, 'same-origin-ancestor'=2, 'same-origin-descendant'=3, 'same-origin'=4, 'cross-origin-ancestor'=5, 'cross-origin-descendant'=6, 'cross-origin-unreachable'=7, 'multiple-contexts'=8),
container_type Enum8('window'=0, 'iframe'=1, 'embed'=2, 'object'=3),
container_id String,
container_name String,
container_src String
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
duration UInt16,
context Enum8('unknown'=0, 'self'=1, 'same-origin-ancestor'=2, 'same-origin-descendant'=3, 'same-origin'=4, 'cross-origin-ancestor'=5, 'cross-origin-descendant'=6, 'cross-origin-unreachable'=7, 'multiple-contexts'=8),
container_type Enum8('window'=0, 'iframe'=1, 'embed'=2, 'object'=3),
container_id String,
container_name String,
container_src String
) ENGINE = MergeTree
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;

View file

@ -1,213 +1,215 @@
CREATE TABLE negatives_buffer (
sessionid UInt64,
clickevent_hesitationtime Nullable(UInt64),
clickevent_label Nullable(String),
clickevent_messageid Nullable(UInt64),
clickevent_timestamp Nullable(Datetime),
connectioninformation_downlink Nullable(UInt64),
connectioninformation_type Nullable(String),
consolelog_level Nullable(String),
consolelog_value Nullable(String),
cpuissue_duration Nullable(UInt64),
cpuissue_rate Nullable(UInt64),
cpuissue_timestamp Nullable(Datetime),
createdocument Nullable(UInt8),
createelementnode_id Nullable(UInt64),
createelementnode_parentid Nullable(UInt64),
cssdeleterule_index Nullable(UInt64),
cssdeleterule_stylesheetid Nullable(UInt64),
cssinsertrule_index Nullable(UInt64),
cssinsertrule_rule Nullable(String),
cssinsertrule_stylesheetid Nullable(UInt64),
customevent_messageid Nullable(UInt64),
customevent_name Nullable(String),
customevent_payload Nullable(String),
customevent_timestamp Nullable(Datetime),
domdrop_timestamp Nullable(Datetime),
errorevent_message Nullable(String),
errorevent_messageid Nullable(UInt64),
errorevent_name Nullable(String),
errorevent_payload Nullable(String),
errorevent_source Nullable(String),
errorevent_timestamp Nullable(Datetime),
fetch_duration Nullable(UInt64),
fetch_method Nullable(String),
fetch_request Nullable(String),
fetch_status Nullable(UInt64),
fetch_timestamp Nullable(Datetime),
fetch_url Nullable(String),
graphql_operationkind Nullable(String),
graphql_operationname Nullable(String),
graphql_response Nullable(String),
graphql_variables Nullable(String),
graphqlevent_messageid Nullable(UInt64),
graphqlevent_name Nullable(String),
graphqlevent_timestamp Nullable(Datetime),
inputevent_label Nullable(String),
inputevent_messageid Nullable(UInt64),
inputevent_timestamp Nullable(Datetime),
inputevent_value Nullable(String),
inputevent_valuemasked Nullable(UInt8),
jsexception_message Nullable(String),
jsexception_name Nullable(String),
jsexception_payload Nullable(String),
longtasks_timestamp Nullable(Datetime),
longtasks_duration Nullable(UInt64),
longtasks_containerid Nullable(String),
longtasks_containersrc Nullable(String),
memoryissue_duration Nullable(UInt64),
memoryissue_rate Nullable(UInt64),
memoryissue_timestamp Nullable(Datetime),
metadata_key Nullable(String),
metadata_value Nullable(String),
mobx_payload Nullable(String),
mobx_type Nullable(String),
mouseclick_id Nullable(UInt64),
mouseclick_hesitationtime Nullable(UInt64),
mouseclick_label Nullable(String),
mousemove_x Nullable(UInt64),
mousemove_y Nullable(UInt64),
movenode_id Nullable(UInt64),
movenode_index Nullable(UInt64),
movenode_parentid Nullable(UInt64),
ngrx_action Nullable(String),
ngrx_duration Nullable(UInt64),
ngrx_state Nullable(String),
pageevent_domcontentloadedeventend Nullable(UInt64),
pageevent_domcontentloadedeventstart Nullable(UInt64),
pageevent_firstcontentfulpaint Nullable(UInt64),
pageevent_firstpaint Nullable(UInt64),
pageevent_loaded Nullable(UInt8),
pageevent_loadeventend Nullable(UInt64),
pageevent_loadeventstart Nullable(UInt64),
pageevent_messageid Nullable(UInt64),
pageevent_referrer Nullable(String),
pageevent_requeststart Nullable(UInt64),
pageevent_responseend Nullable(UInt64),
pageevent_responsestart Nullable(UInt64),
pageevent_speedindex Nullable(UInt64),
pageevent_timestamp Nullable(Datetime),
pageevent_url Nullable(String),
pageloadtiming_domcontentloadedeventend Nullable(UInt64),
pageloadtiming_domcontentloadedeventstart Nullable(UInt64),
pageloadtiming_firstcontentfulpaint Nullable(UInt64),
pageloadtiming_firstpaint Nullable(UInt64),
pageloadtiming_loadeventend Nullable(UInt64),
pageloadtiming_loadeventstart Nullable(UInt64),
pageloadtiming_requeststart Nullable(UInt64),
pageloadtiming_responseend Nullable(UInt64),
pageloadtiming_responsestart Nullable(UInt64),
pagerendertiming_speedindex Nullable(UInt64),
pagerendertiming_timetointeractive Nullable(UInt64),
pagerendertiming_visuallycomplete Nullable(UInt64),
performancetrack_frames Nullable(Int64),
performancetrack_ticks Nullable(Int64),
performancetrack_totaljsheapsize Nullable(UInt64),
performancetrack_usedjsheapsize Nullable(UInt64),
performancetrackaggr_avgcpu Nullable(UInt64),
performancetrackaggr_avgfps Nullable(UInt64),
performancetrackaggr_avgtotaljsheapsize Nullable(UInt64),
performancetrackaggr_avgusedjsheapsize Nullable(UInt64),
performancetrackaggr_maxcpu Nullable(UInt64),
performancetrackaggr_maxfps Nullable(UInt64),
performancetrackaggr_maxtotaljsheapsize Nullable(UInt64),
performancetrackaggr_maxusedjsheapsize Nullable(UInt64),
performancetrackaggr_mincpu Nullable(UInt64),
performancetrackaggr_minfps Nullable(UInt64),
performancetrackaggr_mintotaljsheapsize Nullable(UInt64),
performancetrackaggr_minusedjsheapsize Nullable(UInt64),
performancetrackaggr_timestampend Nullable(Datetime),
performancetrackaggr_timestampstart Nullable(Datetime),
profiler_args Nullable(String),
profiler_duration Nullable(UInt64),
profiler_name Nullable(String),
profiler_result Nullable(String),
rawcustomevent_name Nullable(String),
rawcustomevent_payload Nullable(String),
rawerrorevent_message Nullable(String),
rawerrorevent_name Nullable(String),
rawerrorevent_payload Nullable(String),
rawerrorevent_source Nullable(String),
rawerrorevent_timestamp Nullable(Datetime),
redux_action Nullable(String),
redux_duration Nullable(UInt64),
redux_state Nullable(String),
removenode_id Nullable(UInt64),
removenodeattribute_id Nullable(UInt64),
removenodeattribute_name Nullable(String),
resourceevent_decodedbodysize Nullable(UInt64),
resourceevent_duration Nullable(UInt64),
resourceevent_encodedbodysize Nullable(UInt64),
resourceevent_headersize Nullable(UInt64),
resourceevent_messageid Nullable(UInt64),
resourceevent_method Nullable(String),
resourceevent_status Nullable(UInt64),
resourceevent_success Nullable(UInt8),
resourceevent_timestamp Nullable(Datetime),
resourceevent_ttfb Nullable(UInt64),
resourceevent_type Nullable(String),
resourceevent_url Nullable(String),
resourcetiming_decodedbodysize Nullable(UInt64),
resourcetiming_duration Nullable(UInt64),
resourcetiming_encodedbodysize Nullable(UInt64),
resourcetiming_headersize Nullable(UInt64),
resourcetiming_initiator Nullable(String),
resourcetiming_timestamp Nullable(Datetime),
resourcetiming_ttfb Nullable(UInt64),
resourcetiming_url Nullable(String),
sessiondisconnect Nullable(UInt8),
sessiondisconnect_timestamp Nullable(Datetime),
sessionend Nullable(UInt8),
sessionend_timestamp Nullable(Datetime),
sessionstart_projectid Nullable(UInt64),
sessionstart_revid Nullable(String),
sessionstart_timestamp Nullable(Datetime),
sessionstart_trackerversion Nullable(String),
sessionstart_useragent Nullable(String),
sessionstart_userbrowser Nullable(String),
sessionstart_userbrowserversion Nullable(String),
sessionstart_usercountry Nullable(String),
sessionstart_userdevice Nullable(String),
sessionstart_userdeviceheapsize Nullable(UInt64),
sessionstart_userdevicememorysize Nullable(UInt64),
sessionstart_userdevicetype Nullable(String),
sessionstart_useros Nullable(String),
sessionstart_userosversion Nullable(String),
sessionstart_useruuid Nullable(String),
setcssdata_data Nullable(UInt64),
setcssdata_id Nullable(UInt64),
setinputchecked_checked Nullable(UInt64),
setinputchecked_id Nullable(UInt64),
setinputtarget_id Nullable(UInt64),
setinputtarget_label Nullable(UInt64),
setinputvalue_id Nullable(UInt64),
setinputvalue_mask Nullable(UInt64),
setinputvalue_value Nullable(UInt64),
setnodeattribute_id Nullable(UInt64),
setnodeattribute_name Nullable(UInt64),
setnodeattribute_value Nullable(UInt64),
setnodedata_data Nullable(UInt64),
setnodedata_id Nullable(UInt64),
setnodescroll_id Nullable(UInt64),
setnodescroll_x Nullable(Int64),
setnodescroll_y Nullable(Int64),
setpagelocation_navigationstart Nullable(UInt64),
setpagelocation_referrer Nullable(String),
setpagelocation_url Nullable(String),
setpagevisibility_hidden Nullable(UInt8),
setviewportscroll_x Nullable(Int64),
setviewportscroll_y Nullable(Int64),
setviewportsize_height Nullable(UInt64),
setviewportsize_width Nullable(UInt64),
stateaction_type Nullable(String),
stateactionevent_messageid Nullable(UInt64),
stateactionevent_timestamp Nullable(Datetime),
stateactionevent_type Nullable(String),
timestamp_timestamp Nullable(Datetime),
useranonymousid_id Nullable(String),
userid_id Nullable(String),
vuex_mutation Nullable(String),
vuex_state Nullable(String),
received_at Datetime,
batch_order_number Int64)
ENGINE = Buffer(default, negatives, 16, 10, 120, 10000, 1000000, 10000, 100000000);
CREATE TABLE IF NOT EXISTS negatives_buffer
(
sessionid UInt64,
clickevent_hesitationtime Nullable(UInt64),
clickevent_label Nullable(String),
clickevent_messageid Nullable(UInt64),
clickevent_timestamp Nullable(Datetime),
connectioninformation_downlink Nullable(UInt64),
connectioninformation_type Nullable(String),
consolelog_level Nullable(String),
consolelog_value Nullable(String),
cpuissue_duration Nullable(UInt64),
cpuissue_rate Nullable(UInt64),
cpuissue_timestamp Nullable(Datetime),
createdocument Nullable(UInt8),
createelementnode_id Nullable(UInt64),
createelementnode_parentid Nullable(UInt64),
cssdeleterule_index Nullable(UInt64),
cssdeleterule_stylesheetid Nullable(UInt64),
cssinsertrule_index Nullable(UInt64),
cssinsertrule_rule Nullable(String),
cssinsertrule_stylesheetid Nullable(UInt64),
customevent_messageid Nullable(UInt64),
customevent_name Nullable(String),
customevent_payload Nullable(String),
customevent_timestamp Nullable(Datetime),
domdrop_timestamp Nullable(Datetime),
errorevent_message Nullable(String),
errorevent_messageid Nullable(UInt64),
errorevent_name Nullable(String),
errorevent_payload Nullable(String),
errorevent_source Nullable(String),
errorevent_timestamp Nullable(Datetime),
fetch_duration Nullable(UInt64),
fetch_method Nullable(String),
fetch_request Nullable(String),
fetch_status Nullable(UInt64),
fetch_timestamp Nullable(Datetime),
fetch_url Nullable(String),
graphql_operationkind Nullable(String),
graphql_operationname Nullable(String),
graphql_response Nullable(String),
graphql_variables Nullable(String),
graphqlevent_messageid Nullable(UInt64),
graphqlevent_name Nullable(String),
graphqlevent_timestamp Nullable(Datetime),
inputevent_label Nullable(String),
inputevent_messageid Nullable(UInt64),
inputevent_timestamp Nullable(Datetime),
inputevent_value Nullable(String),
inputevent_valuemasked Nullable(UInt8),
jsexception_message Nullable(String),
jsexception_name Nullable(String),
jsexception_payload Nullable(String),
longtasks_timestamp Nullable(Datetime),
longtasks_duration Nullable(UInt64),
longtasks_containerid Nullable(String),
longtasks_containersrc Nullable(String),
memoryissue_duration Nullable(UInt64),
memoryissue_rate Nullable(UInt64),
memoryissue_timestamp Nullable(Datetime),
metadata_key Nullable(String),
metadata_value Nullable(String),
mobx_payload Nullable(String),
mobx_type Nullable(String),
mouseclick_id Nullable(UInt64),
mouseclick_hesitationtime Nullable(UInt64),
mouseclick_label Nullable(String),
mousemove_x Nullable(UInt64),
mousemove_y Nullable(UInt64),
movenode_id Nullable(UInt64),
movenode_index Nullable(UInt64),
movenode_parentid Nullable(UInt64),
ngrx_action Nullable(String),
ngrx_duration Nullable(UInt64),
ngrx_state Nullable(String),
pageevent_domcontentloadedeventend Nullable(UInt64),
pageevent_domcontentloadedeventstart Nullable(UInt64),
pageevent_firstcontentfulpaint Nullable(UInt64),
pageevent_firstpaint Nullable(UInt64),
pageevent_loaded Nullable(UInt8),
pageevent_loadeventend Nullable(UInt64),
pageevent_loadeventstart Nullable(UInt64),
pageevent_messageid Nullable(UInt64),
pageevent_referrer Nullable(String),
pageevent_requeststart Nullable(UInt64),
pageevent_responseend Nullable(UInt64),
pageevent_responsestart Nullable(UInt64),
pageevent_speedindex Nullable(UInt64),
pageevent_timestamp Nullable(Datetime),
pageevent_url Nullable(String),
pageloadtiming_domcontentloadedeventend Nullable(UInt64),
pageloadtiming_domcontentloadedeventstart Nullable(UInt64),
pageloadtiming_firstcontentfulpaint Nullable(UInt64),
pageloadtiming_firstpaint Nullable(UInt64),
pageloadtiming_loadeventend Nullable(UInt64),
pageloadtiming_loadeventstart Nullable(UInt64),
pageloadtiming_requeststart Nullable(UInt64),
pageloadtiming_responseend Nullable(UInt64),
pageloadtiming_responsestart Nullable(UInt64),
pagerendertiming_speedindex Nullable(UInt64),
pagerendertiming_timetointeractive Nullable(UInt64),
pagerendertiming_visuallycomplete Nullable(UInt64),
performancetrack_frames Nullable(Int64),
performancetrack_ticks Nullable(Int64),
performancetrack_totaljsheapsize Nullable(UInt64),
performancetrack_usedjsheapsize Nullable(UInt64),
performancetrackaggr_avgcpu Nullable(UInt64),
performancetrackaggr_avgfps Nullable(UInt64),
performancetrackaggr_avgtotaljsheapsize Nullable(UInt64),
performancetrackaggr_avgusedjsheapsize Nullable(UInt64),
performancetrackaggr_maxcpu Nullable(UInt64),
performancetrackaggr_maxfps Nullable(UInt64),
performancetrackaggr_maxtotaljsheapsize Nullable(UInt64),
performancetrackaggr_maxusedjsheapsize Nullable(UInt64),
performancetrackaggr_mincpu Nullable(UInt64),
performancetrackaggr_minfps Nullable(UInt64),
performancetrackaggr_mintotaljsheapsize Nullable(UInt64),
performancetrackaggr_minusedjsheapsize Nullable(UInt64),
performancetrackaggr_timestampend Nullable(Datetime),
performancetrackaggr_timestampstart Nullable(Datetime),
profiler_args Nullable(String),
profiler_duration Nullable(UInt64),
profiler_name Nullable(String),
profiler_result Nullable(String),
rawcustomevent_name Nullable(String),
rawcustomevent_payload Nullable(String),
rawerrorevent_message Nullable(String),
rawerrorevent_name Nullable(String),
rawerrorevent_payload Nullable(String),
rawerrorevent_source Nullable(String),
rawerrorevent_timestamp Nullable(Datetime),
redux_action Nullable(String),
redux_duration Nullable(UInt64),
redux_state Nullable(String),
removenode_id Nullable(UInt64),
removenodeattribute_id Nullable(UInt64),
removenodeattribute_name Nullable(String),
resourceevent_decodedbodysize Nullable(UInt64),
resourceevent_duration Nullable(UInt64),
resourceevent_encodedbodysize Nullable(UInt64),
resourceevent_headersize Nullable(UInt64),
resourceevent_messageid Nullable(UInt64),
resourceevent_method Nullable(String),
resourceevent_status Nullable(UInt64),
resourceevent_success Nullable(UInt8),
resourceevent_timestamp Nullable(Datetime),
resourceevent_ttfb Nullable(UInt64),
resourceevent_type Nullable(String),
resourceevent_url Nullable(String),
resourcetiming_decodedbodysize Nullable(UInt64),
resourcetiming_duration Nullable(UInt64),
resourcetiming_encodedbodysize Nullable(UInt64),
resourcetiming_headersize Nullable(UInt64),
resourcetiming_initiator Nullable(String),
resourcetiming_timestamp Nullable(Datetime),
resourcetiming_ttfb Nullable(UInt64),
resourcetiming_url Nullable(String),
sessiondisconnect Nullable(UInt8),
sessiondisconnect_timestamp Nullable(Datetime),
sessionend Nullable(UInt8),
sessionend_timestamp Nullable(Datetime),
sessionstart_projectid Nullable(UInt64),
sessionstart_revid Nullable(String),
sessionstart_timestamp Nullable(Datetime),
sessionstart_trackerversion Nullable(String),
sessionstart_useragent Nullable(String),
sessionstart_userbrowser Nullable(String),
sessionstart_userbrowserversion Nullable(String),
sessionstart_usercountry Nullable(String),
sessionstart_userdevice Nullable(String),
sessionstart_userdeviceheapsize Nullable(UInt64),
sessionstart_userdevicememorysize Nullable(UInt64),
sessionstart_userdevicetype Nullable(String),
sessionstart_useros Nullable(String),
sessionstart_userosversion Nullable(String),
sessionstart_useruuid Nullable(String),
setcssdata_data Nullable(UInt64),
setcssdata_id Nullable(UInt64),
setinputchecked_checked Nullable(UInt64),
setinputchecked_id Nullable(UInt64),
setinputtarget_id Nullable(UInt64),
setinputtarget_label Nullable(UInt64),
setinputvalue_id Nullable(UInt64),
setinputvalue_mask Nullable(UInt64),
setinputvalue_value Nullable(UInt64),
setnodeattribute_id Nullable(UInt64),
setnodeattribute_name Nullable(UInt64),
setnodeattribute_value Nullable(UInt64),
setnodedata_data Nullable(UInt64),
setnodedata_id Nullable(UInt64),
setnodescroll_id Nullable(UInt64),
setnodescroll_x Nullable(Int64),
setnodescroll_y Nullable(Int64),
setpagelocation_navigationstart Nullable(UInt64),
setpagelocation_referrer Nullable(String),
setpagelocation_url Nullable(String),
setpagevisibility_hidden Nullable(UInt8),
setviewportscroll_x Nullable(Int64),
setviewportscroll_y Nullable(Int64),
setviewportsize_height Nullable(UInt64),
setviewportsize_width Nullable(UInt64),
stateaction_type Nullable(String),
stateactionevent_messageid Nullable(UInt64),
stateactionevent_timestamp Nullable(Datetime),
stateactionevent_type Nullable(String),
timestamp_timestamp Nullable(Datetime),
useranonymousid_id Nullable(String),
userid_id Nullable(String),
vuex_mutation Nullable(String),
vuex_state Nullable(String),
received_at Datetime,
batch_order_number Int64
)
ENGINE = Buffer(default, negatives, 16, 10, 120, 10000, 1000000, 10000, 100000000);
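Note on the Buffer engine arguments, in ClickHouse's documented order:

# Buffer(default, negatives,  -- destination database and table
#        16,                  -- num_layers: independent in-memory buffers
#        10, 120,             -- min_time / max_time, in seconds
#        10000, 1000000,      -- min_rows / max_rows
#        10000, 100000000)    -- min_bytes / max_bytes
# A layer flushes to `negatives` once all the minimums are met, or as soon as
# any maximum is exceeded; rows not yet flushed are lost on a hard server
# crash, which is the usual trade-off for this write-smoothing engine.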

View file

@ -1,216 +1,218 @@
create table negatives (
sessionid UInt64,
clickevent_hesitationtime Nullable(UInt64),
clickevent_label Nullable(String),
clickevent_messageid Nullable(UInt64),
clickevent_timestamp Nullable(Datetime),
connectioninformation_downlink Nullable(UInt64),
connectioninformation_type Nullable(String),
consolelog_level Nullable(String),
consolelog_value Nullable(String),
cpuissue_duration Nullable(UInt64),
cpuissue_rate Nullable(UInt64),
cpuissue_timestamp Nullable(Datetime),
createdocument Nullable(UInt8),
createelementnode_id Nullable(UInt64),
createelementnode_parentid Nullable(UInt64),
cssdeleterule_index Nullable(UInt64),
cssdeleterule_stylesheetid Nullable(UInt64),
cssinsertrule_index Nullable(UInt64),
cssinsertrule_rule Nullable(String),
cssinsertrule_stylesheetid Nullable(UInt64),
customevent_messageid Nullable(UInt64),
customevent_name Nullable(String),
customevent_payload Nullable(String),
customevent_timestamp Nullable(Datetime),
domdrop_timestamp Nullable(Datetime),
errorevent_message Nullable(String),
errorevent_messageid Nullable(UInt64),
errorevent_name Nullable(String),
errorevent_payload Nullable(String),
errorevent_source Nullable(String),
errorevent_timestamp Nullable(Datetime),
fetch_duration Nullable(UInt64),
fetch_method Nullable(String),
fetch_request Nullable(String),
fetch_status Nullable(UInt64),
fetch_timestamp Nullable(Datetime),
fetch_url Nullable(String),
graphql_operationkind Nullable(String),
graphql_operationname Nullable(String),
graphql_response Nullable(String),
graphql_variables Nullable(String),
graphqlevent_messageid Nullable(UInt64),
graphqlevent_name Nullable(String),
graphqlevent_timestamp Nullable(Datetime),
inputevent_label Nullable(String),
inputevent_messageid Nullable(UInt64),
inputevent_timestamp Nullable(Datetime),
inputevent_value Nullable(String),
inputevent_valuemasked Nullable(UInt8),
jsexception_message Nullable(String),
jsexception_name Nullable(String),
jsexception_payload Nullable(String),
longtasks_timestamp Nullable(Datetime),
longtasks_duration Nullable(UInt64),
longtasks_containerid Nullable(String),
longtasks_containersrc Nullable(String),
memoryissue_duration Nullable(UInt64),
memoryissue_rate Nullable(UInt64),
memoryissue_timestamp Nullable(Datetime),
metadata_key Nullable(String),
metadata_value Nullable(String),
mobx_payload Nullable(String),
mobx_type Nullable(String),
mouseclick_id Nullable(UInt64),
mouseclick_hesitationtime Nullable(UInt64),
mouseclick_label Nullable(String),
mousemove_x Nullable(UInt64),
mousemove_y Nullable(UInt64),
movenode_id Nullable(UInt64),
movenode_index Nullable(UInt64),
movenode_parentid Nullable(UInt64),
ngrx_action Nullable(String),
ngrx_duration Nullable(UInt64),
ngrx_state Nullable(String),
pageevent_domcontentloadedeventend Nullable(UInt64),
pageevent_domcontentloadedeventstart Nullable(UInt64),
pageevent_firstcontentfulpaint Nullable(UInt64),
pageevent_firstpaint Nullable(UInt64),
pageevent_loaded Nullable(UInt8),
pageevent_loadeventend Nullable(UInt64),
pageevent_loadeventstart Nullable(UInt64),
pageevent_messageid Nullable(UInt64),
pageevent_referrer Nullable(String),
pageevent_requeststart Nullable(UInt64),
pageevent_responseend Nullable(UInt64),
pageevent_responsestart Nullable(UInt64),
pageevent_speedindex Nullable(UInt64),
pageevent_timestamp Nullable(Datetime),
pageevent_url Nullable(String),
pageloadtiming_domcontentloadedeventend Nullable(UInt64),
pageloadtiming_domcontentloadedeventstart Nullable(UInt64),
pageloadtiming_firstcontentfulpaint Nullable(UInt64),
pageloadtiming_firstpaint Nullable(UInt64),
pageloadtiming_loadeventend Nullable(UInt64),
pageloadtiming_loadeventstart Nullable(UInt64),
pageloadtiming_requeststart Nullable(UInt64),
pageloadtiming_responseend Nullable(UInt64),
pageloadtiming_responsestart Nullable(UInt64),
pagerendertiming_speedindex Nullable(UInt64),
pagerendertiming_timetointeractive Nullable(UInt64),
pagerendertiming_visuallycomplete Nullable(UInt64),
performancetrack_frames Nullable(Int64),
performancetrack_ticks Nullable(Int64),
performancetrack_totaljsheapsize Nullable(UInt64),
performancetrack_usedjsheapsize Nullable(UInt64),
performancetrackaggr_avgcpu Nullable(UInt64),
performancetrackaggr_avgfps Nullable(UInt64),
performancetrackaggr_avgtotaljsheapsize Nullable(UInt64),
performancetrackaggr_avgusedjsheapsize Nullable(UInt64),
performancetrackaggr_maxcpu Nullable(UInt64),
performancetrackaggr_maxfps Nullable(UInt64),
performancetrackaggr_maxtotaljsheapsize Nullable(UInt64),
performancetrackaggr_maxusedjsheapsize Nullable(UInt64),
performancetrackaggr_mincpu Nullable(UInt64),
performancetrackaggr_minfps Nullable(UInt64),
performancetrackaggr_mintotaljsheapsize Nullable(UInt64),
performancetrackaggr_minusedjsheapsize Nullable(UInt64),
performancetrackaggr_timestampend Nullable(Datetime),
performancetrackaggr_timestampstart Nullable(Datetime),
profiler_args Nullable(String),
profiler_duration Nullable(UInt64),
profiler_name Nullable(String),
profiler_result Nullable(String),
rawcustomevent_name Nullable(String),
rawcustomevent_payload Nullable(String),
rawerrorevent_message Nullable(String),
rawerrorevent_name Nullable(String),
rawerrorevent_payload Nullable(String),
rawerrorevent_source Nullable(String),
rawerrorevent_timestamp Nullable(Datetime),
redux_action Nullable(String),
redux_duration Nullable(UInt64),
redux_state Nullable(String),
removenode_id Nullable(UInt64),
removenodeattribute_id Nullable(UInt64),
removenodeattribute_name Nullable(String),
resourceevent_decodedbodysize Nullable(UInt64),
resourceevent_duration Nullable(UInt64),
resourceevent_encodedbodysize Nullable(UInt64),
resourceevent_headersize Nullable(UInt64),
resourceevent_messageid Nullable(UInt64),
resourceevent_method Nullable(String),
resourceevent_status Nullable(UInt64),
resourceevent_success Nullable(UInt8),
resourceevent_timestamp Nullable(Datetime),
resourceevent_ttfb Nullable(UInt64),
resourceevent_type Nullable(String),
resourceevent_url Nullable(String),
resourcetiming_decodedbodysize Nullable(UInt64),
resourcetiming_duration Nullable(UInt64),
resourcetiming_encodedbodysize Nullable(UInt64),
resourcetiming_headersize Nullable(UInt64),
resourcetiming_initiator Nullable(String),
resourcetiming_timestamp Nullable(Datetime),
resourcetiming_ttfb Nullable(UInt64),
resourcetiming_url Nullable(String),
sessiondisconnect Nullable(UInt8),
sessiondisconnect_timestamp Nullable(Datetime),
sessionend Nullable(UInt8),
sessionend_timestamp Nullable(Datetime),
sessionstart_projectid Nullable(UInt64),
sessionstart_revid Nullable(String),
sessionstart_timestamp Nullable(Datetime),
sessionstart_trackerversion Nullable(String),
sessionstart_useragent Nullable(String),
sessionstart_userbrowser Nullable(String),
sessionstart_userbrowserversion Nullable(String),
sessionstart_usercountry Nullable(String),
sessionstart_userdevice Nullable(String),
sessionstart_userdeviceheapsize Nullable(UInt64),
sessionstart_userdevicememorysize Nullable(UInt64),
sessionstart_userdevicetype Nullable(String),
sessionstart_useros Nullable(String),
sessionstart_userosversion Nullable(String),
sessionstart_useruuid Nullable(String),
setcssdata_data Nullable(UInt64),
setcssdata_id Nullable(UInt64),
setinputchecked_checked Nullable(UInt64),
setinputchecked_id Nullable(UInt64),
setinputtarget_id Nullable(UInt64),
setinputtarget_label Nullable(UInt64),
setinputvalue_id Nullable(UInt64),
setinputvalue_mask Nullable(UInt64),
setinputvalue_value Nullable(UInt64),
setnodeattribute_id Nullable(UInt64),
setnodeattribute_name Nullable(UInt64),
setnodeattribute_value Nullable(UInt64),
setnodedata_data Nullable(UInt64),
setnodedata_id Nullable(UInt64),
setnodescroll_id Nullable(UInt64),
setnodescroll_x Nullable(Int64),
setnodescroll_y Nullable(Int64),
setpagelocation_navigationstart Nullable(UInt64),
setpagelocation_referrer Nullable(String),
setpagelocation_url Nullable(String),
setpagevisibility_hidden Nullable(UInt8),
setviewportscroll_x Nullable(Int64),
setviewportscroll_y Nullable(Int64),
setviewportsize_height Nullable(UInt64),
setviewportsize_width Nullable(UInt64),
stateaction_type Nullable(String),
stateactionevent_messageid Nullable(UInt64),
stateactionevent_timestamp Nullable(Datetime),
stateactionevent_type Nullable(String),
timestamp_timestamp Nullable(Datetime),
useranonymousid_id Nullable(String),
userid_id Nullable(String),
vuex_mutation Nullable(String),
vuex_state Nullable(String),
received_at Datetime,
batch_order_number Int64)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(received_at)
ORDER BY (received_at, batch_order_number)
SETTINGS min_bytes_for_wide_part=1, use_minimalistic_part_header_in_zookeeper=1;
CREATE TABLE IF NOT EXISTS negatives
(
sessionid UInt64,
clickevent_hesitationtime Nullable(UInt64),
clickevent_label Nullable(String),
clickevent_messageid Nullable(UInt64),
clickevent_timestamp Nullable(Datetime),
connectioninformation_downlink Nullable(UInt64),
connectioninformation_type Nullable(String),
consolelog_level Nullable(String),
consolelog_value Nullable(String),
cpuissue_duration Nullable(UInt64),
cpuissue_rate Nullable(UInt64),
cpuissue_timestamp Nullable(Datetime),
createdocument Nullable(UInt8),
createelementnode_id Nullable(UInt64),
createelementnode_parentid Nullable(UInt64),
cssdeleterule_index Nullable(UInt64),
cssdeleterule_stylesheetid Nullable(UInt64),
cssinsertrule_index Nullable(UInt64),
cssinsertrule_rule Nullable(String),
cssinsertrule_stylesheetid Nullable(UInt64),
customevent_messageid Nullable(UInt64),
customevent_name Nullable(String),
customevent_payload Nullable(String),
customevent_timestamp Nullable(Datetime),
domdrop_timestamp Nullable(Datetime),
errorevent_message Nullable(String),
errorevent_messageid Nullable(UInt64),
errorevent_name Nullable(String),
errorevent_payload Nullable(String),
errorevent_source Nullable(String),
errorevent_timestamp Nullable(Datetime),
fetch_duration Nullable(UInt64),
fetch_method Nullable(String),
fetch_request Nullable(String),
fetch_status Nullable(UInt64),
fetch_timestamp Nullable(Datetime),
fetch_url Nullable(String),
graphql_operationkind Nullable(String),
graphql_operationname Nullable(String),
graphql_response Nullable(String),
graphql_variables Nullable(String),
graphqlevent_messageid Nullable(UInt64),
graphqlevent_name Nullable(String),
graphqlevent_timestamp Nullable(Datetime),
inputevent_label Nullable(String),
inputevent_messageid Nullable(UInt64),
inputevent_timestamp Nullable(Datetime),
inputevent_value Nullable(String),
inputevent_valuemasked Nullable(UInt8),
jsexception_message Nullable(String),
jsexception_name Nullable(String),
jsexception_payload Nullable(String),
longtasks_timestamp Nullable(Datetime),
longtasks_duration Nullable(UInt64),
longtasks_containerid Nullable(String),
longtasks_containersrc Nullable(String),
memoryissue_duration Nullable(UInt64),
memoryissue_rate Nullable(UInt64),
memoryissue_timestamp Nullable(Datetime),
metadata_key Nullable(String),
metadata_value Nullable(String),
mobx_payload Nullable(String),
mobx_type Nullable(String),
mouseclick_id Nullable(UInt64),
mouseclick_hesitationtime Nullable(UInt64),
mouseclick_label Nullable(String),
mousemove_x Nullable(UInt64),
mousemove_y Nullable(UInt64),
movenode_id Nullable(UInt64),
movenode_index Nullable(UInt64),
movenode_parentid Nullable(UInt64),
ngrx_action Nullable(String),
ngrx_duration Nullable(UInt64),
ngrx_state Nullable(String),
pageevent_domcontentloadedeventend Nullable(UInt64),
pageevent_domcontentloadedeventstart Nullable(UInt64),
pageevent_firstcontentfulpaint Nullable(UInt64),
pageevent_firstpaint Nullable(UInt64),
pageevent_loaded Nullable(UInt8),
pageevent_loadeventend Nullable(UInt64),
pageevent_loadeventstart Nullable(UInt64),
pageevent_messageid Nullable(UInt64),
pageevent_referrer Nullable(String),
pageevent_requeststart Nullable(UInt64),
pageevent_responseend Nullable(UInt64),
pageevent_responsestart Nullable(UInt64),
pageevent_speedindex Nullable(UInt64),
pageevent_timestamp Nullable(Datetime),
pageevent_url Nullable(String),
pageloadtiming_domcontentloadedeventend Nullable(UInt64),
pageloadtiming_domcontentloadedeventstart Nullable(UInt64),
pageloadtiming_firstcontentfulpaint Nullable(UInt64),
pageloadtiming_firstpaint Nullable(UInt64),
pageloadtiming_loadeventend Nullable(UInt64),
pageloadtiming_loadeventstart Nullable(UInt64),
pageloadtiming_requeststart Nullable(UInt64),
pageloadtiming_responseend Nullable(UInt64),
pageloadtiming_responsestart Nullable(UInt64),
pagerendertiming_speedindex Nullable(UInt64),
pagerendertiming_timetointeractive Nullable(UInt64),
pagerendertiming_visuallycomplete Nullable(UInt64),
performancetrack_frames Nullable(Int64),
performancetrack_ticks Nullable(Int64),
performancetrack_totaljsheapsize Nullable(UInt64),
performancetrack_usedjsheapsize Nullable(UInt64),
performancetrackaggr_avgcpu Nullable(UInt64),
performancetrackaggr_avgfps Nullable(UInt64),
performancetrackaggr_avgtotaljsheapsize Nullable(UInt64),
performancetrackaggr_avgusedjsheapsize Nullable(UInt64),
performancetrackaggr_maxcpu Nullable(UInt64),
performancetrackaggr_maxfps Nullable(UInt64),
performancetrackaggr_maxtotaljsheapsize Nullable(UInt64),
performancetrackaggr_maxusedjsheapsize Nullable(UInt64),
performancetrackaggr_mincpu Nullable(UInt64),
performancetrackaggr_minfps Nullable(UInt64),
performancetrackaggr_mintotaljsheapsize Nullable(UInt64),
performancetrackaggr_minusedjsheapsize Nullable(UInt64),
performancetrackaggr_timestampend Nullable(Datetime),
performancetrackaggr_timestampstart Nullable(Datetime),
profiler_args Nullable(String),
profiler_duration Nullable(UInt64),
profiler_name Nullable(String),
profiler_result Nullable(String),
rawcustomevent_name Nullable(String),
rawcustomevent_payload Nullable(String),
rawerrorevent_message Nullable(String),
rawerrorevent_name Nullable(String),
rawerrorevent_payload Nullable(String),
rawerrorevent_source Nullable(String),
rawerrorevent_timestamp Nullable(Datetime),
redux_action Nullable(String),
redux_duration Nullable(UInt64),
redux_state Nullable(String),
removenode_id Nullable(UInt64),
removenodeattribute_id Nullable(UInt64),
removenodeattribute_name Nullable(String),
resourceevent_decodedbodysize Nullable(UInt64),
resourceevent_duration Nullable(UInt64),
resourceevent_encodedbodysize Nullable(UInt64),
resourceevent_headersize Nullable(UInt64),
resourceevent_messageid Nullable(UInt64),
resourceevent_method Nullable(String),
resourceevent_status Nullable(UInt64),
resourceevent_success Nullable(UInt8),
resourceevent_timestamp Nullable(Datetime),
resourceevent_ttfb Nullable(UInt64),
resourceevent_type Nullable(String),
resourceevent_url Nullable(String),
resourcetiming_decodedbodysize Nullable(UInt64),
resourcetiming_duration Nullable(UInt64),
resourcetiming_encodedbodysize Nullable(UInt64),
resourcetiming_headersize Nullable(UInt64),
resourcetiming_initiator Nullable(String),
resourcetiming_timestamp Nullable(Datetime),
resourcetiming_ttfb Nullable(UInt64),
resourcetiming_url Nullable(String),
sessiondisconnect Nullable(UInt8),
sessiondisconnect_timestamp Nullable(Datetime),
sessionend Nullable(UInt8),
sessionend_timestamp Nullable(Datetime),
sessionstart_projectid Nullable(UInt64),
sessionstart_revid Nullable(String),
sessionstart_timestamp Nullable(Datetime),
sessionstart_trackerversion Nullable(String),
sessionstart_useragent Nullable(String),
sessionstart_userbrowser Nullable(String),
sessionstart_userbrowserversion Nullable(String),
sessionstart_usercountry Nullable(String),
sessionstart_userdevice Nullable(String),
sessionstart_userdeviceheapsize Nullable(UInt64),
sessionstart_userdevicememorysize Nullable(UInt64),
sessionstart_userdevicetype Nullable(String),
sessionstart_useros Nullable(String),
sessionstart_userosversion Nullable(String),
sessionstart_useruuid Nullable(String),
setcssdata_data Nullable(UInt64),
setcssdata_id Nullable(UInt64),
setinputchecked_checked Nullable(UInt64),
setinputchecked_id Nullable(UInt64),
setinputtarget_id Nullable(UInt64),
setinputtarget_label Nullable(UInt64),
setinputvalue_id Nullable(UInt64),
setinputvalue_mask Nullable(UInt64),
setinputvalue_value Nullable(UInt64),
setnodeattribute_id Nullable(UInt64),
setnodeattribute_name Nullable(UInt64),
setnodeattribute_value Nullable(UInt64),
setnodedata_data Nullable(UInt64),
setnodedata_id Nullable(UInt64),
setnodescroll_id Nullable(UInt64),
setnodescroll_x Nullable(Int64),
setnodescroll_y Nullable(Int64),
setpagelocation_navigationstart Nullable(UInt64),
setpagelocation_referrer Nullable(String),
setpagelocation_url Nullable(String),
setpagevisibility_hidden Nullable(UInt8),
setviewportscroll_x Nullable(Int64),
setviewportscroll_y Nullable(Int64),
setviewportsize_height Nullable(UInt64),
setviewportsize_width Nullable(UInt64),
stateaction_type Nullable(String),
stateactionevent_messageid Nullable(UInt64),
stateactionevent_timestamp Nullable(Datetime),
stateactionevent_type Nullable(String),
timestamp_timestamp Nullable(Datetime),
useranonymousid_id Nullable(String),
userid_id Nullable(String),
vuex_mutation Nullable(String),
vuex_state Nullable(String),
received_at Datetime,
batch_order_number Int64
)
ENGINE = MergeTree()
PARTITION BY toYYYYMM(received_at)
ORDER BY (received_at, batch_order_number)
SETTINGS min_bytes_for_wide_part = 1, use_minimalistic_part_header_in_zookeeper = 1;
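Since negatives is partitioned by month of received_at and ordered by (received_at, batch_order_number), time-bounded queries can skip whole partitions. A minimal sketch:

-- A time-bounded scan touches only the monthly partitions that
-- overlap the requested window, thanks to PARTITION BY toYYYYMM.
SELECT count()
FROM negatives
WHERE received_at >= toDateTime('2022-01-01 00:00:00')
  AND received_at <  toDateTime('2022-02-01 00:00:00');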

View file

@ -1,38 +1,39 @@
CREATE TABLE pages (
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
url String,
url_host String MATERIALIZED lower(domain(url)),
url_path String MATERIALIZED lower(pathFull(url)),
request_start Nullable(UInt16),
response_start Nullable(UInt16),
response_end Nullable(UInt16),
dom_content_loaded_event_start Nullable(UInt16),
dom_content_loaded_event_end Nullable(UInt16),
load_event_start Nullable(UInt16),
load_event_end Nullable(UInt16),
first_paint Nullable(UInt16),
first_contentful_paint Nullable(UInt16),
speed_index Nullable(UInt16),
visually_complete Nullable(UInt16),
time_to_interactive Nullable(UInt16),
ttfb Nullable(UInt16) MATERIALIZED if(greaterOrEquals(response_start, request_start), minus(response_start, request_start), Null),
ttlb Nullable(UInt16) MATERIALIZED if(greaterOrEquals(response_end, request_start), minus(response_end, request_start), Null),
response_time Nullable(UInt16) MATERIALIZED if(greaterOrEquals(response_end, response_start), minus(response_end, response_start), Null),
dom_building_time Nullable(UInt16) MATERIALIZED if(greaterOrEquals(dom_content_loaded_event_start, response_end), minus(dom_content_loaded_event_start, response_end), Null),
dom_content_loaded_event_time Nullable(UInt16) MATERIALIZED if(greaterOrEquals(dom_content_loaded_event_end, dom_content_loaded_event_start), minus(dom_content_loaded_event_end, dom_content_loaded_event_start), Null),
load_event_time Nullable(UInt16) MATERIALIZED if(greaterOrEquals(load_event_end, load_event_start), minus(load_event_end, load_event_start), Null)
CREATE TABLE IF NOT EXISTS pages
(
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
url String,
url_host String MATERIALIZED lower(domain(url)),
url_path String MATERIALIZED lower(pathFull(url)),
request_start Nullable(UInt16),
response_start Nullable(UInt16),
response_end Nullable(UInt16),
dom_content_loaded_event_start Nullable(UInt16),
dom_content_loaded_event_end Nullable(UInt16),
load_event_start Nullable(UInt16),
load_event_end Nullable(UInt16),
first_paint Nullable(UInt16),
first_contentful_paint Nullable(UInt16),
speed_index Nullable(UInt16),
visually_complete Nullable(UInt16),
time_to_interactive Nullable(UInt16),
ttfb Nullable(UInt16) MATERIALIZED if (greaterOrEquals(response_start, request_start), minus(response_start, request_start), Null),
ttlb Nullable(UInt16) MATERIALIZED if (greaterOrEquals(response_end, request_start), minus(response_end, request_start), Null),
response_time Nullable(UInt16) MATERIALIZED if (greaterOrEquals(response_end, response_start), minus(response_end, response_start), Null),
dom_building_time Nullable(UInt16) MATERIALIZED if (greaterOrEquals(dom_content_loaded_event_start, response_end), minus(dom_content_loaded_event_start, response_end), Null),
dom_content_loaded_event_time Nullable(UInt16) MATERIALIZED if (greaterOrEquals(dom_content_loaded_event_end, dom_content_loaded_event_start), minus(dom_content_loaded_event_end, dom_content_loaded_event_start), Null),
load_event_time Nullable(UInt16) MATERIALIZED if (greaterOrEquals(load_event_end, load_event_start), minus(load_event_end, load_event_start), Null)
) ENGINE = MergeTree
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
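Note that the MATERIALIZED columns above (url_host, url_path, ttfb, ttlb, and the derived timing metrics) are computed from the stored columns at insert time; ClickHouse excludes them from SELECT * and rejects direct inserts into them, so they are queried by name. A sketch:

-- MATERIALIZED columns are populated automatically on insert and
-- must be selected explicitly; they never appear in SELECT *.
SELECT url, url_host, url_path, ttfb, load_event_time
FROM pages
WHERE project_id = 1
ORDER BY datetime DESC
LIMIT 10;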

View file

@ -1,30 +1,31 @@
CREATE TABLE performance (
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
min_fps UInt8,
avg_fps UInt8,
max_fps UInt8,
min_cpu UInt8,
avg_cpu UInt8,
max_cpu UInt8,
min_total_js_heap_size UInt64,
avg_total_js_heap_size UInt64,
max_total_js_heap_size UInt64,
min_used_js_heap_size UInt64,
avg_used_js_heap_size UInt64,
max_used_js_heap_size UInt64
CREATE TABLE IF NOT EXISTS performance
(
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
min_fps UInt8,
avg_fps UInt8,
max_fps UInt8,
min_cpu UInt8,
avg_cpu UInt8,
max_cpu UInt8,
min_total_js_heap_size UInt64,
avg_total_js_heap_size UInt64,
max_total_js_heap_size UInt64,
min_used_js_heap_size UInt64,
avg_used_js_heap_size UInt64,
max_used_js_heap_size UInt64
) ENGINE = MergeTree
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;
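The TTL clause drops rows whose datetime is more than one month old, but only as parts are merged in the background, so expiry is eventual. A sketch of forcing it, if ever needed:

-- TTL is applied during background merges; an explicit merge applies
-- it immediately (potentially expensive on large tables).
OPTIMIZE TABLE performance FINAL;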

View file

@ -1,31 +1,32 @@
CREATE TABLE resources (
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
url String,
url_host String MATERIALIZED lower(domain(url)),
url_hostpath String MATERIALIZED concat(url_host, lower(path(url))),
type Enum8('other'=-1, 'script'=0, 'stylesheet'=1, 'fetch'=2, 'img'=3, 'media'=4),
duration Nullable(UInt16),
ttfb Nullable(UInt16),
header_size Nullable(UInt16),
encoded_body_size Nullable(UInt32),
decoded_body_size Nullable(UInt32),
compression_ratio Nullable(Float32) MATERIALIZED divide(decoded_body_size, encoded_body_size),
success UInt8,
method Nullable(Enum8('GET' = 0, 'HEAD' = 1, 'POST' = 2, 'PUT' = 3, 'DELETE' = 4, 'CONNECT' = 5, 'OPTIONS' = 6, 'TRACE' = 7, 'PATCH' = 8)),
status Nullable(UInt16)
CREATE TABLE IF NOT EXISTS resources
(
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
url String,
url_host String MATERIALIZED lower(domain(url)),
url_hostpath String MATERIALIZED concat(url_host, lower(path(url))),
type Enum8('other'=-1, 'script'=0, 'stylesheet'=1, 'fetch'=2, 'img'=3, 'media'=4),
duration Nullable(UInt16),
ttfb Nullable(UInt16),
header_size Nullable(UInt16),
encoded_body_size Nullable(UInt32),
decoded_body_size Nullable(UInt32),
compression_ratio Nullable(Float32) MATERIALIZED divide(decoded_body_size, encoded_body_size),
success UInt8,
method Nullable(Enum8('GET' = 0, 'HEAD' = 1, 'POST' = 2, 'PUT' = 3, 'DELETE' = 4, 'CONNECT' = 5, 'OPTIONS' = 6, 'TRACE' = 7, 'PATCH' = 8)),
status Nullable(UInt16)
) ENGINE = MergeTree
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime)
TTL datetime + INTERVAL 1 MONTH;

View file

@ -1,22 +1,26 @@
CREATE TABLE sessions (
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
duration UInt32,
pages_count UInt16,
events_count UInt16,
errors_count UInt16
) ENGINE = ReplacingMergeTree( duration )
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime, session_id)
TTL datetime + INTERVAL 1 MONTH;
CREATE TABLE IF NOT EXISTS sessions
(
session_id UInt64,
project_id UInt32,
tracker_version String,
rev_id Nullable(String),
user_uuid UUID,
user_os String,
user_os_version Nullable(String),
user_browser String,
user_browser_version Nullable(String),
user_device Nullable(String),
user_device_type Enum8('other'=0, 'desktop'=1, 'mobile'=2),
user_country Enum8('UN'=-128, 'RW'=-127, 'SO'=-126, 'YE'=-125, 'IQ'=-124, 'SA'=-123, 'IR'=-122, 'CY'=-121, 'TZ'=-120, 'SY'=-119, 'AM'=-118, 'KE'=-117, 'CD'=-116, 'DJ'=-115, 'UG'=-114, 'CF'=-113, 'SC'=-112, 'JO'=-111, 'LB'=-110, 'KW'=-109, 'OM'=-108, 'QA'=-107, 'BH'=-106, 'AE'=-105, 'IL'=-104, 'TR'=-103, 'ET'=-102, 'ER'=-101, 'EG'=-100, 'SD'=-99, 'GR'=-98, 'BI'=-97, 'EE'=-96, 'LV'=-95, 'AZ'=-94, 'LT'=-93, 'SJ'=-92, 'GE'=-91, 'MD'=-90, 'BY'=-89, 'FI'=-88, 'AX'=-87, 'UA'=-86, 'MK'=-85, 'HU'=-84, 'BG'=-83, 'AL'=-82, 'PL'=-81, 'RO'=-80, 'XK'=-79, 'ZW'=-78, 'ZM'=-77, 'KM'=-76, 'MW'=-75, 'LS'=-74, 'BW'=-73, 'MU'=-72, 'SZ'=-71, 'RE'=-70, 'ZA'=-69, 'YT'=-68, 'MZ'=-67, 'MG'=-66, 'AF'=-65, 'PK'=-64, 'BD'=-63, 'TM'=-62, 'TJ'=-61, 'LK'=-60, 'BT'=-59, 'IN'=-58, 'MV'=-57, 'IO'=-56, 'NP'=-55, 'MM'=-54, 'UZ'=-53, 'KZ'=-52, 'KG'=-51, 'TF'=-50, 'HM'=-49, 'CC'=-48, 'PW'=-47, 'VN'=-46, 'TH'=-45, 'ID'=-44, 'LA'=-43, 'TW'=-42, 'PH'=-41, 'MY'=-40, 'CN'=-39, 'HK'=-38, 'BN'=-37, 'MO'=-36, 'KH'=-35, 'KR'=-34, 'JP'=-33, 'KP'=-32, 'SG'=-31, 'CK'=-30, 'TL'=-29, 'RU'=-28, 'MN'=-27, 'AU'=-26, 'CX'=-25, 'MH'=-24, 'FM'=-23, 'PG'=-22, 'SB'=-21, 'TV'=-20, 'NR'=-19, 'VU'=-18, 'NC'=-17, 'NF'=-16, 'NZ'=-15, 'FJ'=-14, 'LY'=-13, 'CM'=-12, 'SN'=-11, 'CG'=-10, 'PT'=-9, 'LR'=-8, 'CI'=-7, 'GH'=-6, 'GQ'=-5, 'NG'=-4, 'BF'=-3, 'TG'=-2, 'GW'=-1, 'MR'=0, 'BJ'=1, 'GA'=2, 'SL'=3, 'ST'=4, 'GI'=5, 'GM'=6, 'GN'=7, 'TD'=8, 'NE'=9, 'ML'=10, 'EH'=11, 'TN'=12, 'ES'=13, 'MA'=14, 'MT'=15, 'DZ'=16, 'FO'=17, 'DK'=18, 'IS'=19, 'GB'=20, 'CH'=21, 'SE'=22, 'NL'=23, 'AT'=24, 'BE'=25, 'DE'=26, 'LU'=27, 'IE'=28, 'MC'=29, 'FR'=30, 'AD'=31, 'LI'=32, 'JE'=33, 'IM'=34, 'GG'=35, 'SK'=36, 'CZ'=37, 'NO'=38, 'VA'=39, 'SM'=40, 'IT'=41, 'SI'=42, 'ME'=43, 'HR'=44, 'BA'=45, 'AO'=46, 'NA'=47, 'SH'=48, 'BV'=49, 'BB'=50, 'CV'=51, 'GY'=52, 'GF'=53, 'SR'=54, 'PM'=55, 'GL'=56, 'PY'=57, 'UY'=58, 'BR'=59, 'FK'=60, 'GS'=61, 'JM'=62, 'DO'=63, 'CU'=64, 'MQ'=65, 'BS'=66, 'BM'=67, 'AI'=68, 'TT'=69, 'KN'=70, 'DM'=71, 'AG'=72, 'LC'=73, 'TC'=74, 'AW'=75, 'VG'=76, 'VC'=77, 'MS'=78, 'MF'=79, 'BL'=80, 'GP'=81, 'GD'=82, 'KY'=83, 'BZ'=84, 'SV'=85, 'GT'=86, 'HN'=87, 'NI'=88, 'CR'=89, 'VE'=90, 'EC'=91, 'CO'=92, 'PA'=93, 'HT'=94, 'AR'=95, 'CL'=96, 'BO'=97, 'PE'=98, 'MX'=99, 'PF'=100, 'PN'=101, 'KI'=102, 'TK'=103, 'TO'=104, 'WF'=105, 'WS'=106, 'NU'=107, 'MP'=108, 'GU'=109, 'PR'=110, 'VI'=111, 'UM'=112, 'AS'=113, 'CA'=114, 'US'=115, 'PS'=116, 'RS'=117, 'AQ'=118, 'SX'=119, 'CW'=120, 'BQ'=121, 'SS'=122),
datetime DateTime,
duration UInt32,
pages_count UInt16,
events_count UInt16,
errors_count UInt16,
utm_source Nullable(String),
utm_medium Nullable(String),
utm_campaign Nullable(String)
) ENGINE = ReplacingMergeTree(duration)
PARTITION BY toDate(datetime)
ORDER BY (project_id, datetime, session_id)
TTL datetime + INTERVAL 1 MONTH;
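ReplacingMergeTree(duration) collapses rows that share the sorting key (project_id, datetime, session_id) during background merges, keeping the row with the largest duration, which lets a session row be re-inserted as the session grows. Because merges are asynchronous, reads that need exactly one row per session typically add FINAL. A sketch:

-- FINAL deduplicates not-yet-merged rows at read time, returning the
-- surviving (highest-duration) row per sorting-key value.
SELECT session_id, duration, pages_count, errors_count
FROM sessions FINAL
WHERE project_id = 1
  AND datetime >= now() - INTERVAL 1 DAY;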

View file

@ -1,4 +1,4 @@
CREATE TABLE sessions_metadata
CREATE TABLE IF NOT EXISTS sessions_metadata
(
session_id UInt64,
project_id UInt32,

View file

@ -0,0 +1,167 @@
BEGIN;
CREATE OR REPLACE FUNCTION openreplay_version()
RETURNS text AS
$$
SELECT 'v1.4.0-ee'
$$ LANGUAGE sql IMMUTABLE;
CREATE TABLE IF NOT EXISTS traces
(
user_id integer NULL REFERENCES users (user_id) ON DELETE CASCADE,
tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE,
created_at bigint NOT NULL DEFAULT (EXTRACT(EPOCH FROM now() at time zone 'utc') * 1000)::bigint,
auth text NULL,
action text NOT NULL,
method text NOT NULL,
path_format text NOT NULL,
endpoint text NOT NULL,
payload jsonb NULL,
parameters jsonb NULL,
status int NULL
);
CREATE INDEX IF NOT EXISTS traces_user_id_idx ON traces (user_id);
CREATE INDEX IF NOT EXISTS traces_tenant_id_idx ON traces (tenant_id);
CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id);
CREATE INDEX IF NOT EXISTS pages_first_contentful_paint_time_idx ON events.pages (first_contentful_paint_time) WHERE first_contentful_paint_time > 0;
CREATE INDEX IF NOT EXISTS pages_dom_content_loaded_time_idx ON events.pages (dom_content_loaded_time) WHERE dom_content_loaded_time > 0;
CREATE INDEX IF NOT EXISTS pages_first_paint_time_idx ON events.pages (first_paint_time) WHERE first_paint_time > 0;
CREATE INDEX IF NOT EXISTS pages_ttfb_idx ON events.pages (ttfb) WHERE ttfb > 0;
CREATE INDEX IF NOT EXISTS pages_time_to_interactive_idx ON events.pages (time_to_interactive) WHERE time_to_interactive > 0;
CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_loadgt0NN_idx ON events.pages (session_id, timestamp) WHERE load_time > 0 AND load_time IS NOT NULL;
CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_visualgt0nn_idx ON events.pages (session_id, timestamp) WHERE visually_complete > 0 AND visually_complete IS NOT NULL;
CREATE INDEX IF NOT EXISTS pages_timestamp_metgt0_idx ON events.pages (timestamp) WHERE response_time > 0 OR
first_paint_time > 0 OR
dom_content_loaded_time > 0 OR
ttfb > 0 OR
time_to_interactive > 0;
CREATE INDEX IF NOT EXISTS pages_session_id_speed_indexgt0nn_idx ON events.pages (session_id, speed_index) WHERE speed_index > 0 AND speed_index IS NOT NULL;
CREATE INDEX IF NOT EXISTS pages_session_id_timestamp_dom_building_timegt0nn_idx ON events.pages (session_id, timestamp, dom_building_time) WHERE dom_building_time > 0 AND dom_building_time IS NOT NULL;
CREATE INDEX IF NOT EXISTS issues_project_id_idx ON issues (project_id);
CREATE INDEX IF NOT EXISTS errors_project_id_error_id_js_exception_idx ON public.errors (project_id, error_id) WHERE source = 'js_exception';
CREATE INDEX IF NOT EXISTS errors_project_id_error_id_idx ON public.errors (project_id, error_id);
CREATE INDEX IF NOT EXISTS errors_project_id_error_id_integration_idx ON public.errors (project_id, error_id) WHERE source != 'js_exception';
CREATE INDEX IF NOT EXISTS sessions_start_ts_idx ON public.sessions (start_ts) WHERE duration > 0;
CREATE INDEX IF NOT EXISTS sessions_project_id_idx ON public.sessions (project_id) WHERE duration > 0;
CREATE INDEX IF NOT EXISTS sessions_session_id_project_id_start_ts_idx ON sessions (session_id, project_id, start_ts) WHERE duration > 0;
CREATE INDEX IF NOT EXISTS user_favorite_sessions_user_id_session_id_idx ON user_favorite_sessions (user_id, session_id);
CREATE INDEX IF NOT EXISTS jobs_project_id_idx ON jobs (project_id);
CREATE INDEX IF NOT EXISTS errors_session_id_timestamp_error_id_idx ON events.errors (session_id, timestamp, error_id);
CREATE INDEX IF NOT EXISTS errors_error_id_timestamp_idx ON events.errors (error_id, timestamp);
CREATE INDEX IF NOT EXISTS errors_timestamp_error_id_session_id_idx ON events.errors (timestamp, error_id, session_id);
CREATE INDEX IF NOT EXISTS errors_error_id_timestamp_session_id_idx ON events.errors (error_id, timestamp, session_id);
CREATE INDEX IF NOT EXISTS resources_timestamp_idx ON events.resources (timestamp);
CREATE INDEX IF NOT EXISTS resources_success_idx ON events.resources (success);
CREATE INDEX IF NOT EXISTS projects_project_key_idx ON public.projects (project_key);
CREATE INDEX IF NOT EXISTS resources_timestamp_type_durationgt0NN_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL;
CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_idx ON events.resources (session_id, timestamp);
CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_type_idx ON events.resources (session_id, timestamp, type);
CREATE INDEX IF NOT EXISTS resources_timestamp_type_durationgt0NN_noFetch_idx ON events.resources (timestamp, type) WHERE duration > 0 AND duration IS NOT NULL AND type != 'fetch';
CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_url_host_fail_idx ON events.resources (session_id, timestamp, url_host) WHERE success = FALSE;
CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_url_host_firstparty_idx ON events.resources (session_id, timestamp, url_host) WHERE type IN ('fetch', 'script');
CREATE INDEX IF NOT EXISTS resources_session_id_timestamp_duration_durationgt0NN_img_idx ON events.resources (session_id, timestamp, duration) WHERE duration > 0 AND duration IS NOT NULL AND type = 'img';
CREATE INDEX IF NOT EXISTS resources_timestamp_session_id_idx ON events.resources (timestamp, session_id);
DROP TRIGGER IF EXISTS on_insert_or_update ON projects;
CREATE TRIGGER on_insert_or_update
AFTER INSERT OR UPDATE
ON projects
FOR EACH ROW
EXECUTE PROCEDURE notify_project();
UPDATE tenants
SET name=''
WHERE name ISNULL;
ALTER TABLE tenants
ALTER COLUMN name SET NOT NULL;
ALTER TABLE sessions
ADD COLUMN IF NOT EXISTS utm_source text NULL DEFAULT NULL,
ADD COLUMN IF NOT EXISTS utm_medium text NULL DEFAULT NULL,
ADD COLUMN IF NOT EXISTS utm_campaign text NULL DEFAULT NULL;
CREATE INDEX IF NOT EXISTS sessions_utm_source_gin_idx ON public.sessions USING GIN (utm_source gin_trgm_ops);
CREATE INDEX IF NOT EXISTS sessions_utm_medium_gin_idx ON public.sessions USING GIN (utm_medium gin_trgm_ops);
CREATE INDEX IF NOT EXISTS sessions_utm_campaign_gin_idx ON public.sessions USING GIN (utm_campaign gin_trgm_ops);
CREATE INDEX IF NOT EXISTS requests_timestamp_session_id_failed_idx ON events_common.requests (timestamp, session_id) WHERE success = FALSE;
DROP INDEX IF EXISTS sessions_project_id_user_browser_idx1;
DROP INDEX IF EXISTS sessions_project_id_user_country_idx1;
ALTER INDEX IF EXISTS platform_idx RENAME TO sessions_platform_idx;
ALTER INDEX IF EXISTS events.resources_duration_idx RENAME TO resources_duration_durationgt0_idx;
DROP INDEX IF EXISTS projects_project_key_idx1;
CREATE INDEX IF NOT EXISTS errors_parent_error_id_idx ON errors (parent_error_id);
CREATE INDEX IF NOT EXISTS performance_session_id_idx ON events.performance (session_id);
CREATE INDEX IF NOT EXISTS performance_timestamp_idx ON events.performance (timestamp);
CREATE INDEX IF NOT EXISTS performance_session_id_timestamp_idx ON events.performance (session_id, timestamp);
CREATE INDEX IF NOT EXISTS performance_avg_cpu_gt0_idx ON events.performance (avg_cpu) WHERE avg_cpu > 0;
CREATE INDEX IF NOT EXISTS performance_avg_used_js_heap_size_gt0_idx ON events.performance (avg_used_js_heap_size) WHERE avg_used_js_heap_size > 0;
CREATE TABLE IF NOT EXISTS metrics
(
metric_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY,
project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE,
user_id integer REFERENCES users (user_id) ON DELETE SET NULL,
name text NOT NULL,
is_public boolean NOT NULL DEFAULT FALSE,
created_at timestamp default timezone('utc'::text, now()) not null,
deleted_at timestamp
);
CREATE INDEX IF NOT EXISTS metrics_user_id_is_public_idx ON public.metrics (user_id, is_public);
CREATE TABLE IF NOT EXISTS metric_series
(
series_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY,
metric_id integer REFERENCES metrics (metric_id) ON DELETE CASCADE,
index integer NOT NULL,
name text NULL,
filter jsonb NOT NULL,
created_at timestamp DEFAULT timezone('utc'::text, now()) NOT NULL,
deleted_at timestamp
);
CREATE INDEX IF NOT EXISTS metric_series_metric_id_idx ON public.metric_series (metric_id);
CREATE INDEX IF NOT EXISTS funnels_project_id_idx ON public.funnels (project_id);
CREATE TABLE IF NOT EXISTS searches
(
search_id integer generated BY DEFAULT AS IDENTITY PRIMARY KEY,
project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE,
user_id integer NOT NULL REFERENCES users (user_id) ON DELETE CASCADE,
name text not null,
filter jsonb not null,
created_at timestamp default timezone('utc'::text, now()) not null,
deleted_at timestamp,
is_public boolean NOT NULL DEFAULT False
);
CREATE INDEX IF NOT EXISTS searches_user_id_is_public_idx ON public.searches (user_id, is_public);
CREATE INDEX IF NOT EXISTS searches_project_id_idx ON public.searches (project_id);
CREATE INDEX IF NOT EXISTS alerts_project_id_idx ON alerts (project_id);
ALTER TABLE alerts
ADD COLUMN IF NOT EXISTS series_id integer NULL REFERENCES metric_series (series_id) ON DELETE CASCADE;
CREATE INDEX IF NOT EXISTS alerts_series_id_idx ON alerts (series_id);
UPDATE alerts
SET options=jsonb_set(options, '{change}', '"change"')
WHERE detection_method = 'change'
AND options -> 'change' ISNULL;
ALTER TABLE roles
ADD COLUMN IF NOT EXISTS all_projects bool NOT NULL DEFAULT TRUE;
CREATE TABLE IF NOT EXISTS roles_projects
(
role_id integer NOT NULL REFERENCES roles (role_id) ON DELETE CASCADE,
project_id integer NOT NULL REFERENCES projects (project_id) ON DELETE CASCADE,
CONSTRAINT roles_projects_pkey PRIMARY KEY (role_id, project_id)
);
CREATE INDEX IF NOT EXISTS roles_projects_role_id_idx ON roles_projects (role_id);
CREATE INDEX IF NOT EXISTS roles_projects_project_id_idx ON roles_projects (project_id);
COMMIT;
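Since the statements above are largely guarded by IF NOT EXISTS / DROP ... IF EXISTS, re-running the migration is safe; the gin_trgm_ops indexes do assume the pg_trgm extension is already installed. A quick post-migration check (sketch):

-- Confirm the schema version reported by the helper defined above.
SELECT openreplay_version();  -- expected: v1.4.0-ee

-- The utm_* GIN trigram indexes accelerate pattern searches such as:
SELECT session_id
FROM public.sessions
WHERE utm_source ILIKE '%newsletter%';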

View file

@ -1,18 +0,0 @@
BEGIN;
CREATE TABLE traces
(
user_id integer NULL REFERENCES users (user_id) ON DELETE CASCADE,
tenant_id integer NOT NULL REFERENCES tenants (tenant_id) ON DELETE CASCADE,
created_at bigint NOT NULL DEFAULT (EXTRACT(EPOCH FROM now() at time zone 'utc') * 1000)::bigint,
auth text NULL,
action text NOT NULL,
method text NOT NULL,
path_format text NOT NULL,
endpoint text NOT NULL,
payload jsonb NULL,
parameters jsonb NULL,
status int NULL
);
CREATE INDEX traces_user_id_idx ON traces (user_id);
CREATE INDEX traces_tenant_id_idx ON traces (tenant_id);
COMMIT;

File diff suppressed because it is too large

View file

@ -0,0 +1,23 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*.orig
*~
# Various IDEs
.project
.idea/
*.tmproj
.vscode/

View file

@ -0,0 +1,23 @@
apiVersion: v2
name: clickhouse
description: A Helm chart for Kubernetes
# A chart can be either an 'application' or a 'library' chart.
#
# Application charts are a collection of templates that can be packaged into versioned archives
# to be deployed.
#
# Library charts provide useful utilities or functions for the chart developer. They're included as
# a dependency of application charts to inject those utilities and functions into the rendering
# pipeline. Library charts do not define any templates and therefore cannot be deployed.
type: application
# This is the chart version. This version number should be incremented each time you make changes
# to the chart and its templates, including the app version.
# Versions are expected to follow Semantic Versioning (https://semver.org/)
version: 0.1.0
# This is the version number of the application being deployed. This version number should be
# incremented each time you make changes to the application. Versions are not expected to
# follow Semantic Versioning. They should reflect the version the application is using.
appVersion: 1.16.0

View file

@ -0,0 +1,62 @@
{{/*
Expand the name of the chart.
*/}}
{{- define "clickhouse.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "clickhouse.fullname" -}}
{{- if .Values.fullnameOverride }}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- $name := default .Chart.Name .Values.nameOverride }}
{{- if contains $name .Release.Name }}
{{- .Release.Name | trunc 63 | trimSuffix "-" }}
{{- else }}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" }}
{{- end }}
{{- end }}
{{- end }}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "clickhouse.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" }}
{{- end }}
{{/*
Common labels
*/}}
{{- define "clickhouse.labels" -}}
helm.sh/chart: {{ include "clickhouse.chart" . }}
{{ include "clickhouse.selectorLabels" . }}
{{- if .Chart.AppVersion }}
app.kubernetes.io/version: {{ .Chart.AppVersion | quote }}
{{- end }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end }}
{{/*
Selector labels
*/}}
{{- define "clickhouse.selectorLabels" -}}
app.kubernetes.io/name: {{ include "clickhouse.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end }}
{{/*
Create the name of the service account to use
*/}}
{{- define "clickhouse.serviceAccountName" -}}
{{- if .Values.serviceAccount.create }}
{{- default (include "clickhouse.fullname" .) .Values.serviceAccount.name }}
{{- else }}
{{- default "default" .Values.serviceAccount.name }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,19 @@
apiVersion: v1
kind: Service
metadata:
name: clickhouse
labels:
{{- include "clickhouse.labels" . | nindent 4 }}
spec:
type: ClusterIP
ports:
- port: {{ .Values.service.webPort }}
targetPort: web
protocol: TCP
name: web
- port: {{ .Values.service.dataPort }}
targetPort: data
protocol: TCP
name: data
selector:
{{- include "clickhouse.selectorLabels" . | nindent 4 }}

View file

@ -0,0 +1,12 @@
{{- if .Values.serviceAccount.create -}}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ include "clickhouse.serviceAccountName" . }}
labels:
{{- include "clickhouse.labels" . | nindent 4 }}
{{- with .Values.serviceAccount.annotations }}
annotations:
{{- toYaml . | nindent 4 }}
{{- end }}
{{- end }}

View file

@ -0,0 +1,69 @@
apiVersion: apps/v1
kind: StatefulSet
metadata:
name: {{ include "clickhouse.fullname" . }}
labels:
{{- include "clickhouse.labels" . | nindent 4 }}
spec:
replicas: {{ .Values.replicaCount }}
serviceName: {{ include "clickhouse.fullname" . }}
selector:
matchLabels:
{{- include "clickhouse.selectorLabels" . | nindent 6 }}
template:
metadata:
{{- with .Values.podAnnotations }}
annotations:
{{- toYaml . | nindent 8 }}
{{- end }}
labels:
{{- include "clickhouse.selectorLabels" . | nindent 8 }}
spec:
{{- with .Values.imagePullSecrets }}
imagePullSecrets:
{{- toYaml . | nindent 8 }}
{{- end }}
serviceAccountName: {{ include "clickhouse.serviceAccountName" . }}
securityContext:
{{- toYaml .Values.podSecurityContext | nindent 8 }}
containers:
- name: {{ .Chart.Name }}
env:
{{- range $key, $value := .Values.env }}
- name: "{{ $key }}"
value: "{{ $value }}"
{{- end }}
securityContext:
{{- toYaml .Values.securityContext | nindent 12 }}
image: "{{ .Values.image.repository }}:{{ .Values.image.tag | default .Chart.AppVersion }}"
imagePullPolicy: {{ .Values.image.pullPolicy }}
ports:
- containerPort: 9000
name: web
- containerPort: 8123
name: data
volumeMounts:
- name: ch-volume
mountPath: /var/lib/mydata
resources:
{{- toYaml .Values.resources | nindent 12 }}
{{- with .Values.nodeSelector }}
nodeSelector:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.affinity }}
affinity:
{{- toYaml . | nindent 8 }}
{{- end }}
{{- with .Values.tolerations }}
tolerations:
{{- toYaml . | nindent 8 }}
{{- end }}
volumeClaimTemplates:
- metadata:
name: ch-volume
spec:
accessModes: [ "ReadWriteOnce" ]
resources:
requests:
storage: {{ .Values.storageSize }}

View file

@ -0,0 +1,62 @@
# Default values for clickhouse.
# This is a YAML-formatted file.
# Declare variables to be passed into your templates.
replicaCount: 1
image:
repository: yandex/clickhouse-server
pullPolicy: IfNotPresent
# Overrides the image tag whose default is the chart appVersion.
tag: "20.9"
imagePullSecrets: []
nameOverride: ""
fullnameOverride: ""
serviceAccount:
# Specifies whether a service account should be created
create: true
# Annotations to add to the service account
annotations: {}
# The name of the service account to use.
# If not set and create is true, a name is generated using the fullname template
name: ""
podAnnotations: {}
podSecurityContext: {}
# fsGroup: 2000
securityContext: {}
# capabilities:
# drop:
# - ALL
# readOnlyRootFilesystem: true
# runAsNonRoot: true
# runAsUser: 1000
env: {}
service:
webPort: 9000
dataPort: 8123
resources: {}
# We usually recommend not to specify default resources and to leave this as a conscious
# choice for the user. This also increases chances charts run on environments with little
# resources, such as Minikube. If you do want to specify resources, uncomment the following
# lines, adjust them as necessary, and remove the curly braces after 'resources:'.
# limits:
# cpu: 100m
# memory: 128Mi
# requests:
# cpu: 100m
# memory: 128Mi
nodeSelector: {}
tolerations: []
affinity: {}
storageSize: 8G

View file

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View file

@ -0,0 +1,22 @@
annotations:
category: Infrastructure
apiVersion: v1
appVersion: 2.6.0
description: Apache Kafka is a distributed streaming platform.
engine: gotpl
home: https://github.com/bitnami/charts/tree/master/bitnami/kafka
icon: https://bitnami.com/assets/stacks/kafka/img/kafka-stack-110x117.png
keywords:
- kafka
- zookeeper
- streaming
- producer
- consumer
maintainers:
- email: containers@bitnami.com
name: Bitnami
name: kafka
sources:
- https://github.com/bitnami/bitnami-docker-kafka
- https://kafka.apache.org/
version: 11.8.6

View file

@ -0,0 +1,737 @@
# Kafka
[Kafka](https://www.kafka.org/) is a distributed streaming platform used for building real-time data pipelines and streaming apps. It is horizontally scalable, fault-tolerant, wicked fast, and runs in production in thousands of companies.
## TL;DR
```console
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install my-release bitnami/kafka
```
## Introduction
This chart bootstraps a [Kafka](https://github.com/bitnami/bitnami-docker-kafka) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
## Prerequisites
- Kubernetes 1.12+
- Helm 2.12+ or Helm 3.0-beta3+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```console
helm repo add bitnami https://charts.bitnami.com/bitnami
helm install my-release bitnami/kafka
```
These commands deploy Kafka on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Parameters
The following tables list the configurable parameters of the Kafka chart and their default values per section/component:
### Global parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
### Common parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `nameOverride` | String to partially override kafka.fullname | `nil` |
| `fullnameOverride` | String to fully override kafka.fullname | `nil` |
| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
| `commonLabels` | Labels to add to all deployed objects | `{}` |
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
| `extraDeploy` | Array of extra objects to deploy with the release | `nil` (evaluated as a template) |
### Kafka parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `image.registry` | Kafka image registry | `docker.io` |
| `image.repository` | Kafka image name | `bitnami/kafka` |
| `image.tag` | Kafka image tag | `{TAG_NAME}` |
| `image.pullPolicy` | Kafka image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `image.debug` | Set to true if you would like to see extra information on logs | `false` |
| `config` | Configuration file for Kafka. Auto-generated based on other parameters when not specified | `nil` |
| `existingConfigmap` | Name of existing ConfigMap with Kafka configuration | `nil` |
| `log4j` | An optional log4j.properties file to overwrite the default of the Kafka brokers. | `nil` |
| `existingLog4jConfigMap` | The name of an existing ConfigMap containing a log4j.properties file. | `nil` |
| `heapOpts` | Kafka's Java Heap size | `-Xmx1024m -Xms1024m` |
| `deleteTopicEnable` | Switch to enable topic deletion or not | `false` |
| `autoCreateTopicsEnable`                           | Switch to enable auto creation of topics. Enabling auto creation of topics is not recommended for production or similar environments | `false`                                                 |
| `logFlushIntervalMessages` | The number of messages to accept before forcing a flush of data to disk | `10000` |
| `logFlushIntervalMs` | The maximum amount of time a message can sit in a log before we force a flush | `1000` |
| `logRetentionBytes` | A size-based retention policy for logs | `_1073741824` |
| `logRetentionCheckIntervalMs` | The interval at which log segments are checked to see if they can be deleted | `300000` |
| `logRetentionHours` | The minimum age of a log file to be eligible for deletion due to age | `168` |
| `logSegmentBytes` | The maximum size of a log segment file. When this size is reached a new log segment will be created | `_1073741824` |
| `logsDirs` | A comma separated list of directories under which to store log files | `/bitnami/kafka/data` |
| `maxMessageBytes` | The largest record batch size allowed by Kafka | `1000012` |
| `defaultReplicationFactor` | Default replication factors for automatically created topics | `1` |
| `offsetsTopicReplicationFactor` | The replication factor for the offsets topic | `1` |
| `transactionStateLogReplicationFactor` | The replication factor for the transaction topic | `1` |
| `transactionStateLogMinIsr` | Overridden min.insync.replicas config for the transaction topic | `1` |
| `numIoThreads` | The number of threads doing disk I/O | `8` |
| `numNetworkThreads` | The number of threads handling network requests | `3` |
| `numPartitions` | The default number of log partitions per topic | `1` |
| `numRecoveryThreadsPerDataDir` | The number of threads per data directory to be used for log recovery at startup and flushing at shutdown | `1` |
| `socketReceiveBufferBytes` | The receive buffer (SO_RCVBUF) used by the socket server | `102400` |
| `socketRequestMaxBytes` | The maximum size of a request that the socket server will accept (protection against OOM) | `_104857600` |
| `socketSendBufferBytes` | The send buffer (SO_SNDBUF) used by the socket server | `102400` |
| `zookeeperConnectionTimeoutMs` | Timeout in ms for connecting to Zookeeper | `6000` |
| `extraEnvVars` | Extra environment variables to add to kafka pods | `[]` |
| `extraVolumes` | Extra volume(s) to add to Kafka statefulset | `[]` |
| `extraVolumeMounts` | Extra volumeMount(s) to add to Kafka containers | `[]` |
| `auth.clientProtocol` | Authentication protocol for communications with clients. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` |
| `auth.interBrokerProtocol` | Authentication protocol for inter-broker communications. Allowed protocols: `plaintext`, `tls`, `mtls`, `sasl` and `sasl_tls` | `plaintext` |
| `auth.saslMechanisms` | SASL mechanisms when either `auth.interBrokerProtocol` or `auth.clientProtocol` are `sasl`. Allowed types: `plain`, `scram-sha-256`, `scram-sha-512` | `plain,scram-sha-256,scram-sha-512` |
| `auth.saslInterBrokerMechanism`                    | SASL mechanism to use as the inter-broker protocol; it must be included in `auth.saslMechanisms` | `plain` |
| `auth.jksSecret` | Name of the existing secret containing the truststore and one keystore per Kafka broker you have in the cluster | `nil` |
| `auth.jksPassword` | Password to access the JKS files when they are password-protected | `nil` |
| `auth.tlsEndpointIdentificationAlgorithm` | The endpoint identification algorithm to validate server hostname using server certificate | `https` |
| `auth.jaas.interBrokerUser` | Kafka inter broker communication user for SASL authentication | `admin` |
| `auth.jaas.interBrokerPassword` | Kafka inter broker communication password for SASL authentication | `nil` |
| `auth.jaas.zookeeperUser` | Kafka Zookeeper user for SASL authentication | `nil` |
| `auth.jaas.zookeeperPassword` | Kafka Zookeeper password for SASL authentication | `nil` |
| `auth.jaas.existingSecret` | Name of the existing secret containing credentials for brokerUser, interBrokerUser and zookeeperUser | `nil` |
| `auth.jaas.clientUsers`                            | List of Kafka client users to be created, separated by commas. These values will override `auth.jaas.clientUser` | `[]` |
| `auth.jaas.clientPasswords` | List of passwords for `auth.jaas.clientUsers`. It is mandatory to provide the passwords when using `auth.jaas.clientUsers` | `[]` |
| `listeners`                                        | The address(es) the socket server listens on. Auto-calculated when set to an empty array | `[]` |
| `advertisedListeners`                              | The address(es) (hostname:port) the broker will advertise to producers and consumers. Auto-calculated when set to an empty array | `[]` |
| `listenerSecurityProtocolMap`                      | The protocol->listener mapping. Auto-calculated when set to `nil` | `nil` |
| `allowPlaintextListener`                           | Allow use of the PLAINTEXT listener | `true` |
| `interBrokerListenerName` | The listener that the brokers should communicate on | `INTERNAL` |
### Statefulset parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `replicaCount` | Number of Kafka nodes | `1` |
| `updateStrategy` | Update strategy for the stateful set | `RollingUpdate` |
| `rollingUpdatePartition` | Partition update strategy | `nil` |
| `podLabels` | Kafka pod labels | `{}` (evaluated as a template) |
| `podAnnotations` | Kafka Pod annotations | `{}` (evaluated as a template) |
| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) |
| `priorityClassName` | Name of the existing priority class to be used by kafka pods | `""` |
| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) |
| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) |
| `podSecurityContext` | Kafka pods' Security Context | `{}` |
| `containerSecurityContext` | Kafka containers' Security Context | `{}` |
| `resources.limits` | The resources limits for Kafka containers | `{}` |
| `resources.requests` | The requested resources for Kafka containers | `{}` |
| `livenessProbe` | Liveness probe configuration for Kafka | `Check values.yaml file` |
| `readinessProbe` | Readiness probe configuration for Kafka | `Check values.yaml file` |
| `customLivenessProbe` | Custom Liveness probe configuration for Kafka | `{}` |
| `customReadinessProbe` | Custom Readiness probe configuration for Kafka | `{}` |
| `pdb.create` | Enable/disable a Pod Disruption Budget creation | `false` |
| `pdb.minAvailable` | Minimum number/percentage of pods that should remain scheduled | `nil` |
| `pdb.maxUnavailable` | Maximum number/percentage of pods that may be made unavailable | `1` |
| `command` | Override kafka container command | `['/scripts/setup.sh']` (evaluated as a template) |
| `args` | Override kafka container arguments | `[]` (evaluated as a template) |
| `sidecars` | Attach additional sidecar containers to the Kafka pod | `{}` |
### Exposure parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.port` | Kafka port for client connections | `9092` |
| `service.internalPort` | Kafka port for inter-broker connections | `9093` |
| `service.externalPort` | Kafka port for external connections | `9094` |
| `service.nodePorts.client` | Nodeport for client connections | `""` |
| `service.nodePorts.external` | Nodeport for external connections | `""` |
| `service.loadBalancerIP` | loadBalancerIP for Kafka Service | `nil` |
| `service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` |
| `service.annotations` | Service annotations | `{}`(evaluated as a template) |
| `externalAccess.enabled` | Enable Kubernetes external cluster access to Kafka brokers | `false` |
| `externalAccess.autoDiscovery.enabled` | Enable using an init container to auto-detect external IPs/ports by querying the K8s API | `false` |
| `externalAccess.autoDiscovery.image.registry` | Init container auto-discovery image registry (kubectl) | `docker.io` |
| `externalAccess.autoDiscovery.image.repository` | Init container auto-discovery image name (kubectl) | `bitnami/kubectl` |
| `externalAccess.autoDiscovery.image.tag` | Init container auto-discovery image tag (kubectl) | `{TAG_NAME}` |
| `externalAccess.autoDiscovery.image.pullPolicy` | Init container auto-discovery image pull policy (kubectl) | `Always` |
| `externalAccess.autoDiscovery.resources.limits` | Init container auto-discovery resource limits | `{}` |
| `externalAccess.autoDiscovery.resources.requests` | Init container auto-discovery resource requests | `{}` |
| `externalAccess.service.type` | Kubernetes Service type for external access. It can be NodePort or LoadBalancer | `LoadBalancer` |
| `externalAccess.service.port` | Kafka port used for external access when service type is LoadBalancer | `9094` |
| `externalAccess.service.loadBalancerIPs` | Array of load balancer IPs for Kafka brokers | `[]` |
| `externalAccess.service.loadBalancerSourceRanges` | Address(es) that are allowed when service is LoadBalancer | `[]` |
| `externalAccess.service.domain` | Domain or external ip used to configure Kafka external listener when service type is NodePort | `nil` |
| `externalAccess.service.nodePorts` | Array of node ports used to configure Kafka external listener when service type is NodePort | `[]` |
| `externalAccess.service.annotations` | Service annotations for external access | `{}`(evaluated as a template) |
### Persistence parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `persistence.enabled` | Enable Kafka data persistence using PVC, note that Zookeeper persistence is unaffected | `true` |
| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim`, the value is evaluated as a template | `nil` |
| `persistence.storageClass` | PVC Storage Class for Kafka data volume | `nil` |
| `persistence.accessMode` | PVC Access Mode for Kafka data volume | `ReadWriteOnce` |
| `persistence.size` | PVC Storage Request for Kafka data volume | `8Gi` |
| `persistence.annotations` | Annotations for the PVC | `{}`(evaluated as a template) |
### RBAC parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `serviceAccount.create` | Enable creation of ServiceAccount for Kafka pods | `true` |
| `serviceAccount.name` | Name of the created serviceAccount | Generated using the `kafka.fullname` template |
| `rbac.create`                                      | Whether to create and use RBAC resources | `false` |
### Volume Permissions parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
| `volumePermissions.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `volumePermissions.resources.limits` | Init container volume-permissions resource limits | `{}` |
| `volumePermissions.resources.requests` | Init container volume-permissions resource requests | `{}` |
### Metrics parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `metrics.kafka.enabled` | Whether or not to create a standalone Kafka exporter to expose Kafka metrics | `false` |
| `metrics.kafka.image.registry` | Kafka exporter image registry | `docker.io` |
| `metrics.kafka.image.repository` | Kafka exporter image name | `bitnami/kafka-exporter` |
| `metrics.kafka.image.tag` | Kafka exporter image tag | `{TAG_NAME}` |
| `metrics.kafka.image.pullPolicy` | Kafka exporter image pull policy | `IfNotPresent` |
| `metrics.kafka.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `metrics.kafka.extraFlags` | Extra flags to be passed to Kafka exporter | `{}` |
| `metrics.kafka.certificatesSecret` | Name of the existing secret containing the optional certificate and key files | `nil` |
| `metrics.kafka.resources.limits` | Kafka Exporter container resource limits | `{}` |
| `metrics.kafka.resources.requests` | Kafka Exporter container resource requests | `{}` |
| `metrics.kafka.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Kafka Exporter | `ClusterIP` |
| `metrics.kafka.service.port` | Kafka Exporter Prometheus port | `9308` |
| `metrics.kafka.service.nodePort` | Kubernetes HTTP node port | `""` |
| `metrics.kafka.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` |
| `metrics.kafka.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
| `metrics.kafka.service.clusterIP` | Static clusterIP or None for headless services | `nil` |
| `metrics.jmx.enabled` | Whether or not to expose JMX metrics to Prometheus | `false` |
| `metrics.jmx.image.registry` | JMX exporter image registry | `docker.io` |
| `metrics.jmx.image.repository` | JMX exporter image name | `bitnami/jmx-exporter` |
| `metrics.jmx.image.tag` | JMX exporter image tag | `{TAG_NAME}` |
| `metrics.jmx.image.pullPolicy` | JMX exporter image pull policy | `IfNotPresent` |
| `metrics.jmx.image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `metrics.jmx.resources.limits` | JMX Exporter container resource limits | `{}` |
| `metrics.jmx.resources.requests` | JMX Exporter container resource requests | `{}` |
| `metrics.jmx.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for JMX Exporter | `ClusterIP` |
| `metrics.jmx.service.port` | JMX Exporter Prometheus port | `5556` |
| `metrics.jmx.service.nodePort` | Kubernetes HTTP node port | `""` |
| `metrics.jmx.service.annotations` | Annotations for Prometheus metrics service | `Check values.yaml file` |
| `metrics.jmx.service.loadBalancerIP` | loadBalancerIP if service type is `LoadBalancer` | `nil` |
| `metrics.jmx.service.clusterIP` | Static clusterIP or None for headless services | `nil` |
| `metrics.jmx.whitelistObjectNames`                 | Allows setting which JMX objects you want to expose via JMX stats to the JMX Exporter | (see `values.yaml`) |
| `metrics.jmx.config` | Configuration file for JMX exporter | (see `values.yaml`) |
| `metrics.jmx.existingConfigmap` | Name of existing ConfigMap with JMX exporter configuration | `nil` |
| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (requires `metrics.kafka.enabled` or `metrics.jmx.enabled` to be `true`) | `false` |
| `metrics.serviceMonitor.namespace` | Namespace which Prometheus is running in | `monitoring` |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped | `nil` |
| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) |
| `metrics.serviceMonitor.selector` | ServiceMonitor selector labels | `nil` (Prometheus Operator default value) |
### Zookeeper chart parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `zookeeper.enabled` | Switch to enable or disable the Zookeeper helm chart | `true` |
| `zookeeper.persistence.enabled` | Enable Zookeeper persistence using PVC | `true` |
| `externalZookeeper.servers` | Server or list of external Zookeeper servers to use | `[]` |
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
helm install my-release \
--set replicaCount=3 \
bitnami/kafka
```
The above command deploys Kafka with 3 brokers (replicas).
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
helm install my-release -f values.yaml bitnami/kafka
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration and installation details
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
### Production configuration and horizontal scaling
This chart includes a `values-production.yaml` file where you can find some parameters oriented to production configuration in comparison to the regular `values.yaml`. You can use this file instead of the default one.
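For instance, a sketch of installing with that file (assuming you have fetched the chart locally so the file is available):
```console
helm install my-release -f values-production.yaml bitnami/kafka
```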
- Number of Kafka nodes:
```diff
- replicaCount: 1
+ replicaCount: 3
```
- Allow use of the PLAINTEXT listener:
```diff
- allowPlaintextListener: true
+ allowPlaintextListener: false
```
- Default replication factors for automatically created topics:
```diff
- defaultReplicationFactor: 1
+ defaultReplicationFactor: 3
```
- Allow auto creation of topics:
```diff
- autoCreateTopicsEnable: true
+ autoCreateTopicsEnable: false
```
- The replication factor for the offsets topic:
```diff
- offsetsTopicReplicationFactor: 1
+ offsetsTopicReplicationFactor: 3
```
- The replication factor for the transaction topic:
```diff
- transactionStateLogReplicationFactor: 1
+ transactionStateLogReplicationFactor: 3
```
- Overridden min.insync.replicas config for the transaction topic:
```diff
- transactionStateLogMinIsr: 1
+ transactionStateLogMinIsr: 3
```
- Switch to enable Kafka SASL authentication on client and inter-broker communications:
```diff
- auth.clientProtocol: plaintext
+ auth.clientProtocol: sasl
- auth.interBrokerProtocol: plaintext
+ auth.interBrokerProtocol: sasl
```
- Enable Zookeeper authentication:
```diff
+ auth.jaas.zookeeperUser: zookeeperUser
+ auth.jaas.zookeeperPassword: zookeeperPassword
- zookeeper.auth.enabled: false
+ zookeeper.auth.enabled: true
+ zookeeper.auth.clientUser: zookeeperUser
+ zookeeper.auth.clientPassword: zookeeperPassword
+ zookeeper.auth.serverUsers: zookeeperUser
+ zookeeper.auth.serverPasswords: zookeeperPassword
```
- Enable Pod Disruption Budget:
```diff
- pdb.create: false
+ pdb.create: true
```
- Create a separate Kafka metrics exporter:
```diff
- metrics.kafka.enabled: false
+ metrics.kafka.enabled: true
```
- Expose JMX metrics to Prometheus:
```diff
- metrics.jmx.enabled: false
+ metrics.jmx.enabled: true
```
- Enable Zookeeper metrics:
```diff
+ zookeeper.metrics.enabled: true
```
To horizontally scale this chart once it has been deployed, you can upgrade the statefulset using a new value for the `replicaCount` parameter. Please note that, when enabling TLS encryption, you must update your JKS secret including the keystore for the new replicas.
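As a sketch, assuming the release is named `my-release` and was installed from the Bitnami repo (the value `5` is just an illustration):
```console
helm upgrade my-release bitnami/kafka --set replicaCount=5
```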
### Setting custom parameters
Any environment variable beginning with `KAFKA_CFG_` will be mapped to its corresponding Kafka key. For example, use `KAFKA_CFG_BACKGROUND_THREADS` in order to set `background.threads`. In order to pass custom environment variables use the `extraEnvVars` property.
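For instance, a minimal values sketch using the `background.threads` example above (the value `10` is arbitrary):
```yaml
extraEnvVars:
  - name: KAFKA_CFG_BACKGROUND_THREADS
    value: "10"
```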
### Listeners configuration
This chart allows you to automatically configure Kafka with 3 listeners:
- One for inter-broker communications.
- A second one for communications with clients within the K8s cluster.
- (optional) a third listener for communications with clients outside the K8s cluster. Check [this section](#accessing-kafka-brokers-from-outside-the-clusters) for more information.
For more complex configurations, set the `listeners`, `advertisedListeners` and `listenerSecurityProtocolMap` parameters as needed.
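As an illustration only (the listener names, ports, and addresses below are assumptions, not chart defaults), such an override might look like:
```yaml
listeners:
  - "INTERNAL://:9093"
  - "CLIENT://:9092"
advertisedListeners:
  - "INTERNAL://my-kafka-0.my-kafka-headless:9093"
  - "CLIENT://my-kafka-0.my-kafka-headless:9092"
listenerSecurityProtocolMap: "INTERNAL:PLAINTEXT,CLIENT:PLAINTEXT"
```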
### Enable security for Kafka and Zookeeper
You can configure different authentication protocols for each listener you configure in Kafka. For instance, you can use `sasl_tls` authentication for client communications, while using `tls` for inter-broker communications. This table shows the available protocols and the security they provide:
| Method | Authentication | Encryption via TLS |
|-----------|-------------------------------|--------------------|
| plaintext | None | No |
| tls | None | Yes |
| mtls | Yes (two-way authentication) | Yes |
| sasl | Yes (via SASL) | No |
| sasl_tls | Yes (via SASL) | Yes |
If you enabled SASL authentication on any listener, you can set the SASL credentials using the parameters below:
- `auth.jaas.clientUsers`/`auth.jaas.clientPasswords`: when enabling SASL authentication for communications with clients.
- `auth.jaas.interBrokerUser`/`auth.jaas.interBrokerPassword`: when enabling SASL authentication for inter-broker communications.
- `auth.jaas.zookeeperUser`/`auth.jaas.zookeeperPassword`: In the case that the Zookeeper chart is deployed with SASL authentication enabled.
In order to configure TLS authentication/encryption, you **must** create a secret containing the Java Key Stores (JKS) files: the truststore (`kafka.truststore.jks`) and one keystore (`kafka.keystore.jks`) per Kafka broker you have in the cluster. Then, you need to pass the secret name with the `auth.jksSecret` parameter when deploying the chart.
> **Note**: If the JKS files are password protected (recommended), you will need to provide the password to get access to the keystores. To do so, use the `auth.jksPassword` parameter to provide your password.
For instance, to configure TLS authentication on a Kafka cluster with 2 Kafka brokers, use the command below to create the secret:
```console
kubectl create secret generic kafka-jks --from-file=./kafka.truststore.jks --from-file=./kafka-0.keystore.jks --from-file=./kafka-1.keystore.jks
```
> **Note**: the command above assumes you already created the truststore and keystore files. This [script](https://raw.githubusercontent.com/confluentinc/confluent-platform-security-tools/master/kafka-generate-ssl.sh) can help you with the JKS files generation.
As an alternative to manually creating the secret before installing the chart, you can put your JKS files inside the chart folder `files/jks`, and a secret including them will be generated. Please note this alternative requires having the chart downloaded locally, so you will have to clone this repository or fetch the chart before installing it.
You can deploy the chart with authentication using the following parameters:
```console
replicaCount=2
auth.clientProtocol=sasl
auth.interBrokerProtocol=tls
auth.certificatesSecret=kafka-jks
auth.certificatesPassword=jksPassword
auth.jaas.clientUsers[0]=brokerUser
auth.jaas.clientPassword[0]=brokerPassword
auth.jaas.zookeeperUser=zookeeperUser
auth.jaas.zookeeperPassword=zookeeperPassword
zookeeper.auth.enabled=true
zookeeper.auth.serverUsers=zookeeperUser
zookeeper.auth.serverPasswords=zookeeperPassword
zookeeper.auth.clientUser=zookeeperUser
zookeeper.auth.clientPassword=zookeeperPassword
```
If you also enable exposing metrics using the Kafka exporter, and you are using the `sasl_tls`, `tls`, or `mtls` authentication protocols, you need to mount the CA certificate used to sign the broker certificates in the exporter so it can validate the Kafka brokers. To do so, create a secret containing the CA, and set the `metrics.kafka.certificatesSecret` parameter. As an alternative, you can skip TLS validation using extra flags:
```console
metrics.kafka.extraFlags={tls.insecure-skip-tls-verify: ""}
```
### Accessing Kafka brokers from outside the cluster
In order to access Kafka brokers from outside the cluster, an additional listener and advertised listener must be configured. Additionally, a specific service per Kafka pod will be created.
There are two ways of configuring external access: using LoadBalancer services or using NodePort services.
#### Using LoadBalancer services
You have two alternatives to use LoadBalancer services:
- Option A) Use random load balancer IPs using an **initContainer** that waits for the IPs to be ready and discovers them automatically.
```console
externalAccess.enabled=true
externalAccess.service.type=LoadBalancer
externalAccess.service.port=9094
externalAccess.autoDiscovery.enabled=true
serviceAccount.create=true
rbac.create=true
```
Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
- Option B) Manually specify the load balancer IPs:
```console
externalAccess.enabled=true
externalAccess.service.type=LoadBalancer
externalAccess.service.port=9094
externalAccess.service.loadBalancerIPs[0]='external-ip-1'
externalAccess.service.loadBalancerIPs[1]='external-ip-2'
```
Note: You need to know the load balancer IPs in advance so that each Kafka broker advertised listener is configured with them.
#### Using NodePort services
You have two alternatives to use NodePort services:
- Option A) Use random node ports using an **initContainer** that discovers them automatically.
```console
externalAccess.enabled=true
externalAccess.service.type=NodePort
externalAccess.autoDiscovery.enabled=true
serviceAccount.create=true
rbac.create=true
```
Note: This option requires creating RBAC rules on clusters where RBAC policies are enabled.
- Option B) Manually specify the node ports:
```console
externalAccess.enabled=true
externalAccess.service.type=NodePort
externalAccess.service.nodePorts[0]='node-port-1'
externalAccess.service.nodePorts[1]='node-port-2'
```
Note: You need to know in advance the node ports that will be exposed so that each Kafka broker advertised listener is configured with them.
The pod will try to get the external IP of the node using `curl -s https://ipinfo.io/ip` unless `externalAccess.service.domain` is provided.
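For example, a sketch that pins the advertised domain instead of relying on that lookup (`kafka.example.com` is a placeholder):
```console
externalAccess.enabled=true
externalAccess.service.type=NodePort
externalAccess.service.domain=kafka.example.com
```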
Following the aforementioned steps will also allow you to connect to the brokers from outside the cluster using the cluster's default service (when `service.type` is `LoadBalancer` or `NodePort`). Use the property `service.externalPort` to specify the port used for external connections.
### Sidecars
If you have a need for additional containers to run within the same pod as Kafka (e.g. an additional metrics or logging exporter), you can do so via the `sidecars` config parameter. Simply define your container according to the Kubernetes container spec.
```yaml
sidecars:
- name: your-image-name
image: your-image
imagePullPolicy: Always
ports:
- name: portname
containerPort: 1234
```
### Deploying extra resources
There are cases where you may want to deploy extra objects, such as Kafka Connect. To cover this case, the chart allows adding the full specification of other objects using the `extraDeploy` parameter. The following example would create a deployment including a Kafka Connect deployment so you can connect Kafka with MongoDB:
```yaml
## Extra objects to deploy (value evaluated as a template)
##
extraDeploy: |-
- apiVersion: apps/v1
kind: Deployment
metadata:
name: {{ include "kafka.fullname" . }}-connect
labels: {{- include "kafka.labels" . | nindent 6 }}
app.kubernetes.io/component: connector
spec:
replicas: 1
selector:
matchLabels: {{- include "kafka.matchLabels" . | nindent 8 }}
app.kubernetes.io/component: connector
template:
metadata:
labels: {{- include "kafka.labels" . | nindent 10 }}
app.kubernetes.io/component: connector
spec:
containers:
- name: connect
image: KAFKA-CONNECT-IMAGE
imagePullPolicy: IfNotPresent
ports:
- name: connector
containerPort: 8083
volumeMounts:
- name: configuration
mountPath: /opt/bitnami/kafka/config
volumes:
- name: configuration
configMap:
name: {{ include "kafka.fullname" . }}-connect
- apiVersion: v1
kind: ConfigMap
metadata:
name: {{ include "kafka.fullname" . }}-connect
labels: {{- include "kafka.labels" . | nindent 6 }}
app.kubernetes.io/component: connector
data:
connect-standalone.properties: |-
bootstrap.servers = {{ include "kafka.fullname" . }}-0.{{ include "kafka.fullname" . }}-headless.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}:{{ .Values.service.port }}
...
mongodb.properties: |-
connection.uri=mongodb://root:password@mongodb-hostname:27017
...
- apiVersion: v1
kind: Service
metadata:
name: {{ include "kafka.fullname" . }}-connect
labels: {{- include "kafka.labels" . | nindent 6 }}
app.kubernetes.io/component: connector
spec:
ports:
- protocol: TCP
port: 8083
targetPort: connector
selector: {{- include "kafka.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: connector
```
You can create the Kafka Connect image using the Dockerfile below:
```Dockerfile
FROM bitnami/kafka:latest
# Download MongoDB Connector for Apache Kafka https://www.confluent.io/hub/mongodb/kafka-connect-mongodb
RUN mkdir -p /opt/bitnami/kafka/plugins && \
cd /opt/bitnami/kafka/plugins && \
curl --remote-name --location --silent https://search.maven.org/remotecontent?filepath=org/mongodb/kafka/mongo-kafka-connect/1.2.0/mongo-kafka-connect-1.2.0-all.jar
CMD /opt/bitnami/kafka/bin/connect-standalone.sh /opt/bitnami/kafka/config/connect-standalone.properties /opt/bitnami/kafka/config/mongo.properties
```
## Persistence
The [Bitnami Kafka](https://github.com/bitnami/bitnami-docker-kafka) image stores the Kafka data at the `/bitnami/kafka` path of the container.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube. See the [Parameters](#persistence-parameters) section to configure the PVC or to disable persistence.
### Adjust permissions of persistent volume mountpoint
As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data into it.
By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
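A minimal sketch, assuming the release name `my-release`:
```console
helm install my-release bitnami/kafka --set volumePermissions.enabled=true
```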
## Upgrading
### To 11.8.0
External access to brokers can now be achieved through the cluster's Kafka service.
- `service.nodePort` -> deprecated in favor of `service.nodePorts.client` and `service.nodePorts.external`
### To 11.7.0
The way to configure users and passwords changed. Multiple users can now be created during installation by providing lists of users and passwords.
- `auth.jaas.clientUser` (string) -> deprecated in favor of `auth.jaas.clientUsers` (array).
- `auth.jaas.clientPassword` (string) -> deprecated in favor of `auth.jaas.clientPasswords` (array).
### To 11.0.0
The way to configure listeners and authentication on Kafka was totally refactored, allowing users to configure different authentication protocols on different listeners. Please check the sections [Listeners configuration](#listeners-configuration) and [Enable security for Kafka and Zookeeper](#enable-security-for-kafka-and-zookeeper) for more information.
Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed or replaced by new ones in this major version:
- `auth.enabled` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
- `auth.ssl` -> deprecated in favor of `auth.clientProtocol` and `auth.interBrokerProtocol` parameters.
- `auth.certificatesSecret` -> renamed to `auth.jksSecret`.
- `auth.certificatesPassword` -> renamed to `auth.jksPassword`.
- `sslEndpointIdentificationAlgorithm` -> renamed to `auth.tlsEndpointIdentificationAlgorithm`.
- `auth.interBrokerUser` -> renamed to `auth.jaas.interBrokerUser`
- `auth.interBrokerPassword` -> renamed to `auth.jaas.interBrokerPassword`
- `auth.zookeeperUser` -> renamed to `auth.jaas.zookeeperUser`
- `auth.zookeeperPassword` -> renamed to `auth.jaas.zookeeperPassword`
- `auth.existingSecret` -> renamed to `auth.jaas.existingSecret`
- `service.sslPort` -> deprecated in favor of `service.internalPort`
- `service.nodePorts.kafka` and `service.nodePorts.ssl` -> deprecated in favor of `service.nodePort`
- `metrics.kafka.extraFlag` -> new parameter
- `metrics.kafka.certificatesSecret` -> new parameter
### To 10.0.0
If you are setting the `config` or `log4j` parameter, backwards compatibility is not guaranteed, because the `KAFKA_MOUNTED_CONFDIR` has moved from `/opt/bitnami/kafka/conf` to `/bitnami/kafka/config`. In order to continue using these parameters, you must also upgrade your image to `docker.io/bitnami/kafka:2.4.1-debian-10-r38` or later.
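A sketch of such an upgrade, reusing the release name `kafka` from the examples below:
```console
helm upgrade kafka bitnami/kafka --set image.tag=2.4.1-debian-10-r38
```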
### To 9.0.0
Backwards compatibility is not guaranteed unless you adapt your values.yaml to the new format. Here you can find some parameters that were renamed in this major version:
```diff
- securityContext.enabled
- securityContext.fsGroup
- securityContext.fsGroup
+ podSecurityContext
- externalAccess.service.loadBalancerIP
+ externalAccess.service.loadBalancerIPs
- externalAccess.service.nodePort
+ externalAccess.service.nodePorts
- metrics.jmx.configMap.enabled
- metrics.jmx.configMap.overrideConfig
+ metrics.jmx.config
- metrics.jmx.configMap.overrideName
+ metrics.jmx.existingConfigmap
```
Port names were prefixed with the protocol to comply with Istio (see https://istio.io/docs/ops/deployment/requirements/).
### To 8.0.0
There is no backwards compatibility since the brokerID changes to the POD_NAME. For more information see [this PR](https://github.com/bitnami/charts/pull/2028).
### To 7.0.0
Backwards compatibility is not guaranteed when Kafka metrics are enabled, unless you modify the labels used on the exporter deployments.
Use the workaround below to upgrade from versions previous to 7.0.0. The following example assumes that the release name is kafka:
```console
helm upgrade kafka bitnami/kafka --version 6.1.8 --set metrics.kafka.enabled=false
helm upgrade kafka bitnami/kafka --version 7.0.0 --set metrics.kafka.enabled=true
```
### To 2.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
Use the workaround below to upgrade from versions previous to 2.0.0. The following example assumes that the release name is kafka:
```console
kubectl delete statefulset kafka-kafka --cascade=false
kubectl delete statefulset kafka-zookeeper --cascade=false
```
### To 1.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
Use the workaround below to upgrade from versions previous to 1.0.0. The following example assumes that the release name is kafka:
```console
kubectl delete statefulset kafka-kafka --cascade=false
kubectl delete statefulset kafka-zookeeper --cascade=false
```

View file

@ -0,0 +1,21 @@
# Patterns to ignore when building packages.
# This supports shell glob matching, relative path matching, and
# negation (prefixed with !). Only one pattern per line.
.DS_Store
# Common VCS dirs
.git/
.gitignore
.bzr/
.bzrignore
.hg/
.hgignore
.svn/
# Common backup files
*.swp
*.bak
*.tmp
*~
# Various IDEs
.project
.idea/
*.tmproj

View file

@ -0,0 +1,20 @@
annotations:
category: Infrastructure
apiVersion: v1
appVersion: 3.6.2
description: A centralized service for maintaining configuration information, naming,
providing distributed synchronization, and providing group services for distributed
applications.
engine: gotpl
home: https://github.com/bitnami/charts/tree/master/bitnami/zookeeper
icon: https://bitnami.com/assets/stacks/zookeeper/img/zookeeper-stack-110x117.png
keywords:
- zookeeper
maintainers:
- email: containers@bitnami.com
name: Bitnami
name: zookeeper
sources:
- https://github.com/bitnami/bitnami-docker-zookeeper
- https://zookeeper.apache.org/
version: 5.21.9

View file

@ -0,0 +1,297 @@
# ZooKeeper
[ZooKeeper](https://zookeeper.apache.org/) is a centralized service for maintaining configuration information, naming, providing distributed synchronization, and providing group services. All of these kinds of services are used in some form or other by distributed applications.
## TL;DR
```console
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/zookeeper
```
## Introduction
This chart bootstraps a [ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) deployment on a [Kubernetes](http://kubernetes.io) cluster using the [Helm](https://helm.sh) package manager.
Bitnami charts can be used with [Kubeapps](https://kubeapps.com/) for deployment and management of Helm Charts in clusters. This Helm chart has been tested on top of [Bitnami Kubernetes Production Runtime](https://kubeprod.io/) (BKPR). Deploy BKPR to get automated TLS certificates, logging and monitoring for your applications.
## Prerequisites
- Kubernetes 1.12+
- Helm 2.12+ or Helm 3.0-beta3+
- PV provisioner support in the underlying infrastructure
## Installing the Chart
To install the chart with the release name `my-release`:
```console
$ helm repo add bitnami https://charts.bitnami.com/bitnami
$ helm install my-release bitnami/zookeeper
```
These commands deploy ZooKeeper on the Kubernetes cluster in the default configuration. The [Parameters](#parameters) section lists the parameters that can be configured during installation.
> **Tip**: List all releases using `helm list`
## Uninstalling the Chart
To uninstall/delete the `my-release` deployment:
```console
$ helm delete my-release
```
The command removes all the Kubernetes components associated with the chart and deletes the release.
## Parameters
The following tables list the configurable parameters of the ZooKeeper chart and their default values per section/component:
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `global.imageRegistry` | Global Docker image registry | `nil` |
| `global.imagePullSecrets` | Global Docker registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `global.storageClass` | Global storage class for dynamic provisioning | `nil` |
### Common parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `nameOverride` | String to partially override zookeeper.fullname | `nil` |
| `fullnameOverride` | String to fully override zookeeper.fullname | `nil` |
| `clusterDomain` | Default Kubernetes cluster domain | `cluster.local` |
| `commonLabels` | Labels to add to all deployed objects | `{}` |
| `commonAnnotations` | Annotations to add to all deployed objects | `{}` |
| `schedulerName`                                    | Kubernetes pod scheduler name | `nil` (use the default-scheduler) |
### Zookeeper chart parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `image.registry` | ZooKeeper image registry | `docker.io` |
| `image.repository` | ZooKeeper Image name | `bitnami/zookeeper` |
| `image.tag` | ZooKeeper Image tag | `{TAG_NAME}` |
| `image.pullPolicy` | ZooKeeper image pull policy | `IfNotPresent` |
| `image.pullSecrets` | Specify docker-registry secret names as an array | `[]` (does not add image pull secrets to deployed pods) |
| `image.debug` | Specify if debug values should be set | `false` |
| `tickTime` | Basic time unit in milliseconds used by ZooKeeper for heartbeats | `2000` |
| `initLimit` | Time the ZooKeeper servers in quorum have to connect to a leader | `10` |
| `syncLimit` | How far out of date a server can be from a leader | `5` |
| `maxClientCnxns` | Number of concurrent connections that a single client may make to a single member | `60` |
| `maxSessionTimeout` | Maximum session timeout in milliseconds that the server will allow the client to negotiate. | `40000` |
| `autopurge.snapRetainCount`                        | Number of snapshots to retain for autopurge | `3` |
| `autopurge.purgeInterval` | The time interval in hours for which the purge task has to be triggered | `0` |
| `fourlwCommandsWhitelist`                          | A comma-separated list of Four Letter Word commands to use | `srvr, mntr` |
| `listenOnAllIPs` | Allow Zookeeper to listen for connections from its peers on all available IP addresses. | `false` |
| `allowAnonymousLogin`                              | Allow connections from unauthenticated users | `yes` |
| `auth.existingSecret` | Use existing secret (ignores previous password) | `nil` |
| `auth.enabled` | Enable ZooKeeper auth | `false` |
| `auth.clientUser`                                  | User that ZooKeeper clients will use to authenticate | `nil` |
| `auth.clientPassword`                              | Password that ZooKeeper clients will use to authenticate | `nil` |
| `auth.serverUsers`                                 | List of users to be created | `nil` |
| `auth.serverPasswords` | List of passwords to assign to users when created | `nil` |
| `heapSize`                                         | Size in MB for the Java Heap options (Xmx and Xms) | `[]` |
| `logLevel` | Log level of ZooKeeper server | `ERROR` |
| `jvmFlags` | Default JVMFLAGS for the ZooKeeper process | `nil` |
| `config` | Configure ZooKeeper with a custom zoo.conf file | `nil` |
| `dataLogDir` | Data log directory | `""` |
### Statefulset parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `replicaCount` | Number of ZooKeeper nodes | `1` |
| `updateStrategy` | Update strategy for the statefulset | `RollingUpdate` |
| `rollingUpdatePartition` | Partition update strategy | `nil` |
| `podManagementPolicy` | Pod management policy | `Parallel` |
| `podLabels` | ZooKeeper pod labels | `{}` (evaluated as a template) |
| `podAnnotations` | ZooKeeper Pod annotations | `{}` (evaluated as a template) |
| `affinity` | Affinity for pod assignment | `{}` (evaluated as a template) |
| `nodeSelector` | Node labels for pod assignment | `{}` (evaluated as a template) |
| `tolerations` | Tolerations for pod assignment | `[]` (evaluated as a template) |
| `priorityClassName` | Name of the existing priority class to be used by ZooKeeper pods | `""` |
| `securityContext.enabled` | Enable security context (ZooKeeper master pod) | `true` |
| `securityContext.fsGroup` | Group ID for the container (ZooKeeper master pod) | `1001` |
| `securityContext.runAsUser` | User ID for the container (ZooKeeper master pod) | `1001` |
| `resources` | CPU/Memory resource requests/limits | Memory: `256Mi`, CPU: `250m` |
| `livenessProbe` | Liveness probe configuration for ZooKeeper | Check `values.yaml` file |
| `readinessProbe` | Readiness probe configuration for ZooKeeper | Check `values.yaml` file |
| `extraVolumes` | Extra volumes | `nil` |
| `extraVolumeMounts` | Mount extra volume(s) | `nil` |
| `podDisruptionBudget.maxUnavailable` | Max number of pods down simultaneously | `1` |
### Exposure parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `service.type` | Kubernetes Service type | `ClusterIP` |
| `service.port` | ZooKeeper port | `2181` |
| `service.followerPort` | ZooKeeper follower port | `2888` |
| `service.electionPort` | ZooKeeper election port | `3888` |
| `service.publishNotReadyAddresses` | If the ZooKeeper headless service should publish DNS records for not ready pods | `true` |
| `serviceAccount.create` | Enable creation of ServiceAccount for zookeeper pod | `false` |
| `serviceAccount.name` | The name of the service account to use. If not set and `create` is `true`, a name is generated | Generated using the `zookeeper.fullname` template |
| `service.tls.client_enable` | Enable tls for client connections | `false` |
| `service.tls.quorum_enable` | Enable tls for quorum protocol | `false` |
| `service.tls.disable_base_client_port` | Remove client port from service definitions. | `false` |
| `service.tls.client_port` | Service port for tls client connections | `3181` |
| `service.tls.client_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` |
| `service.tls.client_keystore_password` | KeyStore password. You can use environment variables. | `nil` |
| `service.tls.client_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` |
| `service.tls.client_truststore_password` | TrustStore password. You can use environment variables. | `nil` |
| `service.tls.quorum_keystore_path` | KeyStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_key_store/key_store_file` |
| `service.tls.quorum_keystore_password` | KeyStore password. You can use environment variables. | `nil` |
| `service.tls.quorum_truststore_path` | TrustStore file path. Refer to extraVolumes and extraVolumeMounts for mounting files into the pods | `/tls_trust_store/trust_store_file` |
| `service.tls.quorum_truststore_password` | TrustStore password. You can use environment variables. | `nil` |
| `service.annotations` | Annotations for the Service | `{}` |
| `service.headless.annotations` | Annotations for the Headless Service | `{}` |
| `networkPolicy.enabled` | Enable NetworkPolicy | `false` |
| `networkPolicy.allowExternal` | Don't require client label for connections | `true` |
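For example, a sketch of enabling TLS for client connections, assuming the keystore and truststore files are already mounted through `extraVolumes`/`extraVolumeMounts` and that the password environment variable names (an assumption for illustration) are provided to the pods:
```yaml
# Sketch: TLS for client connections. Paths match the chart defaults above;
# the password env var names are assumptions for illustration.
service:
  tls:
    client_enable: true
    client_port: 3181
    client_keystore_path: /tls_key_store/key_store_file
    client_keystore_password: "$KEY_STORE_PASSWORD"
    client_truststore_path: /tls_trust_store/trust_store_file
    client_truststore_password: "$TRUST_STORE_PASSWORD"
```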
### Persistence parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `persistence.enabled` | Enable ZooKeeper data persistence using PVC | `true` |
| `persistence.existingClaim` | Provide an existing `PersistentVolumeClaim` | `nil` (evaluated as a template) |
| `persistence.storageClass` | PVC Storage Class for ZooKeeper data volume | `nil` |
| `persistence.accessMode` | PVC Access Mode for ZooKeeper data volume | `ReadWriteOnce` |
| `persistence.size` | PVC Storage Request for ZooKeeper data volume | `8Gi` |
| `persistence.annotations` | Annotations for the PVC | `{}` (evaluated as a template) |
| `persistence.dataLogDir.size` | PVC Storage Request for ZooKeeper's data log directory | `8Gi` |
| `persistence.dataLogDir.existingClaim` | Provide an existing `PersistentVolumeClaim` for ZooKeeper's data log directory | `nil` (evaluated as a template) |
### Volume Permissions parameters
| Parameter | Description | Default |
|---------------------------------------------------|-----------------------------------------------------------------------------------------------------------------------------------|---------------------------------------------------------|
| `volumePermissions.enabled` | Enable init container that changes the owner and group of the persistent volume(s) mountpoint to `runAsUser:fsGroup` | `false` |
| `volumePermissions.image.registry` | Init container volume-permissions image registry | `docker.io` |
| `volumePermissions.image.repository` | Init container volume-permissions image name | `bitnami/minideb` |
| `volumePermissions.image.tag` | Init container volume-permissions image tag | `buster` |
| `volumePermissions.image.pullPolicy` | Init container volume-permissions image pull policy | `Always` |
| `volumePermissions.resources` | Init container resource requests/limit | `nil` |
### Metrics parameters
| Parameter | Description | Default |
|-------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------------------|--------------------------------------------------------------|
| `metrics.enabled` | Enable Prometheus to access the ZooKeeper metrics endpoint | `false` |
| `metrics.containerPort` | Port where a Jetty server will expose Prometheus metrics | `9141` |
| `metrics.service.type` | Kubernetes service type (`ClusterIP`, `NodePort` or `LoadBalancer`) for Jetty server exposing Prometheus metrics | `ClusterIP` |
| `metrics.service.port` | Prometheus metrics service port | `9141` |
| `metrics.service.annotations` | Service annotations for Prometheus to auto-discover the metrics endpoint | `{prometheus.io/scrape: "true", prometheus.io/port: "9141"}` |
| `metrics.serviceMonitor.enabled` | if `true`, creates a Prometheus Operator ServiceMonitor (also requires `metrics.enabled` to be `true`) | `false` |
| `metrics.serviceMonitor.namespace` | Namespace for the ServiceMonitor Resource | The Release Namespace |
| `metrics.serviceMonitor.interval` | Interval at which metrics should be scraped. | `nil` (Prometheus Operator default value) |
| `metrics.serviceMonitor.scrapeTimeout` | Timeout after which the scrape is ended | `nil` (Prometheus Operator default value) |
| `metrics.serviceMonitor.selector` | Prometheus instance selector labels | `nil` |
| `metrics.prometheusRule.enabled` | if `true`, creates a Prometheus Operator PrometheusRule (also requires `metrics.enabled` to be `true` and `metrics.prometheusRule.rules`) | `false` |
| `metrics.prometheusRule.namespace` | Namespace for the PrometheusRule Resource | The Release Namespace |
| `metrics.prometheusRule.selector` | Prometheus instance selector labels | `nil` |
| `metrics.prometheusRule.rules` | Prometheus Rule definitions (see values.yaml for examples) | `[]` |
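As a concrete sketch of these metrics parameters (it assumes the Prometheus Operator is already installed in the cluster):
```console
$ helm install my-release \
  --set metrics.enabled=true \
  --set metrics.serviceMonitor.enabled=true \
  bitnami/zookeeper
```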
Specify each parameter using the `--set key=value[,key=value]` argument to `helm install`. For example,
```console
$ helm install my-release \
--set auth.clientUser=newUser \
bitnami/zookeeper
```
The above command sets the ZooKeeper client user to `newUser`.
Alternatively, a YAML file that specifies the values for the parameters can be provided while installing the chart. For example,
```console
$ helm install my-release -f values.yaml bitnami/zookeeper
```
> **Tip**: You can use the default [values.yaml](values.yaml)
## Configuration and installation details
### [Rolling VS Immutable tags](https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/)
It is strongly recommended to use immutable tags in a production environment. This ensures your deployment does not change automatically if the same tag is updated with a different image.
Bitnami will release a new chart updating its containers if a new version of the main container is available, or if significant changes or critical vulnerabilities exist.
### Production configuration
This chart includes a `values-production.yaml` file with parameters oriented to production, in contrast to the regular `values.yaml`. You can use this file instead of the default one.
- Number of ZooKeeper nodes:
```diff
- replicaCount: 1
+ replicaCount: 3
```
- Enable prometheus metrics:
```diff
- metrics.enabled: false
+ metrics.enabled: true
```
### Log level
You can configure the ZooKeeper log level using the `ZOO_LOG_LEVEL` environment variable. By default, it is set to `ERROR` because each readiness probe produces an `INFO` message on connection and a `WARN` message on disconnection.
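For example, to raise the verbosity at install time (the chart's `logLevel` parameter is what feeds this environment variable):
```console
$ helm install my-release --set logLevel=INFO bitnami/zookeeper
```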
## Persistence
The [Bitnami ZooKeeper](https://github.com/bitnami/bitnami-docker-zookeeper) image stores the ZooKeeper data and configurations at the `/bitnami/zookeeper` path of the container.
Persistent Volume Claims are used to keep the data across deployments. This is known to work in GCE, AWS, and minikube.
See the [Parameters](#parameters) section to configure the PVC or to disable persistence.
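A minimal sketch of a persistence override; the storage class name below is an assumption for illustration and must exist in your cluster:
```yaml
persistence:
  enabled: true
  storageClass: standard   # assumption: replace with a class available in your cluster
  accessMode: ReadWriteOnce
  size: 20Gi
```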
### Adjust permissions of persistent volume mountpoint
As the image runs as non-root by default, it is necessary to adjust the ownership of the persistent volume so that the container can write data to it.
By default, the chart is configured to use Kubernetes Security Context to automatically change the ownership of the volume. However, this feature does not work in all Kubernetes distributions.
As an alternative, this chart supports using an initContainer to change the ownership of the volume before mounting it in the final destination.
You can enable this initContainer by setting `volumePermissions.enabled` to `true`.
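A minimal sketch of that alternative:
```console
$ helm install my-release --set volumePermissions.enabled=true bitnami/zookeeper
```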
### Data Log Directory
You can use a dedicated device for logs (instead of the data directory) to help avoid contention between logging and snapshots. To do so, set the `dataLogDir` parameter to the path to be used for writing transaction logs. Alternatively, set this parameter to an empty string and logs will be written to the data directory (ZooKeeper's default behavior).
When using a dedicated device for logs, you can use a PVC to persist the logs. To do so, set `persistence.enabled` to `true`. See the [Persistence Parameters](#persistence-parameters) section for more information.
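Putting both together, a sketch (the log path itself is illustrative):
```yaml
dataLogDir: /bitnami/zookeeper/dataLog   # assumption: any dedicated path works
persistence:
  enabled: true
  dataLogDir:
    size: 10Gi
```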
## Upgrading
### To 5.21.0
A couple of parameters related to ZooKeeper metrics were renamed or removed in favor of new ones:
- `metrics.port` is renamed to `metrics.containerPort`.
- `metrics.annotations` is deprecated in favor of `metrics.service.annotations`.
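Expressed as a sketch of a `values.yaml` migration, in the same dotted notation used above, the change looks like:
```diff
- metrics.port: 9141
- metrics.annotations:
-   prometheus.io/scrape: "true"
+ metrics.containerPort: 9141
+ metrics.service.annotations:
+   prometheus.io/scrape: "true"
```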
### To 3.0.0
This new version of the chart includes the new ZooKeeper major version 3.5.5. Note that to perform an automatic upgrade
of the application, each node must have at least one snapshot file created in its data directory; otherwise, the
new version of the application won't be able to start the service. Refer to [ZOOKEEPER-3056](https://issues.apache.org/jira/browse/ZOOKEEPER-3056)
for ways to work around this issue if you are facing it.
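One way to check for snapshot files before upgrading, sketched under the assumptions that the first pod is named `zookeeper-0` and that the data directory is the Bitnami image default (`/bitnami/zookeeper/data`):
```console
$ kubectl exec zookeeper-0 -- ls /bitnami/zookeeper/data/version-2
```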
### To 2.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's statefulsets.
Use the workaround below to upgrade from versions prior to 2.0.0. The following example assumes that the release name is `zookeeper`:
```console
$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
```
### To 1.0.0
Backwards compatibility is not guaranteed unless you modify the labels used on the chart's deployments.
Use the workaround below to upgrade from versions prior to 1.0.0. The following example assumes that the release name is `zookeeper`:
```console
$ kubectl delete statefulset zookeeper-zookeeper --cascade=false
```

View file
@@ -0,0 +1,57 @@
{{- if contains .Values.service.type "LoadBalancer" }}
{{- if not .Values.auth.clientPassword }}
-------------------------------------------------------------------------------
WARNING
By specifying "serviceType=LoadBalancer" and not specifying "auth.enabled=true"
you have most likely exposed the ZooKeeper service externally without any
authentication mechanism.
For security reasons, we strongly suggest that you switch to "ClusterIP" or
"NodePort". As alternative, you can also specify a valid password on the
"auth.clientPassword" parameter.
-------------------------------------------------------------------------------
{{- end }}
{{- end }}
** Please be patient while the chart is being deployed **
ZooKeeper can be accessed via port 2181 on the following DNS name from within your cluster:
{{ template "zookeeper.fullname" . }}.{{ .Release.Namespace }}.svc.{{ .Values.clusterDomain }}
To connect to your ZooKeeper server run the following commands:
export POD_NAME=$(kubectl get pods --namespace {{ .Release.Namespace }} -l "app.kubernetes.io/name={{ template "zookeeper.name" . }},app.kubernetes.io/instance={{ .Release.Name }},app.kubernetes.io/component=zookeeper" -o jsonpath="{.items[0].metadata.name}")
kubectl exec -it $POD_NAME -- zkCli.sh
To connect to your ZooKeeper server from outside the cluster execute the following commands:
{{- if contains "NodePort" .Values.service.type }}
export NODE_IP=$(kubectl get nodes --namespace {{ .Release.Namespace }} -o jsonpath="{.items[0].status.addresses[0].address}")
export NODE_PORT=$(kubectl get --namespace {{ .Release.Namespace }} -o jsonpath="{.spec.ports[0].nodePort}" services {{ template "zookeeper.fullname" . }})
zkCli.sh $NODE_IP:$NODE_PORT
{{- else if contains "LoadBalancer" .Values.service.type }}
NOTE: It may take a few minutes for the LoadBalancer IP to be available.
Watch the status with: 'kubectl get svc --namespace {{ .Release.Namespace }} -w {{ template "zookeeper.fullname" . }}'
export SERVICE_IP=$(kubectl get svc --namespace {{ .Release.Namespace }} {{ template "zookeeper.fullname" . }} --template "{{"{{ range (index .status.loadBalancer.ingress 0) }}{{.}}{{ end }}"}}")
zkCli.sh $SERVICE_IP:2181
{{- else if contains "ClusterIP" .Values.service.type }}
kubectl port-forward --namespace {{ .Release.Namespace }} svc/{{ template "zookeeper.fullname" . }} 2181:2181 &
zkCli.sh 127.0.0.1:2181
{{- end }}
{{- if and (contains "bitnami/" .Values.image.repository) (not (.Values.image.tag | toString | regexFind "-r\\d+$|sha256:")) }}
WARNING: Rolling tag detected ({{ .Values.image.repository }}:{{ .Values.image.tag }}), please note that it is strongly recommended to avoid using rolling tags in a production environment.
+info https://docs.bitnami.com/containers/how-to/understand-rolling-tags-containers/
{{- end }}

View file
@@ -0,0 +1,212 @@
{{/* vim: set filetype=mustache: */}}
{{/*
Expand the name of the chart.
*/}}
{{- define "zookeeper.name" -}}
{{- default .Chart.Name .Values.nameOverride | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create a default fully qualified app name.
We truncate at 63 chars because some Kubernetes name fields are limited to this (by the DNS naming spec).
If release name contains chart name it will be used as a full name.
*/}}
{{- define "zookeeper.fullname" -}}
{{- if .Values.fullnameOverride -}}
{{- .Values.fullnameOverride | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- $name := default .Chart.Name .Values.nameOverride -}}
{{- if contains $name .Release.Name -}}
{{- .Release.Name | trunc 63 | trimSuffix "-" -}}
{{- else -}}
{{- printf "%s-%s" .Release.Name $name | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{/*
Create chart name and version as used by the chart label.
*/}}
{{- define "zookeeper.chart" -}}
{{- printf "%s-%s" .Chart.Name .Chart.Version | replace "+" "_" | trunc 63 | trimSuffix "-" -}}
{{- end -}}
{{/*
Create the name of the service account to use
*/}}
{{- define "zookeeper.serviceAccountName" -}}
{{- if .Values.serviceAccount.create -}}
{{ default (include "zookeeper.fullname" .) .Values.serviceAccount.name }}
{{- else -}}
{{ default "default" .Values.serviceAccount.name }}
{{- end -}}
{{- end -}}
{{/*
Return the proper Zookeeper image name
*/}}
{{- define "zookeeper.image" -}}
{{- $registryName := .Values.image.registry -}}
{{- $repositoryName := .Values.image.repository -}}
{{- $tag := .Values.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper Docker Image Registry Secret Names
*/}}
{{- define "zookeeper.imagePullSecrets" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can not use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imagePullSecrets }}
imagePullSecrets:
{{- range .Values.global.imagePullSecrets }}
- name: {{ . }}
{{- end }}
{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.volumePermissions.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end -}}
{{- else if or .Values.image.pullSecrets .Values.volumePermissions.image.pullSecrets }}
imagePullSecrets:
{{- range .Values.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- range .Values.volumePermissions.image.pullSecrets }}
- name: {{ . }}
{{- end }}
{{- end -}}
{{- end -}}
{{/*
Common labels
*/}}
{{- define "zookeeper.labels" -}}
app.kubernetes.io/name: {{ include "zookeeper.name" . }}
helm.sh/chart: {{ include "zookeeper.chart" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
app.kubernetes.io/managed-by: {{ .Release.Service }}
{{- end -}}
{{/*
Renders a value that contains template.
Usage:
{{ include "zookeeper.tplValue" ( dict "value" .Values.path.to.the.Value "context" $) }}
*/}}
{{- define "zookeeper.tplValue" -}}
{{- if typeIs "string" .value }}
{{- tpl .value .context }}
{{- else }}
{{- tpl (.value | toYaml) .context }}
{{- end }}
{{- end -}}
{{/*
Labels to use on deploy.spec.selector.matchLabels and svc.spec.selector
*/}}
{{- define "zookeeper.matchLabels" -}}
app.kubernetes.io/name: {{ include "zookeeper.name" . }}
app.kubernetes.io/instance: {{ .Release.Name }}
{{- end -}}
{{/*
Return ZooKeeper Client Password
*/}}
{{- define "zookeeper.clientPassword" -}}
{{- if .Values.auth.clientPassword -}}
{{- .Values.auth.clientPassword -}}
{{- else -}}
{{- randAlphaNum 10 -}}
{{- end -}}
{{- end -}}
{{/*
Return ZooKeeper Servers Passwords
*/}}
{{- define "zookeeper.serverPasswords" -}}
{{- if .Values.auth.serverPasswords -}}
{{- .Values.auth.serverPasswords -}}
{{- else -}}
{{- randAlphaNum 10 -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper image name (for the init container volume-permissions image)
*/}}
{{- define "zookeeper.volumePermissions.image" -}}
{{- $registryName := .Values.volumePermissions.image.registry -}}
{{- $repositoryName := .Values.volumePermissions.image.repository -}}
{{- $tag := .Values.volumePermissions.image.tag | toString -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
Also, we can't use a single if because lazy evaluation is not an option
*/}}
{{- if .Values.global }}
{{- if .Values.global.imageRegistry }}
{{- printf "%s/%s:%s" .Values.global.imageRegistry $repositoryName $tag -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- else -}}
{{- printf "%s/%s:%s" $registryName $repositoryName $tag -}}
{{- end -}}
{{- end -}}
{{/*
Return the proper Storage Class
*/}}
{{- define "zookeeper.storageClass" -}}
{{/*
Helm 2.11 supports the assignment of a value to a variable defined in a different scope,
but Helm 2.9 and 2.10 don't support it, so we need to implement this if-else logic.
*/}}
{{- if .Values.global -}}
{{- if .Values.global.storageClass -}}
{{- if (eq "-" .Values.global.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.global.storageClass -}}
{{- end -}}
{{- else -}}
{{- if .Values.persistence.storageClass -}}
{{- if (eq "-" .Values.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- else -}}
{{- if .Values.persistence.storageClass -}}
{{- if (eq "-" .Values.persistence.storageClass) -}}
{{- printf "storageClassName: \"\"" -}}
{{- else }}
{{- printf "storageClassName: %s" .Values.persistence.storageClass -}}
{{- end -}}
{{- end -}}
{{- end -}}
{{- end -}}

View file
@@ -0,0 +1,17 @@
{{- if .Values.config }}
apiVersion: v1
kind: ConfigMap
metadata:
name: {{ template "zookeeper.fullname" . }}
namespace: {{ .Release.Namespace }}
labels: {{- include "zookeeper.labels" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
data:
zoo.cfg: |-
{{ .Values.config | indent 4 }}
{{- end -}}

View file
@@ -0,0 +1,29 @@
{{- if .Values.metrics.enabled }}
apiVersion: v1
kind: Service
metadata:
name: {{ template "zookeeper.fullname" . }}-metrics
namespace: {{ .Release.Namespace }}
labels: {{- include "zookeeper.labels" . | nindent 4 }}
app.kubernetes.io/component: zookeeper
{{- if .Values.commonLabels }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if or .Values.metrics.service.annotations .Values.commonAnnotations }}
annotations:
{{- if .Values.metrics.service.annotations }}
{{ include "zookeeper.tplValue" ( dict "value" .Values.metrics.service.annotations "context" $) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}
spec:
type: {{ .Values.metrics.service.type }}
ports:
- name: tcp-metrics
port: {{ .Values.metrics.service.port }}
targetPort: metrics
selector: {{- include "zookeeper.matchLabels" . | nindent 4 }}
app.kubernetes.io/component: zookeeper
{{- end }}

View file
@@ -0,0 +1,43 @@
{{- if .Values.networkPolicy.enabled }}
kind: NetworkPolicy
apiVersion: networking.k8s.io/v1
metadata:
name: {{ include "zookeeper.fullname" . }}
namespace: {{ .Release.Namespace }}
labels: {{- include "zookeeper.labels" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
podSelector:
matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }}
ingress:
# Allow inbound connections to zookeeper
- ports:
- port: {{ .Values.service.port }}
from:
{{- if not .Values.networkPolicy.allowExternal }}
- podSelector:
matchLabels:
{{ include "zookeeper.fullname" . }}-client: "true"
- podSelector:
matchLabels: {{- include "zookeeper.matchLabels" . | nindent 14 }}
{{- else }}
- podSelector:
matchLabels: {}
{{- end }}
# Internal ports
- ports: &intranodes_ports
- port: {{ .Values.service.followerPort }}
- port: {{ .Values.service.electionPort }}
from:
- podSelector:
matchLabels: {{- include "zookeeper.matchLabels" . | nindent 14 }}
egress:
- ports: *intranodes_ports
# Allow outbound connections from zookeeper nodes
{{- end }}

View file
@@ -0,0 +1,21 @@
{{- $replicaCount := int .Values.replicaCount }}
{{- if gt $replicaCount 1 }}
apiVersion: policy/v1beta1
kind: PodDisruptionBudget
metadata:
name: {{ template "zookeeper.fullname" . }}
namespace: {{ .Release.Namespace }}
labels: {{- include "zookeeper.labels" . | nindent 4 }}
app.kubernetes.io/component: zookeeper
{{- if .Values.commonLabels }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: zookeeper
{{- toYaml .Values.podDisruptionBudget | nindent 2 }}
{{- end }}

View file
@@ -0,0 +1,27 @@
{{- if and .Values.metrics.enabled .Values.metrics.prometheusRule.enabled .Values.metrics.prometheusRule.rules }}
apiVersion: monitoring.coreos.com/v1
kind: PrometheusRule
metadata:
name: {{ include "zookeeper.fullname" . }}
{{- if .Values.metrics.prometheusRule.namespace }}
namespace: {{ .Values.metrics.prometheusRule.namespace }}
{{- else }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels: {{- include "zookeeper.labels" . | nindent 4 }}
app.kubernetes.io/component: zookeeper
{{- range $key, $value := .Values.metrics.prometheusRule.selector }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- if .Values.commonLabels }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
groups:
- name: {{ include "zookeeper.fullname" . }}
rules: {{- toYaml .Values.metrics.prometheusRule.rules | nindent 6 }}
{{- end }}

View file
@@ -0,0 +1,18 @@
{{- if and .Values.auth.enabled (not .Values.auth.existingSecret) -}}
apiVersion: v1
kind: Secret
metadata:
name: {{ template "zookeeper.fullname" . }}
namespace: {{ .Release.Namespace }}
labels: {{- include "zookeeper.labels" . | nindent 4 }}
{{- if .Values.commonLabels }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
type: Opaque
data:
client-password: {{ include "zookeeper.clientPassword" . | b64enc | quote }}
server-password: {{ include "zookeeper.serverPasswords" . | b64enc | quote }}
{{- end }}

View file
@@ -0,0 +1,15 @@
{{- if .Values.serviceAccount.create }}
apiVersion: v1
kind: ServiceAccount
metadata:
name: {{ template "zookeeper.serviceAccountName" . }}
namespace: {{ .Release.Namespace }}
labels: {{- include "zookeeper.labels" . | nindent 4 }}
role: zookeeper
{{- if .Values.commonLabels }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
{{- end }}

View file
@@ -0,0 +1,38 @@
{{- if and .Values.metrics.enabled .Values.metrics.serviceMonitor.enabled }}
apiVersion: monitoring.coreos.com/v1
kind: ServiceMonitor
metadata:
name: {{ template "zookeeper.fullname" . }}
{{- if .Values.metrics.serviceMonitor.namespace }}
namespace: {{ .Values.metrics.serviceMonitor.namespace }}
{{- else }}
namespace: {{ .Release.Namespace }}
{{- end }}
labels: {{- include "zookeeper.labels" . | nindent 4 }}
app.kubernetes.io/component: zookeeper
{{- range $key, $value := .Values.metrics.serviceMonitor.selector }}
{{ $key }}: {{ $value | quote }}
{{- end }}
{{- if .Values.commonLabels }}
{{- include "zookeeper.tplValue" ( dict "value" .Values.commonLabels "context" $ ) | nindent 4 }}
{{- end }}
{{- if .Values.commonAnnotations }}
annotations: {{- include "zookeeper.tplValue" ( dict "value" .Values.commonAnnotations "context" $ ) | nindent 4 }}
{{- end }}
spec:
selector:
matchLabels: {{- include "zookeeper.matchLabels" . | nindent 6 }}
app.kubernetes.io/component: zookeeper
endpoints:
- port: tcp-metrics
path: "/metrics"
{{- if .Values.metrics.serviceMonitor.interval }}
interval: {{ .Values.metrics.serviceMonitor.interval }}
{{- end }}
{{- if .Values.metrics.serviceMonitor.scrapeTimeout }}
scrapeTimeout: {{ .Values.metrics.serviceMonitor.scrapeTimeout }}
{{- end }}
namespaceSelector:
matchNames:
- {{ .Release.Namespace }}
{{- end }}

Some files were not shown because too many files have changed in this diff.