Compare commits

..

2 commits

Author        SHA1        Message                                 Date
Shekar Siri   c21f7219f6  feat(ui): custom events manage (wip)    2024-08-29 10:24:07 +05:30
Shekar Siri   b24f1f688a  feat(ui): widget options                2024-08-26 13:21:13 +05:30
3527 changed files with 133039 additions and 172875 deletions

View file

@@ -10,9 +10,6 @@ inputs:
jwt_secret:
required: true
description: 'JWT Secret'
jwt_spot_secret:
required: true
description: 'JWT spot Secret'
minio_access_key:
required: true
description: 'MinIO Access Key'
@@ -39,36 +36,21 @@ runs:
- name: "Updating OSS secrets"
run: |
cd scripts/helmcharts/
vars=(
"ASSIST_JWT_SECRET:.global.assistJWTSecret"
"ASSIST_KEY:.global.assistKey"
"DOMAIN_NAME:.global.domainName"
"JWT_REFRESH_SECRET:.chalice.env.JWT_REFRESH_SECRET"
"JWT_SECRET:.global.jwtSecret"
"JWT_SPOT_REFRESH_SECRET:.chalice.env.JWT_SPOT_REFRESH_SECRET"
"JWT_SPOT_SECRET:.global.jwtSpotSecret"
"LICENSE_KEY:.global.enterpriseEditionLicense"
"MINIO_ACCESS_KEY:.global.s3.accessKey"
"MINIO_SECRET_KEY:.global.s3.secretKey"
"PG_PASSWORD:.postgresql.postgresqlPassword"
"REGISTRY_URL:.global.openReplayContainerRegistry"
)
for var in "${vars[@]}"; do
IFS=":" read -r env_var yq_path <<<"$var"
yq e -i "${yq_path} = strenv(${env_var})" vars.yaml
done
yq e -i '.global.domainName = strenv(DOMAIN_NAME)' vars.yaml
yq e -i '.global.assistKey = strenv(JWT_SECRET)' vars.yaml
yq e -i '.global.assistJWTSecret = strenv(JWT_SECRET)' vars.yaml
yq e -i '.chalice.env.jwt_secret = strenv(JWT_SECRET)' vars.yaml
yq e -i '.global.enterpriseEditionLicense = strenv(LICENSE_KEY)' vars.yaml
yq e -i '.global.s3.accessKey = strenv(MINIO_ACCESS_KEY)' vars.yaml
yq e -i '.global.s3.secretKey = strenv(MINIO_SECRET_KEY)' vars.yaml
yq e -i '.postgresql.postgresqlPassword = strenv(PG_PASSWORD)' vars.yaml
yq e -i '.global.openReplayContainerRegistry = strenv(REGISTRY_URL)' vars.yaml
shell: bash
env:
ASSIST_JWT_SECRET: ${{ inputs.assist_jwt_secret }}
ASSIST_KEY: ${{ inputs.assist_key }}
DOMAIN_NAME: ${{ inputs.domain_name }}
JWT_REFRESH_SECRET: ${{ inputs.jwt_refresh_secret }}
JWT_SECRET: ${{ inputs.jwt_secret }}
JWT_SPOT_REFRESH_SECRET: ${{inputs.jwt_spot_refresh_secret}}
JWT_SPOT_SECRET: ${{ inputs.jwt_spot_secret }}
LICENSE_KEY: ${{ inputs.license_key }}
JWT_SECRET: ${{ inputs.jwt_secret }}
MINIO_ACCESS_KEY: ${{ inputs.minio_access_key }}
MINIO_SECRET_KEY: ${{ inputs.minio_secret_key }}
PG_PASSWORD: ${{ inputs.pg_password }}
REGISTRY_URL: ${{ inputs.registry_url }}
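
The two variants above show the same secret-injection step in data-driven and spelled-out form: one walks an ENV_VAR:yq_path table, the other issues one yq call per key. A minimal standalone sketch of the loop form, with illustrative key names and values, yq v4 assumed:

```bash
#!/usr/bin/env bash
# Sketch of the data-driven secret-injection loop (illustrative values).
set -euo pipefail

export DOMAIN_NAME="demo.example.com"   # hypothetical value
export JWT_SECRET="not-a-real-secret"   # hypothetical value

vars=(
  "DOMAIN_NAME:.global.domainName"
  "JWT_SECRET:.global.jwtSecret"
)
for var in "${vars[@]}"; do
  # Split "ENV_VAR:yq_path" on the first colon.
  IFS=":" read -r env_var yq_path <<<"$var"
  # strenv() reads the exported variable, avoiding shell-quoting pitfalls.
  yq e -i "${yq_path} = strenv(${env_var})" vars.yaml
done
```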

View file

@@ -43,14 +43,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -83,10 +78,10 @@ jobs:
cd api
PUSH_IMAGE=0 bash -x ./build_alerts.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("alerts")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
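
The paired trivy lines above recur in nearly every job in this comparison: the two sides differ in release (v0.56.2 vs v0.34.0) and in whether the vulnerability DB is pinned to explicit mirrors via --db-repository, which newer Trivy releases support, apparently to avoid registry rate limits. A condensed sketch of the scan gate, with the image reference a placeholder:

```bash
# Sketch of the image-scan gate used across these jobs (placeholder image).
# DOCKER_REPO and IMAGE_TAG come from the job environment in the workflows.
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz \
  | tar -xzf - -C ./
# Fail the job (exit code 1) on any unfixed HIGH/CRITICAL finding.
./trivy image \
  --db-repository ghcr.io/aquasecurity/trivy-db:2 \
  --db-repository public.ecr.aws/aquasecurity/trivy-db:2 \
  --exit-code 1 --security-checks vuln --vuln-type os,library \
  --severity "HIGH,CRITICAL" --ignore-unfixed \
  "$DOCKER_REPO/alerts:$IMAGE_TAG"
```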

View file

@@ -36,14 +36,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -76,10 +71,10 @@ jobs:
cd api
PUSH_IMAGE=0 bash -x ./build_alerts.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("alerts")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {

View file

@@ -10,6 +10,8 @@ on:
branches:
- dev
- api-*
- v1.11.0-patch
- actions_test
paths:
- "ee/api/**"
- "api/**"
@@ -40,14 +42,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -80,10 +77,10 @@ jobs:
cd api
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("chalice")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {

View file

@@ -10,6 +10,7 @@ on:
branches:
- dev
- api-*
- v1.11.0-patch
paths:
- "api/**"
- "!api/.gitignore"
@@ -34,14 +35,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -74,10 +70,10 @@ jobs:
cd api
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("chalice")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
@@ -139,12 +135,12 @@ jobs:
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# iimit-access-to-actor: true

View file

@@ -9,6 +9,7 @@ on:
push:
branches:
- dev
- api-*
paths:
- "ee/assist/**"
- "assist/**"
@@ -32,14 +33,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -67,10 +63,10 @@ jobs:
cd assist
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {

View file

@@ -15,7 +15,7 @@ on:
- "!assist-stats/*-dev.sh"
- "!assist-stats/requirements-*.txt"
name: Build and Deploy Assist Stats ee
name: Build and Deploy Assist Stats
jobs:
deploy:
@@ -32,14 +32,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -72,10 +67,10 @@ jobs:
cd assist-stats
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist-stats")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
@@ -99,14 +94,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -123,9 +113,8 @@ jobs:
tag: ${IMAGE_TAG}
EOF
export IMAGE_TAG=${IMAGE_TAG}
# Update changed image tag
yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml
sed -i "/assist-stats/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
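
The two single lines above are alternative ways to point the assist-stats cron at the new tag: the yq form addresses the value by its full YAML path, while the sed form matches "assist-stats" and overwrites whatever sits three lines below it, which silently breaks if the override file's layout shifts. The yq form in isolation (tag value illustrative):

```bash
# Sketch: update the assist-stats cron image tag by YAML path rather than
# by sed line offsets. The tag value here is a placeholder.
export IMAGE_TAG="v1.22.0"
yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml
```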

View file

@@ -9,6 +9,7 @@ on:
push:
branches:
- dev
- api-*
paths:
- "assist/**"
- "!assist/.gitignore"
@@ -31,14 +32,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -66,10 +62,10 @@ jobs:
cd assist
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {

View file

@@ -10,6 +10,7 @@ on:
branches:
- dev
- api-*
- v1.11.0-patch
paths:
- "ee/api/**"
- "api/**"
@@ -43,14 +44,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -83,10 +79,10 @@ jobs:
cd api
PUSH_IMAGE=0 bash -x ./build_crons.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("crons")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
@@ -100,32 +96,33 @@ jobs:
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
env:
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
run: |
cd scripts/helmcharts/
cat <<EOF>/tmp/image_override.yaml
image: &image
tag: "${IMAGE_TAG}"
utilities:
apiCrons:
assiststats:
image: *image
report:
image: *image
sessionsCleaner:
image: *image
projectsStats:
image: *image
fixProjectsStats:
image: *image
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
# Update changed image tag
sed -i "/crons/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
@@ -135,6 +132,8 @@ jobs:
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
- name: Alert slack
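
The two "Creating old image input" variants above either declare the override statically, with a YAML anchor shared across the cron images, or derive it from whatever is actually running: list every container image in the app namespace, keep the /foss/ entries, and emit one name/image/tag stanza per service. A sketch of the dynamic capture:

```bash
# Sketch of the "capture currently running image tags" step.
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" \
  | tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in $(cat /tmp/image_tag.txt); do
  image_array=($(echo "$line" | tr ':' '\n'))   # "name:tag" -> (name tag)
  cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
  image:
    # Strip the -ee suffix; helm appends it for enterprise images.
    tag: $(echo "${image_array[1]}" | cut -d '-' -f 1)
EOF
done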

View file

@@ -52,7 +52,7 @@ jobs:
set -x
cd frontend
mv .env.sample .env
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:20-slim /bin/bash -c "yarn && yarn build"
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "yarn && yarn build"
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
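
The two docker run lines differ only in the builder image (node:20-slim vs node:14-stretch-slim); the shared pattern is worth noting: the build runs in a throwaway Node container as the host UID/GID, with /etc/passwd mounted so the UID resolves to a user, keeping the produced files owned by the runner. Condensed:

```bash
# Sketch of the containerized frontend build (same flags as the workflow).
cd frontend
mv .env.sample .env
docker run --rm \
  -v /etc/passwd:/etc/passwd \
  -u "$(id -u):$(id -g)" \
  -v "$(pwd):/home/${USER}" -w "/home/${USER}" \
  --name node_build node:20-slim \
  /bin/bash -c "yarn && yarn build"
```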

View file

@@ -19,26 +19,19 @@ jobs:
uses: actions/checkout@v2
- name: Cache node modules
uses: actions/cache@v4
uses: actions/cache@v1
with:
path: |
/home/runner/work/openreplay/openreplay/frontend/node_modules
/home/runner/work/openreplay/openreplay/frontend/.yarn
key: ${{ runner.OS }}-build-${{ hashFiles('frontend/yarn.lock') }}
path: node_modules
key: ${{ runner.OS }}-build-${{ hashFiles('**/package-lock.json') }}
restore-keys: |
${{ runner.OS }}-build-
${{ runner.OS }}-
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -65,7 +58,7 @@ jobs:
set -x
cd frontend
mv .env.sample .env
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:20-slim /bin/bash -c "yarn && yarn build"
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:18-slim /bin/bash -c "yarn && yarn build"
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
@@ -108,14 +101,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}

View file

@@ -1,189 +0,0 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
on:
workflow_dispatch:
inputs:
services:
description: 'Comma separated names of services to build(in small letters).'
required: true
default: 'chalice,frontend'
tag:
description: 'Tag to update.'
required: true
type: string
branch:
description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
required: true
type: string
name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag
jobs:
deploy:
name: Build Patch from old tag
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 4
ref: ${{ github.event.inputs.tag }}
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
- name: Create backup tag with timestamp
run: |
set -e # Exit immediately if a command exits with a non-zero status
TIMESTAMP=$(date +%Y%m%d%H%M%S)
BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
echo "Created backup tag: $BACKUP_TAG"
# Get the oldest commit date from the last 3 commits in raw format
OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
# Add 1 second to the timestamp
NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV
- name: Setup yq
uses: mikefarah/yq@master
# Configure AWS credentials for the first registry
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
id: login-ecr-arm
run: |
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
- uses: depot/setup-action@v1
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name
run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV
- name: Build
id: build-image
env:
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
MSAAS_REPO_FOLDER: /tmp/msaas
run: |
set -exo pipefail
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b $BRANCH_NAME
working_dir=$(pwd)
function image_version(){
local service=$1
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
current_version=$(yq eval '.AppVersion' $chart_path)
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
echo $new_version
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
}
function clone_msaas() {
[ -d $MSAAS_REPO_FOLDER ] || {
git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
cd $MSAAS_REPO_FOLDER
cd openreplay && git fetch origin && git checkout $INPUT_TAG
git log -1
cd $MSAAS_REPO_FOLDER
bash git-init.sh
git checkout
}
}
function build_managed() {
local service=$1
local version=$2
echo building managed
clone_msaas
if [[ $service == 'chalice' ]]; then
cd $MSAAS_REPO_FOLDER/openreplay/api
else
cd $MSAAS_REPO_FOLDER/openreplay/$service
fi
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
}
# Checking for backend images
ls backend/cmd >> /tmp/backend.txt
echo Services: "${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
BUILD_SCRIPT_NAME="build.sh"
# Build FOSS
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
ee_build_args="ee"
fi
version=$(image_version $SERVICE)
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
else
build_managed $SERVICE $version
fi
cd $working_dir
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
yq eval ".AppVersion = \"$version\"" -i $chart_path
git add $chart_path
git commit -m "Increment $SERVICE chart version"
done
- name: Change commit timestamp
run: |
# Convert the timestamp to a date format git can understand
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
echo "Setting commit date to: $NEW_DATE"
# Amend the commit with the new date
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
# Verify the change
git log -1 --pretty=format:"Commit now dated: %cD"
# git tag and push
git tag $INPUT_TAG -f
git push origin $INPUT_TAG -f
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
# DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
# MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
# MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
# MSAAS_REPO_FOLDER: /tmp/msaas
# with:
# limit-access-to-actor: true
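
The deleted workflow above carries one non-obvious git trick: to keep a rebuilt tag from looking newer than the release it patches, it takes the oldest timestamp among the last three commits, adds one second, and amends HEAD with both author and committer dates set to that instant before force-pushing the tag. The step in isolation (NEW_TIMESTAMP as a unix epoch and INPUT_TAG assumed exported earlier):

```bash
# Sketch of the commit-backdating step from the deleted workflow.
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' "$NEW_TIMESTAMP")
echo "Setting commit date to: $NEW_DATE"
# GIT_COMMITTER_DATE covers the committer side; --date covers the author side.
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
git log -1 --pretty=format:"Commit now dated: %cD"
git tag -f "$INPUT_TAG"
git push origin "$INPUT_TAG" --force
```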

View file

@@ -2,6 +2,7 @@
on:
workflow_dispatch:
description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
inputs:
services:
description: 'Comma separated names of services to build(in small letters).'
@@ -19,20 +20,12 @@ jobs:
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v4
uses: actions/checkout@v2
with:
fetch-depth: 0
token: ${{ secrets.GITHUB_TOKEN }}
fetch-depth: 1
- name: Rebase with main branch, to make sure the code has latest main changes
if: github.ref != 'refs/heads/main'
run: |
git remote -v
git config --global user.email "action@github.com"
git config --global user.name "GitHub Action"
git config --global rebase.autoStash true
git fetch origin main:main
git rebase main
git log -3
git pull --rebase origin main
- name: Downloading yq
run: |
@@ -55,8 +48,6 @@ jobs:
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
- uses: depot/setup-action@v1
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name
@@ -74,168 +65,73 @@ jobs:
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
MSAAS_REPO_FOLDER: /tmp/msaas
SERVICES_INPUT: ${{ github.event.inputs.services }}
run: |
#!/bin/bash
set -euo pipefail
# Configuration
readonly WORKING_DIR=$(pwd)
readonly BUILD_SCRIPT_NAME="build.sh"
readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
# Initialize git configuration
setup_git() {
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b "$BRANCH_NAME"
set -exo pipefail
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b $BRANCH_NAME
working_dir=$(pwd)
function image_version(){
local service=$1
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
current_version=$(yq eval '.AppVersion' $chart_path)
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
echo $new_version
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
}
# Get and increment image version
image_version() {
local service=$1
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
local current_version new_version
current_version=$(yq eval '.AppVersion' "$chart_path")
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
echo "$new_version"
function clone_msaas() {
[ -d $MSAAS_REPO_FOLDER ] || {
git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
cd $MSAAS_REPO_FOLDER
bash git-init.sh
git checkout
}
}
# Clone MSAAS repository if not exists
clone_msaas() {
if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
cd "$MSAAS_REPO_FOLDER"
cd openreplay && git fetch origin && git checkout main
git log -1
cd "$MSAAS_REPO_FOLDER"
bash git-init.sh
git checkout
fi
function build_managed() {
local service=$1
local version=$2
echo building managed
clone_msaas
if [[ $service == 'chalice' ]]; then
cd $MSAAS_REPO_FOLDER/openreplay/api
else
cd $MSAAS_REPO_FOLDER/openreplay/$service
fi
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
}
# Build managed services
build_managed() {
local service=$1
local version=$2
echo "Building managed service: $service"
clone_msaas
if [[ $service == 'chalice' ]]; then
cd "$MSAAS_REPO_FOLDER/openreplay/api"
else
cd "$MSAAS_REPO_FOLDER/openreplay/$service"
fi
local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
echo "Executing: $build_cmd"
if ! eval "$build_cmd" 2>&1; then
echo "Build failed for $service"
exit 1
fi
}
# Build service with given arguments
build_service() {
local service=$1
local version=$2
local build_args=$3
local build_script=${4:-$BUILD_SCRIPT_NAME}
local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
echo "Executing: $command"
eval "$command"
}
# Update chart version and commit changes
update_chart_version() {
local service=$1
local version=$2
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
# Ensure we're in the original working directory/repository
cd "$WORKING_DIR"
yq eval ".AppVersion = \"$version\"" -i "$chart_path"
git add "$chart_path"
git commit -m "Increment $service chart version to $version"
git push --set-upstream origin "$BRANCH_NAME"
cd -
}
# Main execution
main() {
setup_git
# Get backend services list
ls backend/cmd >"$BACKEND_SERVICES_FILE"
# Parse services input (fix for GitHub Actions syntax)
echo "Services: ${SERVICES_INPUT:-$1}"
IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
# Process each service
for service in "${services[@]}"; do
echo "Processing service: $service"
cd "$WORKING_DIR"
local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
# Determine build configuration based on service type
if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
# Backend service
cd backend
foss_build_args="nil $service"
ee_build_args="ee $service"
else
# Non-backend service
case "$service" in
chalice | alerts | crons)
cd "$WORKING_DIR/api"
;;
*)
cd "$service"
;;
esac
# Special build scripts for alerts/crons
if [[ $service == 'alerts' || $service == 'crons' ]]; then
build_script="build_${service}.sh"
fi
ee_build_args="ee"
fi
# Get version and build
local version
version=$(image_version "$service")
# Build FOSS and EE versions
build_service "$service" "$version" "$foss_build_args"
build_service "$service" "${version}-ee" "$ee_build_args"
# Build managed version for specific services
if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
echo "Nothing to build in managed for service $service"
else
build_managed "$service" "$version"
fi
# Update chart and commit
update_chart_version "$service" "$version"
done
cd "$WORKING_DIR"
# Cleanup
rm -f "$BACKEND_SERVICES_FILE"
}
echo "Working directory: $WORKING_DIR"
# Run main function with all arguments
main "$SERVICES_INPUT"
# Checking for backend images
ls backend/cmd >> /tmp/backend.txt
echo Services: "${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
# Build FOSS
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
[[ $SERVICE == 'chalice' ]] && cd $working_dir/api || cd $SERVICE
ee_build_args="ee"
fi
version=$(image_version $SERVICE)
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $foss_build_args
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $foss_build_args
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $ee_build_args
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash build.sh $ee_build_args
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh $foss_build_args
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh $foss_build_args
else
build_managed $SERVICE $version
fi
cd $working_dir
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
yq eval ".AppVersion = \"$version\"" -i $chart_path
git add $chart_path
git commit -m "Increment $SERVICE chart version"
git push --set-upstream origin $BRANCH_NAME
done
- name: Create Pull Request
uses: repo-sync/pull-request@v2
@@ -246,7 +142,8 @@ jobs:
pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
pr_body: |
This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
Once this PR is merged, tag update job will run automatically.
Once this PR is merged, To update the latest tag, run the following workflow.
https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml
# - name: Debug Job
# if: ${{ failure() }}
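
Both variants of the build script in this hunk bump chart versions the same way: read AppVersion from the service's Chart.yaml with yq, increment the patch field with awk, write it back, and commit. Extracted as a standalone helper (yq v4 assumed, usage path hypothetical):

```bash
# Sketch of the image_version helper shared by both script variants.
image_version() {
  local chart_path=$1
  local current_version new_version
  current_version=$(yq eval '.AppVersion' "$chart_path")
  # awk splits on dots and bumps the last field: 1.2.3 -> 1.2.4
  new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
  echo "$new_version"
}

# Usage (hypothetical path):
# image_version scripts/helmcharts/openreplay/charts/frontend/Chart.yaml
```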

View file

@@ -1,4 +1,4 @@
# This action will push the assist changes to aws
# This action will push the peers changes to aws
on:
workflow_dispatch:
inputs:
@@ -9,10 +9,14 @@ on:
push:
branches:
- dev
- api-*
paths:
- "ee/assist-server/**"
- "ee/peers/**"
- "peers/**"
- "!peers/.gitignore"
- "!peers/*-dev.sh"
name: Build and Deploy Assist-Server EE
name: Build and Deploy Peers EE
jobs:
deploy:
@@ -29,14 +33,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -53,7 +52,12 @@ jobs:
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing Assist-Server image
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing peers image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
@@ -61,13 +65,13 @@
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd assist-server
cd peers
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist-server")
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("peers")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
@@ -76,7 +80,7 @@
} && {
echo "Skipping Security Checks"
}
images=("assist-server")
images=("peers")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
@@ -100,23 +104,43 @@ jobs:
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
pwd
cd scripts/helmcharts/
# Update changed image tag
sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
sed -i "/peers/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# iimit-access-to-actor: true
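
The deploy steps in this file and in the new peers.yaml below share a chart-isolation trick: move the charts that should survive out of the way, wipe openreplay/charts, move them back, then render and apply, so only the selected services are touched. A sketch with the peers chart set (k_version assumed set by an earlier step):

```bash
# Sketch of the chart-isolation deploy used in these jobs.
cd scripts/helmcharts/
mkdir -p /tmp/charts
# Keep only the charts we intend to (re)render.
mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay \
  -f vars.yaml -f /tmp/image_override.yaml \
  --set ingress-nginx.enabled=false --set skipMigration=true \
  --no-hooks --kube-version="$k_version" | kubectl apply -f -
```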

.github/workflows/peers.yaml (vendored, new file, 144 lines)
View file

@@ -0,0 +1,144 @@
# This action will push the peers changes to aws
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
required: false
default: "false"
push:
branches:
- dev
- api-*
paths:
- "peers/**"
- "!peers/.gitignore"
- "!peers/*-dev.sh"
name: Build and Deploy Peers
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- uses: ./.github/composite-actions/update-keys
with:
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing peers image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd peers
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("peers")
for image in ${images[*]};do
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("peers")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/
# Update changed image tag
sed -i "/peers/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"
# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true

View file

@@ -1,103 +0,0 @@
name: Release Deployment
on:
workflow_dispatch:
inputs:
services:
description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
required: true
branch:
description: 'Branch to deploy (defaults to dev)'
required: false
default: 'dev'
env:
IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.branch }}
- name: Docker login
run: |
docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- name: Set image tag with branch info
run: |
SHORT_SHA=$(git rev-parse --short HEAD)
echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
echo "Using image tag: $IMAGE_TAG"
- uses: depot/setup-action@v1
- name: Build and push Docker images
run: |
# Parse the comma-separated services list into an array
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
working_dir=$(pwd)
# Define backend services (consider moving this to workflow inputs or repo config)
ls backend/cmd >> /tmp/backend.txt
BUILD_SCRIPT_NAME="build.sh"
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd $working_dir/backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
cd $working_dir
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
ee_build_args="ee"
fi
{
echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
}&
{
echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
}&
done
wait
- uses: azure/k8s-set-context@v1
name: Using ee release cluster
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_RELEASE_KUBECONFIG }}
- name: Deploy to ee release Kubernetes
run: |
echo "Deploying services to EE cluster: ${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
for SERVICE in "${SERVICES[@]}"; do
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
done
- uses: azure/k8s-set-context@v1
name: Using foss release cluster
with:
method: kubeconfig
kubeconfig: ${{ secrets.FOSS_RELEASE_KUBECONFIG }}
- name: Deploy to FOSS release Kubernetes
run: |
echo "Deploying services to FOSS cluster: ${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
for SERVICE in "${SERVICES[@]}"; do
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
done
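
The removed release workflow deployed by patching images in place instead of re-rendering helm charts: one kubectl set image per service, with deployment names following the SERVICE-openreplay convention. In isolation (registry and tag values are placeholders):

```bash
# Sketch of the per-service rollout from the removed release workflow.
IMAGE_REGISTRY_URL="registry.example.com"   # placeholder
IMAGE_TAG="dev-abc1234"                     # placeholder
for SERVICE in frontend sink; do
  SERVICE=$(echo "$SERVICE" | xargs)        # trim whitespace
  kubectl set image "deployment/${SERVICE}-openreplay" -n app \
    "${SERVICE}=${IMAGE_REGISTRY_URL}/${SERVICE}:${IMAGE_TAG}"
done
```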

View file

@@ -1,4 +1,4 @@
# This action will push the sourcemapreader changes to ee
# This action will push the sourcemapreader changes to aws
on:
workflow_dispatch:
inputs:
@@ -9,13 +9,13 @@ on:
push:
branches:
- dev
- api-*
paths:
- "ee/sourcemap-reader/**"
- "sourcemap-reader/**"
- "!sourcemap-reader/.gitignore"
- "!sourcemap-reader/*-dev.sh"
name: Build and Deploy sourcemap-reader EE
name: Build and Deploy sourcemap-reader
jobs:
deploy:
@@ -32,14 +32,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
@@ -64,7 +59,7 @@ jobs:
- name: Building and Pushing sourcemaps-reader image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
@@ -72,10 +67,10 @@ jobs:
cd sourcemap-reader
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("sourcemaps-reader")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
@@ -132,7 +127,7 @@ jobs:
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}

View file

@@ -9,6 +9,7 @@ on:
push:
branches:
- dev
- api-*
paths:
- "sourcemap-reader/**"
- "!sourcemap-reader/.gitignore"
@@ -31,14 +32,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@@ -71,10 +67,10 @@ jobs:
cd sourcemap-reader
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("sourcemaps-reader")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {

View file

@@ -9,16 +9,24 @@ on:
pull_request:
branches: [ "dev", "main" ]
paths:
- frontend/**
- tracker/**
jobs:
build-and-test:
runs-on: macos-latest
name: Build and test Tracker
strategy:
matrix:
node-version: [ 18.x ]
steps:
- uses: oven-sh/setup-bun@v2
- uses: oven-sh/setup-bun@v1
with:
bun-version: latest
- uses: actions/checkout@v3
- name: Use Node.js ${{ matrix.node-version }}
uses: actions/setup-node@v3
with:
node-version: ${{ matrix.node-version }}
- name: Cache tracker modules
uses: actions/cache@v3
with:

View file

@@ -27,9 +27,9 @@ jobs:
name: Build and test Tracker plus Replayer
strategy:
matrix:
node-version: [ 20.x ]
node-version: [ 18.x ]
steps:
- uses: oven-sh/setup-bun@v2
- uses: oven-sh/setup-bun@v1
with:
bun-version: latest
- uses: actions/checkout@v3
@@ -102,11 +102,11 @@ jobs:
- name: Setup packages
run: |
cd frontend
yarn
npm install --legacy-peer-deps
- name: Run unit tests
run: |
cd frontend
yarn test:ci
npm run test:ci
- name: Upload coverage reports to Codecov
uses: codecov/codecov-action@v3
with:
@@ -138,7 +138,7 @@ jobs:
- name: (Chrome) Run visual tests
run: |
cd frontend
yarn cy:test
npm run cy:test
# firefox have different viewport somehow
# - name: (Firefox) Run visual tests
# run: yarn cy:test-firefox

View file

@@ -1,42 +1,35 @@
on:
pull_request:
types: [closed]
branches:
- main
name: Release tag update --force
workflow_dispatch:
description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
inputs:
services:
description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
required: true
default: "false"
name: Force Push tag with main branch HEAD
jobs:
deploy:
name: Build Patch from main
runs-on: ubuntu-latest
if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
- name: Get latest release tag using GitHub API
id: get-latest-tag
run: |
LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://api.github.com/repos/${{ github.repository }}/releases/latest" \
| jq -r .tag_name)
# Fallback to git command if API doesn't return a tag
if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
echo "Not found latest tag"
exit 100
fi
echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
echo "Latest tag: $LATEST_TAG"
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
- name: Push main branch to tag
run: |
git fetch --tags
git checkout main
echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# with:
# limit-access-to-actor: true

View file

@ -36,14 +36,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
@ -121,8 +116,8 @@ jobs:
echo "Bulding $image"
PUSH_IMAGE=0 bash -x ./build.sh ee $image
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code

View file

@ -35,14 +35,9 @@ jobs:
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.OSS_LICENSE_KEY }}
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
@ -114,8 +109,8 @@ jobs:
echo "Bulding $image"
PUSH_IMAGE=0 bash -x ./build.sh skip $image
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
@ -183,4 +178,3 @@ jobs:
# ENVIRONMENT: staging
# with:
# limit-access-to-actor: true
#

702
LICENSE
View file

@ -1,694 +1,64 @@
Copyright (c) 2021-2025 Asayer, Inc dba OpenReplay
Copyright (c) 2022 Asayer, Inc.
OpenReplay monorepo uses multiple licenses. Portions of this software are licensed as follows:
- All content that resides under the "ee/" directory of this repository, is licensed under the license defined in "ee/LICENSE".
- All third party components incorporated into the OpenReplay Software are licensed under the original license provided by the owner of the applicable component.
- Some directories are licensed under the "MIT" license, as defined below.
- Content outside of the above mentioned directories or restrictions defaults to the "GNU Affero General Public License Version 3 (AGPL v3)" license, as defined below.
- Some directories have a specific LICENSE file and are licensed under the "MIT" license, as defined below.
- Content outside of the above mentioned directories or restrictions defaults to the "Elastic License 2.0 (ELv2)" license, as defined below.
Reach out (license@openreplay.com) if you have any questions regarding licenses.
--------------------------------------------------------------------------------
MIT LICENSE
------------------------------------------------------------------------------------
MIT License
Permission is hereby granted, free of charge, to any person obtaining a copy
of this software and associated documentation files (the "Software"), to deal
in the Software without restriction, including without limitation the rights
to use, copy, modify, merge, publish, distribute, sublicense, and/or sell
copies of the Software, and to permit persons to whom the Software is
furnished to do so, subject to the following conditions:
Permission is hereby granted, free of charge, to any person obtaining a copy of this software and associated documentation files (the "Software"), to deal in the Software without restriction, including without limitation the rights to use, copy, modify, merge, publish, distribute, sublicense, and/or sell copies of the Software, and to permit persons to whom the Software is furnished to do so, subject to the following conditions:
The above copyright notice and this permission notice shall be included in all
copies or substantial portions of the Software.
The above copyright notice and this permission notice shall be included in all copies or substantial portions of the Software.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR
IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY,
FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE
AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER
LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM,
OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE
SOFTWARE.
THE SOFTWARE IS PROVIDED "AS IS", WITHOUT WARRANTY OF ANY KIND, EXPRESS OR IMPLIED, INCLUDING BUT NOT LIMITED TO THE WARRANTIES OF MERCHANTABILITY, FITNESS FOR A PARTICULAR PURPOSE AND NONINFRINGEMENT. IN NO EVENT SHALL THE AUTHORS OR COPYRIGHT HOLDERS BE LIABLE FOR ANY CLAIM, DAMAGES OR OTHER LIABILITY, WHETHER IN AN ACTION OF CONTRACT, TORT OR OTHERWISE, ARISING FROM, OUT OF OR IN CONNECTION WITH THE SOFTWARE OR THE USE OR OTHER DEALINGS IN THE SOFTWARE.
--------------------------------------------------------------------------------
GNU AFFERO GENERAL PUBLIC LICENSE
Version 3, 19 November 2007
------------------------------------------------------------------------------------
Elastic License 2.0 (ELv2)
Copyright (C) 2007 Free Software Foundation, Inc. <https://fsf.org/>
Everyone is permitted to copy and distribute verbatim copies
of this license document, but changing it is not allowed.
**Acceptance**
By using the software, you agree to all of the terms and conditions below.
Preamble
**Copyright License**
The licensor grants you a non-exclusive, royalty-free, worldwide, non-sublicensable, non-transferable license to use, copy, distribute, make available, and prepare derivative works of the software, in each case subject to the limitations and conditions below.
The GNU Affero General Public License is a free, copyleft license for
software and other kinds of works, specifically designed to ensure
cooperation with the community in the case of network server software.
**Limitations**
You may not provide the software to third parties as a hosted or managed service, where the service provides users with access to any substantial set of the features or functionality of the software.
The licenses for most software and other practical works are designed
to take away your freedom to share and change the works. By contrast,
our General Public Licenses are intended to guarantee your freedom to
share and change all versions of a program--to make sure it remains free
software for all its users.
You may not move, change, disable, or circumvent the license key functionality in the software, and you may not remove or obscure any functionality in the software that is protected by the license key.
When we speak of free software, we are referring to freedom, not
price. Our General Public Licenses are designed to make sure that you
have the freedom to distribute copies of free software (and charge for
them if you wish), that you receive source code or can get it if you
want it, that you can change the software or use pieces of it in new
free programs, and that you know you can do these things.
You may not alter, remove, or obscure any licensing, copyright, or other notices of the licensor in the software. Any use of the licensor's trademarks is subject to applicable law.
Developers that use our General Public Licenses protect your rights
with two steps: (1) assert copyright on the software, and (2) offer
you this License which gives you legal permission to copy, distribute
and/or modify the software.
**Patents**
The licensor grants you a license, under any patent claims the licensor can license, or becomes able to license, to make, have made, use, sell, offer for sale, import and have imported the software, in each case subject to the limitations and conditions in this license. This license does not cover any patent claims that you cause to be infringed by modifications or additions to the software. If you or your company make any written claim that the software infringes or contributes to infringement of any patent, your patent license for the software granted under these terms ends immediately. If your company makes such a claim, your patent license ends immediately for work on behalf of your company.
A secondary benefit of defending all users' freedom is that
improvements made in alternate versions of the program, if they
receive widespread use, become available for other developers to
incorporate. Many developers of free software are heartened and
encouraged by the resulting cooperation. However, in the case of
software used on network servers, this result may fail to come about.
The GNU General Public License permits making a modified version and
letting the public access it on a server without ever releasing its
source code to the public.
**Notices**
You must ensure that anyone who gets a copy of any part of the software from you also gets a copy of these terms.
The GNU Affero General Public License is designed specifically to
ensure that, in such cases, the modified source code becomes available
to the community. It requires the operator of a network server to
provide the source code of the modified version running there to the
users of that server. Therefore, public use of a modified version, on
a publicly accessible server, gives the public access to the source
code of the modified version.
If you modify the software, you must include in any modified copies of the software prominent notices stating that you have modified the software.
An older license, called the Affero General Public License and
published by Affero, was designed to accomplish similar goals. This is
a different license, not a version of the Affero GPL, but Affero has
released a new version of the Affero GPL which permits relicensing under
this license.
**No Other Rights**
These terms do not imply any licenses other than those expressly granted in these terms.
The precise terms and conditions for copying, distribution and
modification follow.
**Termination**
If you use the software in violation of these terms, such use is not licensed, and your licenses will automatically terminate. If the licensor provides you with a notice of your violation, and you cease all violation of this license no later than 30 days after you receive that notice, your licenses will be reinstated retroactively. However, if you violate these terms after such reinstatement, any additional violation of these terms will cause your licenses to terminate automatically and permanently.
TERMS AND CONDITIONS
**No Liability**
As far as the law allows, the software comes as is, without any warranty or condition, and the licensor will not be liable to you for any damages arising out of these terms or the use or nature of the software, under any kind of legal claim.
0. Definitions.
**Definitions**
The *licensor* is the entity offering these terms, and the *software* is the software the licensor makes available under these terms, including any portion of it.
"This License" refers to version 3 of the GNU Affero General Public License.
*you* refers to the individual or entity agreeing to these terms.
"Copyright" also means copyright-like laws that apply to other kinds of
works, such as semiconductor masks.
*your company* is any legal entity, sole proprietorship, or other kind of organization that you work for, plus all organizations that have control over, are under the control of, or are under common control with that organization. *control* means ownership of substantially all the assets of an entity, or the power to direct its management and policies by vote, contract, or otherwise. Control can be direct or indirect.
"The Program" refers to any copyrightable work licensed under this
License. Each licensee is addressed as "you". "Licensees" and
"recipients" may be individuals or organizations.
*your licenses* are all the licenses granted to you for the software under these terms.
To "modify" a work means to copy from or adapt all or part of the work
in a fashion requiring copyright permission, other than the making of an
exact copy. The resulting work is called a "modified version" of the
earlier work or a work "based on" the earlier work.
*use* means anything you do with the software requiring one of your licenses.
A "covered work" means either the unmodified Program or a work based
on the Program.
To "propagate" a work means to do anything with it that, without
permission, would make you directly or secondarily liable for
infringement under applicable copyright law, except executing it on a
computer or modifying a private copy. Propagation includes copying,
distribution (with or without modification), making available to the
public, and in some countries other activities as well.
To "convey" a work means any kind of propagation that enables other
parties to make or receive copies. Mere interaction with a user through
a computer network, with no transfer of a copy, is not conveying.
An interactive user interface displays "Appropriate Legal Notices"
to the extent that it includes a convenient and prominently visible
feature that (1) displays an appropriate copyright notice, and (2)
tells the user that there is no warranty for the work (except to the
extent that warranties are provided), that licensees may convey the
work under this License, and how to view a copy of this License. If
the interface presents a list of user commands or options, such as a
menu, a prominent item in the list meets this criterion.
1. Source Code.
The "source code" for a work means the preferred form of the work
for making modifications to it. "Object code" means any non-source
form of a work.
A "Standard Interface" means an interface that either is an official
standard defined by a recognized standards body, or, in the case of
interfaces specified for a particular programming language, one that
is widely used among developers working in that language.
The "System Libraries" of an executable work include anything, other
than the work as a whole, that (a) is included in the normal form of
packaging a Major Component, but which is not part of that Major
Component, and (b) serves only to enable use of the work with that
Major Component, or to implement a Standard Interface for which an
implementation is available to the public in source code form. A
"Major Component", in this context, means a major essential component
(kernel, window system, and so on) of the specific operating system
(if any) on which the executable work runs, or a compiler used to
produce the work, or an object code interpreter used to run it.
The "Corresponding Source" for a work in object code form means all
the source code needed to generate, install, and (for an executable
work) run the object code and to modify the work, including scripts to
control those activities. However, it does not include the work's
System Libraries, or general-purpose tools or generally available free
programs which are used unmodified in performing those activities but
which are not part of the work. For example, Corresponding Source
includes interface definition files associated with source files for
the work, and the source code for shared libraries and dynamically
linked subprograms that the work is specifically designed to require,
such as by intimate data communication or control flow between those
subprograms and other parts of the work.
The Corresponding Source need not include anything that users
can regenerate automatically from other parts of the Corresponding
Source.
The Corresponding Source for a work in source code form is that
same work.
2. Basic Permissions.
All rights granted under this License are granted for the term of
copyright on the Program, and are irrevocable provided the stated
conditions are met. This License explicitly affirms your unlimited
permission to run the unmodified Program. The output from running a
covered work is covered by this License only if the output, given its
content, constitutes a covered work. This License acknowledges your
rights of fair use or other equivalent, as provided by copyright law.
You may make, run and propagate covered works that you do not
convey, without conditions so long as your license otherwise remains
in force. You may convey covered works to others for the sole purpose
of having them make modifications exclusively for you, or provide you
with facilities for running those works, provided that you comply with
the terms of this License in conveying all material for which you do
not control copyright. Those thus making or running the covered works
for you must do so exclusively on your behalf, under your direction
and control, on terms that prohibit them from making any copies of
your copyrighted material outside their relationship with you.
Conveying under any other circumstances is permitted solely under
the conditions stated below. Sublicensing is not allowed; section 10
makes it unnecessary.
3. Protecting Users' Legal Rights From Anti-Circumvention Law.
No covered work shall be deemed part of an effective technological
measure under any applicable law fulfilling obligations under article
11 of the WIPO copyright treaty adopted on 20 December 1996, or
similar laws prohibiting or restricting circumvention of such
measures.
When you convey a covered work, you waive any legal power to forbid
circumvention of technological measures to the extent such circumvention
is effected by exercising rights under this License with respect to
the covered work, and you disclaim any intention to limit operation or
modification of the work as a means of enforcing, against the work's
users, your or third parties' legal rights to forbid circumvention of
technological measures.
4. Conveying Verbatim Copies.
You may convey verbatim copies of the Program's source code as you
receive it, in any medium, provided that you conspicuously and
appropriately publish on each copy an appropriate copyright notice;
keep intact all notices stating that this License and any
non-permissive terms added in accord with section 7 apply to the code;
keep intact all notices of the absence of any warranty; and give all
recipients a copy of this License along with the Program.
You may charge any price or no price for each copy that you convey,
and you may offer support or warranty protection for a fee.
5. Conveying Modified Source Versions.
You may convey a work based on the Program, or the modifications to
produce it from the Program, in the form of source code under the
terms of section 4, provided that you also meet all of these conditions:
a) The work must carry prominent notices stating that you modified
it, and giving a relevant date.
b) The work must carry prominent notices stating that it is
released under this License and any conditions added under section
7. This requirement modifies the requirement in section 4 to
"keep intact all notices".
c) You must license the entire work, as a whole, under this
License to anyone who comes into possession of a copy. This
License will therefore apply, along with any applicable section 7
additional terms, to the whole of the work, and all its parts,
regardless of how they are packaged. This License gives no
permission to license the work in any other way, but it does not
invalidate such permission if you have separately received it.
d) If the work has interactive user interfaces, each must display
Appropriate Legal Notices; however, if the Program has interactive
interfaces that do not display Appropriate Legal Notices, your
work need not make them do so.
A compilation of a covered work with other separate and independent
works, which are not by their nature extensions of the covered work,
and which are not combined with it such as to form a larger program,
in or on a volume of a storage or distribution medium, is called an
"aggregate" if the compilation and its resulting copyright are not
used to limit the access or legal rights of the compilation's users
beyond what the individual works permit. Inclusion of a covered work
in an aggregate does not cause this License to apply to the other
parts of the aggregate.
6. Conveying Non-Source Forms.
You may convey a covered work in object code form under the terms
of sections 4 and 5, provided that you also convey the
machine-readable Corresponding Source under the terms of this License,
in one of these ways:
a) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by the
Corresponding Source fixed on a durable physical medium
customarily used for software interchange.
b) Convey the object code in, or embodied in, a physical product
(including a physical distribution medium), accompanied by a
written offer, valid for at least three years and valid for as
long as you offer spare parts or customer support for that product
model, to give anyone who possesses the object code either (1) a
copy of the Corresponding Source for all the software in the
product that is covered by this License, on a durable physical
medium customarily used for software interchange, for a price no
more than your reasonable cost of physically performing this
conveying of source, or (2) access to copy the
Corresponding Source from a network server at no charge.
c) Convey individual copies of the object code with a copy of the
written offer to provide the Corresponding Source. This
alternative is allowed only occasionally and noncommercially, and
only if you received the object code with such an offer, in accord
with subsection 6b.
d) Convey the object code by offering access from a designated
place (gratis or for a charge), and offer equivalent access to the
Corresponding Source in the same way through the same place at no
further charge. You need not require recipients to copy the
Corresponding Source along with the object code. If the place to
copy the object code is a network server, the Corresponding Source
may be on a different server (operated by you or a third party)
that supports equivalent copying facilities, provided you maintain
clear directions next to the object code saying where to find the
Corresponding Source. Regardless of what server hosts the
Corresponding Source, you remain obligated to ensure that it is
available for as long as needed to satisfy these requirements.
e) Convey the object code using peer-to-peer transmission, provided
you inform other peers where the object code and Corresponding
Source of the work are being offered to the general public at no
charge under subsection 6d.
A separable portion of the object code, whose source code is excluded
from the Corresponding Source as a System Library, need not be
included in conveying the object code work.
A "User Product" is either (1) a "consumer product", which means any
tangible personal property which is normally used for personal, family,
or household purposes, or (2) anything designed or sold for incorporation
into a dwelling. In determining whether a product is a consumer product,
doubtful cases shall be resolved in favor of coverage. For a particular
product received by a particular user, "normally used" refers to a
typical or common use of that class of product, regardless of the status
of the particular user or of the way in which the particular user
actually uses, or expects or is expected to use, the product. A product
is a consumer product regardless of whether the product has substantial
commercial, industrial or non-consumer uses, unless such uses represent
the only significant mode of use of the product.
"Installation Information" for a User Product means any methods,
procedures, authorization keys, or other information required to install
and execute modified versions of a covered work in that User Product from
a modified version of its Corresponding Source. The information must
suffice to ensure that the continued functioning of the modified object
code is in no case prevented or interfered with solely because
modification has been made.
If you convey an object code work under this section in, or with, or
specifically for use in, a User Product, and the conveying occurs as
part of a transaction in which the right of possession and use of the
User Product is transferred to the recipient in perpetuity or for a
fixed term (regardless of how the transaction is characterized), the
Corresponding Source conveyed under this section must be accompanied
by the Installation Information. But this requirement does not apply
if neither you nor any third party retains the ability to install
modified object code on the User Product (for example, the work has
been installed in ROM).
The requirement to provide Installation Information does not include a
requirement to continue to provide support service, warranty, or updates
for a work that has been modified or installed by the recipient, or for
the User Product in which it has been modified or installed. Access to a
network may be denied when the modification itself materially and
adversely affects the operation of the network or violates the rules and
protocols for communication across the network.
Corresponding Source conveyed, and Installation Information provided,
in accord with this section must be in a format that is publicly
documented (and with an implementation available to the public in
source code form), and must require no special password or key for
unpacking, reading or copying.
7. Additional Terms.
"Additional permissions" are terms that supplement the terms of this
License by making exceptions from one or more of its conditions.
Additional permissions that are applicable to the entire Program shall
be treated as though they were included in this License, to the extent
that they are valid under applicable law. If additional permissions
apply only to part of the Program, that part may be used separately
under those permissions, but the entire Program remains governed by
this License without regard to the additional permissions.
When you convey a copy of a covered work, you may at your option
remove any additional permissions from that copy, or from any part of
it. (Additional permissions may be written to require their own
removal in certain cases when you modify the work.) You may place
additional permissions on material, added by you to a covered work,
for which you have or can give appropriate copyright permission.
Notwithstanding any other provision of this License, for material you
add to a covered work, you may (if authorized by the copyright holders of
that material) supplement the terms of this License with terms:
a) Disclaiming warranty or limiting liability differently from the
terms of sections 15 and 16 of this License; or
b) Requiring preservation of specified reasonable legal notices or
author attributions in that material or in the Appropriate Legal
Notices displayed by works containing it; or
c) Prohibiting misrepresentation of the origin of that material, or
requiring that modified versions of such material be marked in
reasonable ways as different from the original version; or
d) Limiting the use for publicity purposes of names of licensors or
authors of the material; or
e) Declining to grant rights under trademark law for use of some
trade names, trademarks, or service marks; or
f) Requiring indemnification of licensors and authors of that
material by anyone who conveys the material (or modified versions of
it) with contractual assumptions of liability to the recipient, for
any liability that these contractual assumptions directly impose on
those licensors and authors.
All other non-permissive additional terms are considered "further
restrictions" within the meaning of section 10. If the Program as you
received it, or any part of it, contains a notice stating that it is
governed by this License along with a term that is a further
restriction, you may remove that term. If a license document contains
a further restriction but permits relicensing or conveying under this
License, you may add to a covered work material governed by the terms
of that license document, provided that the further restriction does
not survive such relicensing or conveying.
If you add terms to a covered work in accord with this section, you
must place, in the relevant source files, a statement of the
additional terms that apply to those files, or a notice indicating
where to find the applicable terms.
Additional terms, permissive or non-permissive, may be stated in the
form of a separately written license, or stated as exceptions;
the above requirements apply either way.
8. Termination.
You may not propagate or modify a covered work except as expressly
provided under this License. Any attempt otherwise to propagate or
modify it is void, and will automatically terminate your rights under
this License (including any patent licenses granted under the third
paragraph of section 11).
However, if you cease all violation of this License, then your
license from a particular copyright holder is reinstated (a)
provisionally, unless and until the copyright holder explicitly and
finally terminates your license, and (b) permanently, if the copyright
holder fails to notify you of the violation by some reasonable means
prior to 60 days after the cessation.
Moreover, your license from a particular copyright holder is
reinstated permanently if the copyright holder notifies you of the
violation by some reasonable means, this is the first time you have
received notice of violation of this License (for any work) from that
copyright holder, and you cure the violation prior to 30 days after
your receipt of the notice.
Termination of your rights under this section does not terminate the
licenses of parties who have received copies or rights from you under
this License. If your rights have been terminated and not permanently
reinstated, you do not qualify to receive new licenses for the same
material under section 10.
9. Acceptance Not Required for Having Copies.
You are not required to accept this License in order to receive or
run a copy of the Program. Ancillary propagation of a covered work
occurring solely as a consequence of using peer-to-peer transmission
to receive a copy likewise does not require acceptance. However,
nothing other than this License grants you permission to propagate or
modify any covered work. These actions infringe copyright if you do
not accept this License. Therefore, by modifying or propagating a
covered work, you indicate your acceptance of this License to do so.
10. Automatic Licensing of Downstream Recipients.
Each time you convey a covered work, the recipient automatically
receives a license from the original licensors, to run, modify and
propagate that work, subject to this License. You are not responsible
for enforcing compliance by third parties with this License.
An "entity transaction" is a transaction transferring control of an
organization, or substantially all assets of one, or subdividing an
organization, or merging organizations. If propagation of a covered
work results from an entity transaction, each party to that
transaction who receives a copy of the work also receives whatever
licenses to the work the party's predecessor in interest had or could
give under the previous paragraph, plus a right to possession of the
Corresponding Source of the work from the predecessor in interest, if
the predecessor has it or can get it with reasonable efforts.
You may not impose any further restrictions on the exercise of the
rights granted or affirmed under this License. For example, you may
not impose a license fee, royalty, or other charge for exercise of
rights granted under this License, and you may not initiate litigation
(including a cross-claim or counterclaim in a lawsuit) alleging that
any patent claim is infringed by making, using, selling, offering for
sale, or importing the Program or any portion of it.
11. Patents.
A "contributor" is a copyright holder who authorizes use under this
License of the Program or a work on which the Program is based. The
work thus licensed is called the contributor's "contributor version".
A contributor's "essential patent claims" are all patent claims
owned or controlled by the contributor, whether already acquired or
hereafter acquired, that would be infringed by some manner, permitted
by this License, of making, using, or selling its contributor version,
but do not include claims that would be infringed only as a
consequence of further modification of the contributor version. For
purposes of this definition, "control" includes the right to grant
patent sublicenses in a manner consistent with the requirements of
this License.
Each contributor grants you a non-exclusive, worldwide, royalty-free
patent license under the contributor's essential patent claims, to
make, use, sell, offer for sale, import and otherwise run, modify and
propagate the contents of its contributor version.
In the following three paragraphs, a "patent license" is any express
agreement or commitment, however denominated, not to enforce a patent
(such as an express permission to practice a patent or covenant not to
sue for patent infringement). To "grant" such a patent license to a
party means to make such an agreement or commitment not to enforce a
patent against the party.
If you convey a covered work, knowingly relying on a patent license,
and the Corresponding Source of the work is not available for anyone
to copy, free of charge and under the terms of this License, through a
publicly available network server or other readily accessible means,
then you must either (1) cause the Corresponding Source to be so
available, or (2) arrange to deprive yourself of the benefit of the
patent license for this particular work, or (3) arrange, in a manner
consistent with the requirements of this License, to extend the patent
license to downstream recipients. "Knowingly relying" means you have
actual knowledge that, but for the patent license, your conveying the
covered work in a country, or your recipient's use of the covered work
in a country, would infringe one or more identifiable patents in that
country that you have reason to believe are valid.
If, pursuant to or in connection with a single transaction or
arrangement, you convey, or propagate by procuring conveyance of, a
covered work, and grant a patent license to some of the parties
receiving the covered work authorizing them to use, propagate, modify
or convey a specific copy of the covered work, then the patent license
you grant is automatically extended to all recipients of the covered
work and works based on it.
A patent license is "discriminatory" if it does not include within
the scope of its coverage, prohibits the exercise of, or is
conditioned on the non-exercise of one or more of the rights that are
specifically granted under this License. You may not convey a covered
work if you are a party to an arrangement with a third party that is
in the business of distributing software, under which you make payment
to the third party based on the extent of your activity of conveying
the work, and under which the third party grants, to any of the
parties who would receive the covered work from you, a discriminatory
patent license (a) in connection with copies of the covered work
conveyed by you (or copies made from those copies), or (b) primarily
for and in connection with specific products or compilations that
contain the covered work, unless you entered into that arrangement,
or that patent license was granted, prior to 28 March 2007.
Nothing in this License shall be construed as excluding or limiting
any implied license or other defenses to infringement that may
otherwise be available to you under applicable patent law.
12. No Surrender of Others' Freedom.
If conditions are imposed on you (whether by court order, agreement or
otherwise) that contradict the conditions of this License, they do not
excuse you from the conditions of this License. If you cannot convey a
covered work so as to satisfy simultaneously your obligations under this
License and any other pertinent obligations, then as a consequence you may
not convey it at all. For example, if you agree to terms that obligate you
to collect a royalty for further conveying from those to whom you convey
the Program, the only way you could satisfy both those terms and this
License would be to refrain entirely from conveying the Program.
13. Remote Network Interaction; Use with the GNU General Public License.
Notwithstanding any other provision of this License, if you modify the
Program, your modified version must prominently offer all users
interacting with it remotely through a computer network (if your version
supports such interaction) an opportunity to receive the Corresponding
Source of your version by providing access to the Corresponding Source
from a network server at no charge, through some standard or customary
means of facilitating copying of software. This Corresponding Source
shall include the Corresponding Source for any work covered by version 3
of the GNU General Public License that is incorporated pursuant to the
following paragraph.
Notwithstanding any other provision of this License, you have
permission to link or combine any covered work with a work licensed
under version 3 of the GNU General Public License into a single
combined work, and to convey the resulting work. The terms of this
License will continue to apply to the part which is the covered work,
but the work with which it is combined will remain governed by version
3 of the GNU General Public License.
14. Revised Versions of this License.
The Free Software Foundation may publish revised and/or new versions of
the GNU Affero General Public License from time to time. Such new versions
will be similar in spirit to the present version, but may differ in detail to
address new problems or concerns.
Each version is given a distinguishing version number. If the
Program specifies that a certain numbered version of the GNU Affero General
Public License "or any later version" applies to it, you have the
option of following the terms and conditions either of that numbered
version or of any later version published by the Free Software
Foundation. If the Program does not specify a version number of the
GNU Affero General Public License, you may choose any version ever published
by the Free Software Foundation.
If the Program specifies that a proxy can decide which future
versions of the GNU Affero General Public License can be used, that proxy's
public statement of acceptance of a version permanently authorizes you
to choose that version for the Program.
Later license versions may give you additional or different
permissions. However, no additional obligations are imposed on any
author or copyright holder as a result of your choosing to follow a
later version.
15. Disclaimer of Warranty.
THERE IS NO WARRANTY FOR THE PROGRAM, TO THE EXTENT PERMITTED BY
APPLICABLE LAW. EXCEPT WHEN OTHERWISE STATED IN WRITING THE COPYRIGHT
HOLDERS AND/OR OTHER PARTIES PROVIDE THE PROGRAM "AS IS" WITHOUT WARRANTY
OF ANY KIND, EITHER EXPRESSED OR IMPLIED, INCLUDING, BUT NOT LIMITED TO,
THE IMPLIED WARRANTIES OF MERCHANTABILITY AND FITNESS FOR A PARTICULAR
PURPOSE. THE ENTIRE RISK AS TO THE QUALITY AND PERFORMANCE OF THE PROGRAM
IS WITH YOU. SHOULD THE PROGRAM PROVE DEFECTIVE, YOU ASSUME THE COST OF
ALL NECESSARY SERVICING, REPAIR OR CORRECTION.
16. Limitation of Liability.
IN NO EVENT UNLESS REQUIRED BY APPLICABLE LAW OR AGREED TO IN WRITING
WILL ANY COPYRIGHT HOLDER, OR ANY OTHER PARTY WHO MODIFIES AND/OR CONVEYS
THE PROGRAM AS PERMITTED ABOVE, BE LIABLE TO YOU FOR DAMAGES, INCLUDING ANY
GENERAL, SPECIAL, INCIDENTAL OR CONSEQUENTIAL DAMAGES ARISING OUT OF THE
USE OR INABILITY TO USE THE PROGRAM (INCLUDING BUT NOT LIMITED TO LOSS OF
DATA OR DATA BEING RENDERED INACCURATE OR LOSSES SUSTAINED BY YOU OR THIRD
PARTIES OR A FAILURE OF THE PROGRAM TO OPERATE WITH ANY OTHER PROGRAMS),
EVEN IF SUCH HOLDER OR OTHER PARTY HAS BEEN ADVISED OF THE POSSIBILITY OF
SUCH DAMAGES.
17. Interpretation of Sections 15 and 16.
If the disclaimer of warranty and limitation of liability provided
above cannot be given local legal effect according to their terms,
reviewing courts shall apply local law that most closely approximates
an absolute waiver of all civil liability in connection with the
Program, unless a warranty or assumption of liability accompanies a
copy of the Program in return for a fee.
END OF TERMS AND CONDITIONS
How to Apply These Terms to Your New Programs
If you develop a new program, and you want it to be of the greatest
possible use to the public, the best way to achieve this is to make it
free software which everyone can redistribute and change under these terms.
To do so, attach the following notices to the program. It is safest
to attach them to the start of each source file to most effectively
state the exclusion of warranty; and each file should have at least
the "copyright" line and a pointer to where the full notice is found.
<one line to give the program's name and a brief idea of what it does.>
Copyright (C) <year> <name of author>
This program is free software: you can redistribute it and/or modify
it under the terms of the GNU Affero General Public License as published by
the Free Software Foundation, either version 3 of the License, or
(at your option) any later version.
This program is distributed in the hope that it will be useful,
but WITHOUT ANY WARRANTY; without even the implied warranty of
MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
GNU Affero General Public License for more details.
You should have received a copy of the GNU Affero General Public License
along with this program. If not, see <https://www.gnu.org/licenses/>.
Also add information on how to contact you by electronic and paper mail.
If your software can interact with users remotely through a computer
network, you should also make sure that it provides a way for users to
get its source. For example, if your program is a web application, its
interface could display a "Source" link that leads users to an archive
of the code. There are many ways you could offer source, and different
solutions will be better for different programs; see section 13 for the
specific requirements.
You should also get your employer (if you work as a programmer) or school,
if any, to sign a "copyright disclaimer" for the program, if necessary.
For more information on this, and how to apply and follow the GNU AGPL, see
<https://www.gnu.org/licenses/>.
*trademark* means trademarks, service marks, and similar rights.

View file

@ -44,7 +44,7 @@
</a>
</p>
OpenReplay is an open-source session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster.
OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster.
- **Session replay.** OpenReplay doesn't just replay what users do. It also shows you what went on under the hood and how your website or app behaves, by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more.
- **Low footprint**. With a ~26KB (.br) tracker that asynchronously sends minimal data for a very limited impact on performance.
@ -55,9 +55,9 @@ OpenReplay is an open-source session replay suite you can host yourself, that le
## Features
- **Session replay:** Lets you relive your users' experience, see where they struggle and how it affects their behavior. Each session replay is automatically analyzed based on heuristics, for easy triage.
- **Spot:** A Chrome extension that lets you record bugs directly from your browser — each recording includes all the technical details developers need to fix them.
- **DevTools:** It's like debugging in your own browser. OpenReplay provides you with the full context (network activity, JS errors, store actions/state and 40+ metrics) so you can instantly reproduce bugs and understand performance issues.
- **Assist:** Helps you support your users by seeing their live screen and instantly hopping on a call (WebRTC) with them, without requiring any 3rd-party screen sharing software.
- **Feature flags:** Enable or disable a feature, make gradual releases and A/B test all without redeploying your app.
- **Omni-search:** Search and filter by almost any user action/criteria, session attribute or technical event, so you can answer any question. No instrumentation required.
- **Analytics:** For surfacing the most impactful issues causing conversion and revenue loss.
- **Fine-grained privacy controls:** Choose what to capture, what to obscure or what to ignore so user data doesn't even reach your servers.
@ -98,6 +98,10 @@ See our [Contributing Guide](CONTRIBUTING.md) for more details.
Also, feel free to join our [Slack](https://slack.openreplay.com) to ask questions, discuss ideas or connect with our contributors.
## Roadmap
Check out our [roadmap](https://www.notion.so/openreplay/Roadmap-889d2c3d968b4786ab9b281ab2394a94) and keep an eye on what's coming next. You're free to [submit](https://github.com/openreplay/openreplay/issues/new) new ideas and vote on features.
## License
This monorepo uses several licenses. See [LICENSE](/LICENSE) for more details.

3
api/.gitignore vendored
View file

@ -175,5 +175,4 @@ SUBNETS.json
./chalicelib/.configs
README/*
.local
/.dev/
.local

View file

@ -1,31 +1,30 @@
FROM python:3.12-alpine AS builder
LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk add --no-cache build-base
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv && \
export UV_SYSTEM_PYTHON=true && \
uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
uv pip install --no-cache-dir --upgrade -r requirements.txt
FROM python:3.12-alpine
FROM python:3.11-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
RUN apk add --no-cache build-base tini
ARG envarg
# Add Tini
# Startup daemon
ENV SOURCE_MAP_VERSION=0.7.4 \
APP_NAME=chalice \
LISTEN_PORT=8000 \
PRIVATE_ENDPOINTS=false \
ENTERPRISE_BUILD=${envarg} \
GIT_SHA=$GIT_SHA
COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
APP_NAME=chalice \
LISTEN_PORT=8000 \
PRIVATE_ENDPOINTS=false \
ENTERPRISE_BUILD=${envarg} \
GIT_SHA=$GIT_SHA
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt
COPY . .
RUN apk add --no-cache tini && mv env.default .env
RUN mv env.default .env
RUN adduser -u 1001 openreplay -D
USER 1001
ENTRYPOINT ["/sbin/tini", "--"]
CMD ["./entrypoint.sh"]
CMD ./entrypoint.sh

View file

@ -1,4 +1,4 @@
FROM python:3.12-alpine
FROM python:3.11-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
@ -16,9 +16,7 @@ ENV APP_NAME=alerts \
WORKDIR /work
COPY requirements-alerts.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv
RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system
RUN pip install --no-cache-dir --upgrade -r requirements.txt
COPY . .
RUN mv env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh

View file

@ -4,26 +4,23 @@ verify_ssl = true
name = "pypi"
[packages]
urllib3 = "==2.3.0"
urllib3 = "==1.26.16"
requests = "==2.32.3"
boto3 = "==1.36.12"
pyjwt = "==2.10.1"
psycopg2-binary = "==2.9.10"
psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
clickhouse-connect = "==0.8.15"
elasticsearch = "==8.17.1"
boto3 = "==1.34.158"
pyjwt = "==2.9.0"
psycopg2-binary = "==2.9.9"
elasticsearch = "==8.14.0"
jira = "==3.8.0"
cachetools = "==5.5.1"
fastapi = "==0.115.8"
uvicorn = {extras = ["standard"], version = "==0.34.0"}
fastapi = "==0.112.0"
python-decouple = "==3.8"
pydantic = {extras = ["email"], version = "==2.10.6"}
apscheduler = "==3.11.0"
redis = "==5.2.1"
apscheduler = "==3.10.4"
redis = "==5.1.0b6"
cachetools = "==5.4.0"
psycopg = {extras = ["pool", "binary"], version = "==3.2.1"}
uvicorn = {extras = ["standard"], version = "==0.30.5"}
pydantic = {extras = ["email"], version = "==2.8.2"}
[dev-packages]
[requires]
python_version = "3.12"
python_full_version = "3.12.8"

View file

@ -13,16 +13,17 @@ from psycopg.rows import dict_row
from starlette.responses import StreamingResponse
from chalicelib.utils import helper
from chalicelib.utils import pg_client, ch_client
from chalicelib.utils import pg_client
from crons import core_crons, core_dynamic_crons
from routers import core, core_dynamic
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot
loglevel = config("LOGLEVEL", default=logging.WARNING)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
class ORPYAsyncConnection(AsyncConnection):
def __init__(self, *args, **kwargs):
@ -38,7 +39,6 @@ async def lifespan(app: FastAPI):
app.schedule = AsyncIOScheduler()
await pg_client.init()
await ch_client.init()
app.schedule.start()
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
@ -128,7 +128,3 @@ app.include_router(usability_tests.app_apikey)
app.include_router(spot.public_app)
app.include_router(spot.app)
app.include_router(spot.app_apikey)
app.include_router(product_anaytics.public_app)
app.include_router(product_anaytics.app)
app.include_router(product_anaytics.app_apikey)

View file

@ -5,14 +5,14 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI
from chalicelib.core.alerts import alerts_processor
from chalicelib.core import alerts_processor
from chalicelib.utils import pg_client
@asynccontextmanager
async def lifespan(app: FastAPI):
# Startup
ap_logger.info(">>>>> starting up <<<<<")
logging.info(">>>>> starting up <<<<<")
await pg_client.init()
app.schedule.start()
app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",
@ -27,22 +27,14 @@ async def lifespan(app: FastAPI):
yield
# Shutdown
ap_logger.info(">>>>> shutting down <<<<<")
logging.info(">>>>> shutting down <<<<<")
app.schedule.shutdown(wait=False)
await pg_client.terminate()
loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)
app = FastAPI(root_path=config("root_path", default="/alerts"), docs_url=config("docs_url", default=""),
redoc_url=config("redoc_url", default=""), lifespan=lifespan)
app.schedule = AsyncIOScheduler()
ap_logger.info("============= ALERTS =============")
logging.info("============= ALERTS =============")
@app.get("/")
@ -58,8 +50,17 @@ async def get_health_status():
}}
app.schedule = AsyncIOScheduler()
loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)
app.schedule = AsyncIOScheduler()
if config("LOCAL_DEV", default=False, cast=bool):
@app.get('/trigger', tags=["private"])
async def trigger_main_cron():
ap_logger.info("Triggering main cron")
logging.info("Triggering main cron")
alerts_processor.process()

View file

@ -35,7 +35,7 @@ class JWTAuth(HTTPBearer):
if request.url.path in ["/refresh", "/api/refresh"]:
return await self.__process_refresh_call(request)
elif request.url.path in ["/spot/refresh", "/api/spot/refresh"]:
elif request.url.path in ["/spot/refresh", "/spot/api/refresh"]:
return await self.__process_spot_refresh_call(request)
else:

View file

@ -31,8 +31,8 @@ class ProjectAuthorizer:
logger.debug(f"unauthorized project {self.project_identifier}:{value}")
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="project not found.")
else:
current_project = schemas.ProjectContext(projectId=current_project["projectId"],
projectKey=current_project["projectKey"],
platform=current_project["platform"],
name=current_project["name"])
current_project = schemas.CurrentProjectContext(projectId=current_project["projectId"],
projectKey=current_project["projectKey"],
platform=current_project["platform"],
name=current_project["name"])
request.state.currentContext.project = current_project

View file

@ -7,13 +7,11 @@ from decouple import config
import schemas
from chalicelib.core import notifications, webhook
from chalicelib.core.collaborations.collaboration_msteams import MSTeams
from chalicelib.core.collaborations.collaboration_slack import Slack
from chalicelib.core.collaboration_msteams import MSTeams
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import pg_client, helper, email_helper, smtp
from chalicelib.utils.TimeUTC import TimeUTC
logger = logging.getLogger(__name__)
def get(id):
with pg_client.PostgresClient() as cur:

View file

@ -1,10 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
if config("EXP_ALERTS", cast=bool, default=False):
logging.info(">>> Using experimental alerts")
from . import alerts_processor_ch as alerts_processor
else:
from . import alerts_processor as alerts_processor
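
Assuming this flag-based selection lives in the package __init__ (as the relative imports suggest), callers never need to know which backend was picked: they import the package-level name and call the shared entry point, roughly as sketched below. The run_once() wrapper is hypothetical; process() is the entry point the alerts scheduler above registers.

from chalicelib.core.alerts import alerts_processor


def run_once():
    # Same call whether EXP_ALERTS bound the ClickHouse or the PostgreSQL module.
    alerts_processor.process()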

View file

@ -1,3 +0,0 @@
TENANT_ID = "-1"
from . import helpers as alert_helpers

View file

@ -1,74 +0,0 @@
import decimal
import logging
import schemas
from chalicelib.utils.TimeUTC import TimeUTC
logger = logging.getLogger(__name__)
# This is the frequency of execution for each threshold
TimeInterval = {
15: 3,
30: 5,
60: 10,
120: 20,
240: 30,
1440: 60,
}
def __format_value(x):
if x % 1 == 0:
x = int(x)
else:
x = round(x, 2)
return f"{x:,}"
def can_check(a) -> bool:
now = TimeUTC.now()
repetitionBase = a["options"]["currentPeriod"] \
if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
else a["options"]["previousPeriod"]
if TimeInterval.get(repetitionBase) is None:
logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
return False
return (a["options"]["renotifyInterval"] <= 0 or
a["options"].get("lastNotification") is None or
a["options"]["lastNotification"] <= 0 or
((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
def generate_notification(alert, result):
left = __format_value(result['value'])
right = __format_value(alert['query']['right'])
return {
"alertId": alert["alertId"],
"tenantId": alert["tenantId"],
"title": alert["name"],
"description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
"buttonText": "Check metrics for more details",
"buttonUrl": f"/{alert['projectId']}/metrics",
"imageUrl": None,
"projectId": alert["projectId"],
"projectName": alert["projectName"],
"options": {"source": "ALERT", "sourceId": alert["alertId"],
"sourceMeta": alert["detectionMethod"],
"message": alert["options"]["message"], "projectId": alert["projectId"],
"data": {"title": alert["name"],
"limitValue": alert["query"]["right"],
"actualValue": float(result["value"]) \
if isinstance(result["value"], decimal.Decimal) \
else result["value"],
"operator": alert["query"]["operator"],
"trigger": alert["query"]["left"],
"alertId": alert["alertId"],
"detectionMethod": alert["detectionMethod"],
"currentPeriod": alert["options"]["currentPeriod"],
"previousPeriod": alert["options"]["previousPeriod"],
"createdAt": TimeUTC.now()}},
}

View file

@ -1,10 +1,9 @@
from chalicelib.core.alerts.modules import TENANT_ID
from chalicelib.utils import pg_client, helper
def get_all_alerts():
with pg_client.PostgresClient(long_query=True) as cur:
query = f"""SELECT {TENANT_ID} AS tenant_id,
query = """SELECT -1 AS tenant_id,
alert_id,
projects.project_id,
projects.name AS project_name,

View file

@ -1,15 +1,17 @@
import decimal
import logging
from decouple import config
from pydantic_core._pydantic_core import ValidationError
import schemas
from chalicelib.core.alerts import alerts, alerts_listener
from chalicelib.core.alerts.modules import alert_helpers
from chalicelib.core.sessions import sessions_pg as sessions
from chalicelib.core import alerts
from chalicelib.core import alerts_listener
from chalicelib.core import sessions
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
logger = logging.getLogger(__name__)
logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
LeftToDb = {
schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
@ -34,6 +36,30 @@ LeftToDb = {
schemas.AlertColumn.PERFORMANCE__TIME_TO_RENDER__AVERAGE: {
"table": "events.pages INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(visually_complete,0))"},
schemas.AlertColumn.PERFORMANCE__IMAGE_LOAD_TIME__AVERAGE: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='img'"},
schemas.AlertColumn.PERFORMANCE__REQUEST_LOAD_TIME__AVERAGE: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(resources.duration,0))", "condition": "type='fetch'"},
schemas.AlertColumn.RESOURCES__LOAD_TIME__AVERAGE: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "AVG(NULLIF(resources.duration,0))"},
schemas.AlertColumn.RESOURCES__MISSING__COUNT: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(DISTINCT url_hostpath)", "condition": "success= FALSE AND type='img'"},
schemas.AlertColumn.ERRORS__4XX_5XX__COUNT: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
"condition": "status/100!=2"},
schemas.AlertColumn.ERRORS__4XX__COUNT: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=4"},
schemas.AlertColumn.ERRORS__5XX__COUNT: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(session_id)", "condition": "status/100=5"},
schemas.AlertColumn.ERRORS__JAVASCRIPT__IMPACTED_SESSIONS__COUNT: {
"table": "events.resources INNER JOIN public.sessions USING(session_id)",
"formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},
schemas.AlertColumn.PERFORMANCE__CRASHES__COUNT: {
"table": "public.sessions",
"formula": "COUNT(DISTINCT session_id)",
@ -46,6 +72,35 @@ LeftToDb = {
"formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
}
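Each LeftToDb entry supplies a FROM clause, an aggregate formula, and an optional filter condition; a query builder (Build, further down) then stitches the selected entry into SQL. A self-contained sketch of that stitching, under the assumption that the real builder works along these lines (its body is not shown in full in this hunk):

def build_metric_query(entry: dict) -> str:
    # Placeholders are bound later through the pg client, not interpolated here.
    where = ["project_id = %(project_id)s", "start_ts BETWEEN %(from)s AND %(to)s"]
    if entry.get("condition"):
        where.append(entry["condition"])
    return (f"SELECT {entry['formula']} AS value\n"
            f"FROM {entry['table']}\n"
            f"WHERE {' AND '.join(where)};")

print(build_metric_query({
    "table": "events.resources INNER JOIN public.sessions USING(session_id)",
    "formula": "COUNT(session_id)",
    "condition": "status/100=4",
}))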
# This is the frequency of execution for each threshold
TimeInterval = {
15: 3,
30: 5,
60: 10,
120: 20,
240: 30,
1440: 60,
}
def can_check(a) -> bool:
now = TimeUTC.now()
repetitionBase = a["options"]["currentPeriod"] \
if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
else a["options"]["previousPeriod"]
if TimeInterval.get(repetitionBase) is None:
logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
return False
return (a["options"]["renotifyInterval"] <= 0 or
a["options"].get("lastNotification") is None or
a["options"]["lastNotification"] <= 0 or
((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
def Build(a):
now = TimeUTC.now()
@ -132,12 +187,11 @@ def Build(a):
def process():
logger.info("> processing alerts on PG")
notifications = []
all_alerts = alerts_listener.get_all_alerts()
with pg_client.PostgresClient() as cur:
for alert in all_alerts:
if alert_helpers.can_check(alert):
if can_check(alert):
query, params = Build(alert)
try:
query = cur.mogrify(query, params)
@ -153,7 +207,7 @@ def process():
result = cur.fetchone()
if result["valid"]:
logger.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
notifications.append(alert_helpers.generate_notification(alert, result))
notifications.append(generate_notification(alert, result))
except Exception as e:
logger.error(
f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
@ -167,3 +221,42 @@ def process():
WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
if len(notifications) > 0:
alerts.process_notifications(notifications)
def __format_value(x):
if x % 1 == 0:
x = int(x)
else:
x = round(x, 2)
return f"{x:,}"
def generate_notification(alert, result):
left = __format_value(result['value'])
right = __format_value(alert['query']['right'])
return {
"alertId": alert["alertId"],
"tenantId": alert["tenantId"],
"title": alert["name"],
"description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
"buttonText": "Check metrics for more details",
"buttonUrl": f"/{alert['projectId']}/metrics",
"imageUrl": None,
"projectId": alert["projectId"],
"projectName": alert["projectName"],
"options": {"source": "ALERT", "sourceId": alert["alertId"],
"sourceMeta": alert["detectionMethod"],
"message": alert["options"]["message"], "projectId": alert["projectId"],
"data": {"title": alert["name"],
"limitValue": alert["query"]["right"],
"actualValue": float(result["value"]) \
if isinstance(result["value"], decimal.Decimal) \
else result["value"],
"operator": alert["query"]["operator"],
"trigger": alert["query"]["left"],
"alertId": alert["alertId"],
"detectionMethod": alert["detectionMethod"],
"currentPeriod": alert["options"]["currentPeriod"],
"previousPeriod": alert["options"]["previousPeriod"],
"createdAt": TimeUTC.now()}},
}

View file

@ -1,4 +1,3 @@
import logging
from os import access, R_OK
from os.path import exists as path_exists, getsize
@ -11,10 +10,22 @@ import schemas
from chalicelib.core import projects
from chalicelib.utils.TimeUTC import TimeUTC
logger = logging.getLogger(__name__)
ASSIST_KEY = config("ASSIST_KEY")
ASSIST_URL = config("ASSIST_URL") % ASSIST_KEY
SESSION_PROJECTION_COLS = """s.project_id,
s.session_id::text AS session_id,
s.user_uuid,
s.user_id,
s.user_agent,
s.user_os,
s.user_browser,
s.user_device,
s.user_device_type,
s.user_country,
s.start_ts,
s.user_anonymous_id,
s.platform
"""
def get_live_sessions_ws_user_id(project_id, user_id):
@ -55,21 +66,21 @@ def __get_live_sessions_ws(project_id, data):
results = requests.post(ASSIST_URL + config("assist") + f"/{project_key}",
json=data, timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
logger.error(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
logger.error(results.text)
print(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
print(results.text)
return {"total": 0, "sessions": []}
live_peers = results.json().get("data", [])
except requests.exceptions.Timeout:
logger.error("!! Timeout getting Assist response")
print("!! Timeout getting Assist response")
live_peers = {"total": 0, "sessions": []}
except Exception as e:
logger.error("!! Issue getting Live-Assist response")
logger.exception(e)
logger.error("expected JSON, received:")
print("!! Issue getting Live-Assist response")
print(str(e))
print("expected JSON, received:")
try:
logger.error(results.text)
print(results.text)
except:
logger.error("couldn't get response")
print("couldn't get response")
live_peers = {"total": 0, "sessions": []}
_live_peers = live_peers
if "sessions" in live_peers:
@ -95,7 +106,7 @@ def __get_agent_token(project_id, project_key, session_id):
"aud": f"openreplay:agent"
},
key=config("ASSIST_JWT_SECRET"),
algorithm=config("JWT_ALGORITHM")
algorithm=config("jwt_algorithm")
)
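__get_agent_token signs a token that the agent presents back to the assist service. A runnable PyJWT round-trip in the same shape; the secret and the claims besides the openreplay:agent audience are invented for the example (the real values come from ASSIST_JWT_SECRET and JWT_ALGORITHM):

import time
import jwt  # PyJWT

SECRET = "example-assist-secret"  # stand-in for ASSIST_JWT_SECRET

token = jwt.encode(
    {
        "sess_id": "12345",                 # hypothetical claim
        "iat": int(time.time()),
        "exp": int(time.time()) + 3600,
        "aud": "openreplay:agent",
    },
    key=SECRET,
    algorithm="HS256",
)
claims = jwt.decode(token, key=SECRET, algorithms=["HS256"], audience="openreplay:agent")
print(claims["sess_id"])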
@ -105,8 +116,8 @@ def get_live_session_by_id(project_id, session_id):
results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
logger.error(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
logger.error(results.text)
print(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
print(results.text)
return None
results = results.json().get("data")
if results is None:
@ -114,16 +125,16 @@ def get_live_session_by_id(project_id, session_id):
results["live"] = True
results["agentToken"] = __get_agent_token(project_id=project_id, project_key=project_key, session_id=session_id)
except requests.exceptions.Timeout:
logger.error("!! Timeout getting Assist response")
print("!! Timeout getting Assist response")
return None
except Exception as e:
logger.error("!! Issue getting Assist response")
logger.exception(e)
logger.error("expected JSON, received:")
print("!! Issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
logger.error(results.text)
print(results.text)
except:
logger.error("couldn't get response")
print("couldn't get response")
return None
return results
@ -135,21 +146,21 @@ def is_live(project_id, session_id, project_key=None):
results = requests.get(ASSIST_URL + config("assistList") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
logger.error(f"!! issue with the peer-server code:{results.status_code} for is_live")
logger.error(results.text)
print(f"!! issue with the peer-server code:{results.status_code} for is_live")
print(results.text)
return False
results = results.json().get("data")
except requests.exceptions.Timeout:
logger.error("!! Timeout getting Assist response")
print("!! Timeout getting Assist response")
return False
except Exception as e:
logger.error("!! Issue getting Assist response")
logger.exception(e)
logger.error("expected JSON, received:")
print("!! Issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
logger.error(results.text)
print(results.text)
except:
logger.error("couldn't get response")
print("couldn't get response")
return False
return str(session_id) == results
@ -164,27 +175,32 @@ def autocomplete(project_id, q: str, key: str = None):
ASSIST_URL + config("assistList") + f"/{project_key}/autocomplete",
params=params, timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
logger.error(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
logger.error(results.text)
print(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
print(results.text)
return {"errors": [f"Something went wrong wile calling assist:{results.text}"]}
results = results.json().get("data", [])
except requests.exceptions.Timeout:
logger.error("!! Timeout getting Assist response")
print("!! Timeout getting Assist response")
return {"errors": ["Assist request timeout"]}
except Exception as e:
logger.error("!! Issue getting Assist response")
logger.exception(e)
logger.error("expected JSON, received:")
print("!! Issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
logger.error(results.text)
print(results.text)
except:
logger.error("couldn't get response")
print("couldn't get response")
return {"errors": ["Something went wrong wile calling assist"]}
for r in results:
r["type"] = __change_keys(r["type"])
return {"data": results}
def get_ice_servers():
return config("iceServers") if config("iceServers", default=None) is not None \
and len(config("iceServers")) > 0 else None
def __get_efs_path():
efs_path = config("FS_DIR")
if not path_exists(efs_path):
@ -242,24 +258,24 @@ def session_exists(project_id, session_id):
results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
logger.error(f"!! issue with the peer-server code:{results.status_code} for session_exists")
logger.error(results.text)
print(f"!! issue with the peer-server code:{results.status_code} for session_exists")
print(results.text)
return None
results = results.json().get("data")
if results is None:
return False
return True
except requests.exceptions.Timeout:
logger.error("!! Timeout getting Assist response")
print("!! Timeout getting Assist response")
return False
except Exception as e:
logger.error("!! Issue getting Assist response")
logger.exception(e)
logger.error("expected JSON, received:")
print("!! Issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
logger.error(results.text)
print(results.text)
except:
logger.error("couldn't get response")
print("couldn't get response")
return False
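The same guard pattern recurs throughout this file: a short timeout, a status-code check, then Timeout and broad-exception fallbacks that log the raw response body when JSON parsing fails. A reusable sketch of that pattern (the URL and default value are placeholders, not project identifiers):

import logging
import requests

logger = logging.getLogger(__name__)

def fetch_json(url: str, timeout: float = 5.0, default=None):
    """GET url and return the parsed 'data' field, or default on any failure."""
    try:
        r = requests.get(url, timeout=timeout)
        if r.status_code != 200:
            logger.error("peer-server returned %s: %s", r.status_code, r.text)
            return default
        return r.json().get("data", default)
    except requests.exceptions.Timeout:
        logger.error("timeout calling %s", url)
        return default
    except Exception:
        logger.exception("unexpected failure calling %s", url)
        return default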

View file

@ -20,7 +20,7 @@ def is_spot_token(token: str) -> bool:
audience = decoded_token.get("aud")
return audience == spot.AUDIENCE
except jwt.InvalidTokenError:
logger.error(f"Invalid token for is_spot_token: {token}")
logger.error(f"Invalid token: {token}")
raise
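is_spot_token only needs the aud claim to decide which secret to verify with, so the token is presumably decoded without signature verification first (the decode call sits above this hunk). A small sketch of that peek with PyJWT, with invented token contents:

import jwt  # PyJWT

token = jwt.encode({"aud": "spot", "userId": 7}, key="any-secret", algorithm="HS256")

# Signature checks are skipped here on purpose: we only want to read 'aud'
# to pick the right secret; full verification happens afterwards.
claims = jwt.decode(token, options={"verify_signature": False})
print(claims.get("aud") == "spot")  # True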
@ -29,15 +29,16 @@ def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
return None
try:
payload = jwt.decode(jwt=token,
key=config("JWT_SECRET") if not is_spot_token(token) else config("JWT_SPOT_SECRET"),
algorithms=config("JWT_ALGORITHM"),
key=config("jwt_secret") if not is_spot_token(token) else config("JWT_SPOT_SECRET"),
algorithms=config("jwt_algorithm"),
audience=get_supported_audience(),
leeway=leeway)
except jwt.ExpiredSignatureError:
logger.debug("! JWT Expired signature")
return None
except BaseException as e:
logger.warning("! JWT Base Exception", exc_info=e)
logger.warning("! JWT Base Exception")
logger.debug(e)
return None
return payload
@ -48,14 +49,15 @@ def jwt_refresh_authorizer(scheme: str, token: str):
try:
payload = jwt.decode(jwt=token,
key=config("JWT_REFRESH_SECRET") if not is_spot_token(token) \
else config("JWT_SPOT_REFRESH_SECRET"),
algorithms=config("JWT_ALGORITHM"),
else config("JWT_SPOT_SECRET"),
algorithms=config("jwt_algorithm"),
audience=get_supported_audience())
except jwt.ExpiredSignatureError:
logger.debug("! JWT-refresh Expired signature")
return None
except BaseException as e:
logger.error("! JWT-refresh Base Exception", exc_info=e)
logger.warning("! JWT-refresh Base Exception")
logger.debug(e)
return None
return payload
@ -71,8 +73,8 @@ def generate_jwt(user_id, tenant_id, iat, aud, for_spot=False):
"iat": iat,
"aud": aud
},
key=config("JWT_SECRET") if not for_spot else config("JWT_SPOT_SECRET"),
algorithm=config("JWT_ALGORITHM")
key=config("jwt_secret") if not for_spot else config("JWT_SPOT_SECRET"),
algorithm=config("jwt_algorithm")
)
return token
@ -90,7 +92,7 @@ def generate_jwt_refresh(user_id, tenant_id, iat, aud, jwt_jti, for_spot=False):
"jti": jwt_jti
},
key=config("JWT_REFRESH_SECRET") if not for_spot else config("JWT_SPOT_REFRESH_SECRET"),
algorithm=config("JWT_ALGORITHM")
algorithm=config("jwt_algorithm")
)
return token
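Access and refresh tokens above are signed with distinct secrets (and spot tokens with their own pair), so a token can only be validated by the flow that minted it; refresh tokens additionally carry a jti so they can be revoked individually. A compact sketch of the two-secret scheme, with secrets and claims invented for the example:

import time
import jwt  # PyJWT

ACCESS_SECRET, REFRESH_SECRET = "access-secret", "refresh-secret"  # stand-ins

def mint(user_id: int, refresh: bool = False) -> str:
    payload = {"userId": user_id, "iat": int(time.time()), "aud": "openreplay:api"}
    if refresh:
        payload["jti"] = "refresh-1"  # unique id so refresh tokens can be revoked
    return jwt.encode(payload, key=REFRESH_SECRET if refresh else ACCESS_SECRET,
                      algorithm="HS256")

# A refresh token does not validate against the access secret:
try:
    jwt.decode(mint(7, refresh=True), key=ACCESS_SECRET, algorithms=["HS256"],
               audience="openreplay:api")
except jwt.InvalidSignatureError:
    print("refresh token rejected by access-token flow")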

View file

@ -61,11 +61,11 @@ def __get_autocomplete_table(value, project_id):
try:
cur.execute(query)
except Exception as err:
logger.exception("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
logger.exception(query.decode('UTF-8'))
logger.exception("--------- VALUE -----------")
logger.exception(value)
logger.exception("--------------------")
print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
print(query.decode('UTF-8'))
print("--------- VALUE -----------")
print(value)
print("--------------------")
raise err
results = cur.fetchall()
for r in results:
@ -85,8 +85,7 @@ def __generic_query(typename, value_length=None):
ORDER BY value"""
if value_length is None or value_length > 2:
return f"""SELECT DISTINCT ON(value,type) value, type
((SELECT DISTINCT value, type
return f"""(SELECT DISTINCT value, type
FROM {TABLE}
WHERE
project_id = %(project_id)s
@ -102,7 +101,7 @@ def __generic_query(typename, value_length=None):
AND type='{typename.upper()}'
AND value ILIKE %(value)s
ORDER BY value
LIMIT 5)) AS raw;"""
LIMIT 5);"""
return f"""SELECT DISTINCT value, type
FROM {TABLE}
WHERE
@ -125,7 +124,7 @@ def __generic_autocomplete(event: Event):
return f
def generic_autocomplete_metas(typename):
def __generic_autocomplete_metas(typename):
def f(project_id, text):
with pg_client.PostgresClient() as cur:
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
@ -327,7 +326,7 @@ def __search_metadata(project_id, value, key=None, source=None):
AND {colname} ILIKE %(svalue)s LIMIT 5)""")
with pg_client.PostgresClient() as cur:
cur.execute(cur.mogrify(f"""\
SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
SELECT key, value, 'METADATA' AS TYPE
FROM({" UNION ALL ".join(sub_from)}) AS all_metas
LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)}))

View file

@ -1,8 +1,7 @@
from chalicelib.core import projects
from chalicelib.core import users
from chalicelib.core.log_tools import datadog, stackdriver, sentry
from chalicelib.core.modules import TENANT_CONDITION
from chalicelib.utils import pg_client
from chalicelib.core import projects, log_tool_datadog, log_tool_stackdriver, log_tool_sentry
from chalicelib.core import users
def get_state(tenant_id):
@ -13,61 +12,47 @@ def get_state(tenant_id):
if len(pids) > 0:
cur.execute(
cur.mogrify(
"""SELECT EXISTS(( SELECT 1
cur.mogrify("""SELECT EXISTS(( SELECT 1
FROM public.sessions AS s
WHERE s.project_id IN %(ids)s)) AS exists;""",
{"ids": tuple(pids)},
)
{"ids": tuple(pids)})
)
recorded = cur.fetchone()["exists"]
meta = False
if recorded:
query = cur.mogrify(
f"""SELECT EXISTS((SELECT 1
cur.execute("""SELECT EXISTS((SELECT 1
FROM public.projects AS p
LEFT JOIN LATERAL ( SELECT 1
FROM public.sessions
WHERE sessions.project_id = p.project_id
AND sessions.user_id IS NOT NULL
LIMIT 1) AS sessions(user_id) ON (TRUE)
WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
WHERE p.deleted_at ISNULL
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
OR p.metadata_10 IS NOT NULL )
)) AS exists;""",
{"tenant_id": tenant_id},
)
cur.execute(query)
)) AS exists;""")
meta = cur.fetchone()["exists"]
return [
{
"task": "Install OpenReplay",
"done": recorded,
"URL": "https://docs.openreplay.com/getting-started/quick-start",
},
{
"task": "Identify Users",
"done": meta,
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
},
{
"task": "Invite Team Members",
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
"URL": "https://app.openreplay.com/client/manage-users",
},
{
"task": "Integrations",
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
or len(sentry.get_all(tenant_id=tenant_id)) > 0
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
"URL": "https://docs.openreplay.com/integrations",
},
{"task": "Install OpenReplay",
"done": recorded,
"URL": "https://docs.openreplay.com/getting-started/quick-start"},
{"task": "Identify Users",
"done": meta,
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
{"task": "Invite Team Members",
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
"URL": "https://app.openreplay.com/client/manage-users"},
{"task": "Integrations",
"done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
"URL": "https://docs.openreplay.com/integrations"}
]
@ -78,66 +63,52 @@ def get_state_installing(tenant_id):
if len(pids) > 0:
cur.execute(
cur.mogrify(
"""SELECT EXISTS(( SELECT 1
cur.mogrify("""SELECT EXISTS(( SELECT 1
FROM public.sessions AS s
WHERE s.project_id IN %(ids)s)) AS exists;""",
{"ids": tuple(pids)},
)
{"ids": tuple(pids)})
)
recorded = cur.fetchone()["exists"]
return {
"task": "Install OpenReplay",
"done": recorded,
"URL": "https://docs.openreplay.com/getting-started/quick-start",
}
return {"task": "Install OpenReplay",
"done": recorded,
"URL": "https://docs.openreplay.com/getting-started/quick-start"}
def get_state_identify_users(tenant_id):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
f"""SELECT EXISTS((SELECT 1
cur.execute("""SELECT EXISTS((SELECT 1
FROM public.projects AS p
LEFT JOIN LATERAL ( SELECT 1
FROM public.sessions
WHERE sessions.project_id = p.project_id
AND sessions.user_id IS NOT NULL
LIMIT 1) AS sessions(user_id) ON (TRUE)
WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
WHERE p.deleted_at ISNULL
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
OR p.metadata_10 IS NOT NULL )
)) AS exists;""",
{"tenant_id": tenant_id},
)
cur.execute(query)
)) AS exists;""")
meta = cur.fetchone()["exists"]
return {
"task": "Identify Users",
"done": meta,
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
}
return {"task": "Identify Users",
"done": meta,
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"}
def get_state_manage_users(tenant_id):
return {
"task": "Invite Team Members",
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
"URL": "https://app.openreplay.com/client/manage-users",
}
return {"task": "Invite Team Members",
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
"URL": "https://app.openreplay.com/client/manage-users"}
def get_state_integrations(tenant_id):
return {
"task": "Integrations",
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
or len(sentry.get_all(tenant_id=tenant_id)) > 0
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
"URL": "https://docs.openreplay.com/integrations",
}
return {"task": "Integrations",
"done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
"URL": "https://docs.openreplay.com/integrations"}

View file

@ -1,4 +1,4 @@
from chalicelib.utils import pg_client
from chalicelib.utils import pg_client, helper
from chalicelib.utils.storage import StorageClient
from decouple import config

View file

@ -6,7 +6,7 @@ from fastapi import HTTPException, status
import schemas
from chalicelib.core import webhook
from chalicelib.core.collaborations.collaboration_base import BaseCollaboration
from chalicelib.core.collaboration_base import BaseCollaboration
logger = logging.getLogger(__name__)
@ -26,23 +26,17 @@ class MSTeams(BaseCollaboration):
@classmethod
def say_hello(cls, url):
try:
r = requests.post(
url=url,
json={
"@type": "MessageCard",
"@context": "https://schema.org/extensions",
"summary": "Welcome to OpenReplay",
"title": "Welcome to OpenReplay"
},
timeout=3)
if r.status_code != 200:
logger.warning("MSTeams integration failed")
logger.warning(r.text)
return False
except Exception as e:
logger.warning("!!! MSTeams integration failed")
logger.exception(e)
r = requests.post(
url=url,
json={
"@type": "MessageCard",
"@context": "https://schema.org/extensions",
"summary": "Welcome to OpenReplay",
"title": "Welcome to OpenReplay"
})
if r.status_code != 200:
logger.warning("MSTeams integration failed")
logger.warning(r.text)
return False
return True

View file

@ -6,7 +6,7 @@ from fastapi import HTTPException, status
import schemas
from chalicelib.core import webhook
from chalicelib.core.collaborations.collaboration_base import BaseCollaboration
from chalicelib.core.collaboration_base import BaseCollaboration
class Slack(BaseCollaboration):

View file

@ -1 +0,0 @@
from . import collaboration_base as _

View file

@ -1,17 +1,32 @@
import json
import logging
from decouple import config
from fastapi import HTTPException, status
import schemas
from chalicelib.core import issues
from chalicelib.core.errors import errors
from chalicelib.core.metrics import heatmaps, product_analytics, funnels
from chalicelib.core.sessions import sessions, sessions_search
from chalicelib.core import sessions, funnels, errors, issues, heatmaps, sessions_mobs, product_analytics, \
custom_metrics_predefined
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.storage import StorageClient
logger = logging.getLogger(__name__)
PIE_CHART_GROUP = 5
# TODO: refactor this to split
# timeseries /
# table of errors / table of issues / table of browsers / table of devices / table of countries / table of URLs
# remove "table of" calls from this function
def __try_live(project_id, data: schemas.CardSchema):
results = []
for i, s in enumerate(data.series):
results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
view_type=data.view_type, metric_type=data.metric_type,
metric_of=data.metric_of, metric_value=data.metric_value))
return results
def __get_table_of_series(project_id, data: schemas.CardSchema):
@ -24,66 +39,60 @@ def __get_table_of_series(project_id, data: schemas.CardSchema):
return results
def __get_funnel_chart(project: schemas.ProjectContext, data: schemas.CardFunnel, user_id: int = None):
def __get_funnel_chart(project_id: int, data: schemas.CardFunnel, user_id: int = None):
if len(data.series) == 0:
return {
"stages": [],
"totalDropDueToIssues": 0
}
return funnels.get_simple_funnel(project=project,
data=data.series[0].filter,
metric_format=data.metric_format)
return funnels.get_top_insights_on_the_fly_widget(project_id=project_id,
data=data.series[0].filter,
metric_format=data.metric_format)
def __get_errors_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
def __get_errors_list(project_id, user_id, data: schemas.CardSchema):
if len(data.series) == 0:
return {
"total": 0,
"errors": []
}
return errors.search(data.series[0].filter, project=project, user_id=user_id)
return errors.search(data.series[0].filter, project_id=project_id, user_id=user_id)
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
def __get_sessions_list(project_id, user_id, data: schemas.CardSchema):
if len(data.series) == 0:
logger.debug("empty series")
return {
"total": 0,
"sessions": []
}
return sessions_search.search_sessions(data=data.series[0].filter, project=project, user_id=user_id)
return sessions.search_sessions(data=data.series[0].filter, project_id=project_id, user_id=user_id)
def get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
include_mobs: bool = True):
def __get_heat_map_chart(project_id, user_id, data: schemas.CardHeatMap, include_mobs: bool = True):
if len(data.series) == 0:
return None
data.series[0].filter.filters += data.series[0].filter.events
data.series[0].filter.events = []
return heatmaps.search_short_session(project_id=project.project_id, user_id=user_id,
return heatmaps.search_short_session(project_id=project_id, user_id=user_id,
data=schemas.HeatMapSessionsSearch(
**data.series[0].filter.model_dump()),
include_mobs=include_mobs)
def __get_path_analysis_chart(project: schemas.ProjectContext, user_id: int, data: schemas.CardPathAnalysis):
def __get_path_analysis_chart(project_id: int, user_id: int, data: schemas.CardPathAnalysis):
if len(data.series) == 0:
data.series.append(
schemas.CardPathAnalysisSeriesSchema(startTimestamp=data.startTimestamp, endTimestamp=data.endTimestamp))
elif not isinstance(data.series[0].filter, schemas.PathAnalysisSchema):
data.series[0].filter = schemas.PathAnalysisSchema()
return product_analytics.path_analysis(project_id=project.project_id, data=data)
return product_analytics.path_analysis(project_id=project_id, data=data)
def __get_timeseries_chart(project: schemas.ProjectContext, data: schemas.CardTimeSeries, user_id: int = None):
series_charts = []
for i, s in enumerate(data.series):
series_charts.append(sessions.search2_series(data=s.filter, project_id=project.project_id, density=data.density,
metric_type=data.metric_type, metric_of=data.metric_of,
metric_value=data.metric_value))
def __get_timeseries_chart(project_id: int, data: schemas.CardTimeSeries, user_id: int = None):
series_charts = __try_live(project_id=project_id, data=data)
results = [{}] * len(series_charts[0])
for i in range(len(results)):
for j, series_chart in enumerate(series_charts):
@ -96,47 +105,47 @@ def not_supported(**args):
raise Exception("not supported")
def __get_table_of_user_ids(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project.project_id, data=data)
def __get_table_of_user_ids(project_id: int, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_sessions(project: schemas.ProjectContext, data: schemas.CardTable, user_id):
return __get_sessions_list(project=project, user_id=user_id, data=data)
def __get_table_of_sessions(project_id: int, data: schemas.CardTable, user_id):
return __get_sessions_list(project_id=project_id, user_id=user_id, data=data)
def __get_table_of_errors(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
return __get_errors_list(project=project, user_id=user_id, data=data)
def __get_table_of_errors(project_id: int, data: schemas.CardTable, user_id: int):
return __get_errors_list(project_id=project_id, user_id=user_id, data=data)
def __get_table_of_issues(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project.project_id, data=data)
def __get_table_of_issues(project_id: int, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_browsers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project.project_id, data=data)
def __get_table_of_browsers(project_id: int, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_devises(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project.project_id, data=data)
def __get_table_of_devises(project_id: int, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_countries(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project.project_id, data=data)
def __get_table_of_countries(project_id: int, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_urls(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project.project_id, data=data)
def __get_table_of_urls(project_id: int, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_referrers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project.project_id, data=data)
def __get_table_of_referrers(project_id: int, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project_id, data=data)
def __get_table_of_requests(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project.project_id, data=data)
def __get_table_of_requests(project_id: int, data: schemas.CardTable, user_id: int = None):
return __get_table_of_series(project_id=project_id, data=data)
def __get_table_chart(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
def __get_table_chart(project_id: int, data: schemas.CardTable, user_id: int):
supported = {
schemas.MetricOfTable.SESSIONS: __get_table_of_sessions,
schemas.MetricOfTable.ERRORS: __get_table_of_errors,
@ -149,32 +158,93 @@ def __get_table_chart(project: schemas.ProjectContext, data: schemas.CardTable,
schemas.MetricOfTable.REFERRER: __get_table_of_referrers,
schemas.MetricOfTable.FETCH: __get_table_of_requests
}
return supported.get(data.metric_of, not_supported)(project=project, data=data, user_id=user_id)
return supported.get(data.metric_of, not_supported)(project_id=project_id, data=data, user_id=user_id)
def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id: int):
def get_chart(project_id: int, data: schemas.CardSchema, user_id: int):
if data.is_predefined:
return custom_metrics_predefined.get_metric(key=data.metric_of,
project_id=project_id,
data=data.model_dump())
supported = {
schemas.MetricType.TIMESERIES: __get_timeseries_chart,
schemas.MetricType.TABLE: __get_table_chart,
schemas.MetricType.HEAT_MAP: get_heat_map_chart,
schemas.MetricType.HEAT_MAP: __get_heat_map_chart,
schemas.MetricType.FUNNEL: __get_funnel_chart,
schemas.MetricType.INSIGHTS: not_supported,
schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
}
return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)
return supported.get(data.metric_type, not_supported)(project_id=project_id, data=data, user_id=user_id)
def get_sessions_by_card_id(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
if not card_exists(metric_id=metric_id, project_id=project.project_id, user_id=user_id):
# def __merge_metric_with_data(metric: schemas.CardSchema,
# data: schemas.CardSessionsSchema) -> schemas.CardSchema:
# metric.startTimestamp = data.startTimestamp
# metric.endTimestamp = data.endTimestamp
# metric.page = data.page
# metric.limit = data.limit
# metric.density = data.density
# if data.series is not None and len(data.series) > 0:
# metric.series = data.series
#
# # if len(data.filters) > 0:
# # for s in metric.series:
# # s.filter.filters += data.filters
# # metric = schemas.CardSchema(**metric.model_dump(by_alias=True))
# return metric
def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
# No need for this because UI is sending the full payload
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
# if card is None:
# return None
# metric: schemas.CardSchema = schemas.CardSchema(**card)
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
return None
results = []
for s in data.series:
results.append({"seriesId": s.series_id, "seriesName": s.name,
**sessions_search.search_sessions(data=s.filter, project=project, user_id=user_id)})
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
return results
def get_sessions(project: schemas.ProjectContext, user_id, data: schemas.CardSessionsSchema):
def get_funnel_issues(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
# No need for this because UI is sending the full payload
# raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
# if raw_metric is None:
# return None
# metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
# if metric is None:
# return None
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
return None
for s in data.series:
return {"seriesId": s.series_id, "seriesName": s.name,
**funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter)}
def get_errors_list(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
# No need for this because UI is sending the full payload
# raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
# if raw_metric is None:
# return None
# metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
# if metric is None:
# return None
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
return None
for s in data.series:
return {"seriesId": s.series_id, "seriesName": s.name,
**errors.search(data=s.filter, project_id=project_id, user_id=user_id)}
def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
results = []
if len(data.series) == 0:
return results
@ -184,45 +254,85 @@ def get_sessions(project: schemas.ProjectContext, user_id, data: schemas.CardSes
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
results.append({"seriesId": None, "seriesName": s.name,
**sessions_search.search_sessions(data=s.filter, project=project, user_id=user_id)})
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
return results
def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.CardSchema):
def __get_funnel_issues(project_id: int, user_id: int, data: schemas.CardFunnel):
if len(data.series) == 0:
return []
data.series[0].filter.startTimestamp = data.startTimestamp
data.series[0].filter.endTimestamp = data.endTimestamp
data = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)
return data
def __get_path_analysis_issues(project_id: int, user_id: int, data: schemas.CardPathAnalysis):
if len(data.filters) > 0 or len(data.series) > 0:
filters = [f.model_dump(by_alias=True) for f in data.filters] \
+ [f.model_dump(by_alias=True) for f in data.series[0].filter.filters]
else:
return []
search_data = schemas.SessionsSearchPayloadSchema(
startTimestamp=data.startTimestamp,
endTimestamp=data.endTimestamp,
limit=data.limit,
page=data.page,
filters=filters
)
# ---- To make issues response close to the chart response
search_data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
operator=schemas.MathOperator.GREATER,
value=[1]))
if len(data.start_point) == 0:
search_data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
operator=schemas.SearchEventOperator.IS_ANY,
value=[]))
# ---- End
for s in data.excludes:
search_data.events.append(schemas.SessionSearchEventSchema2(type=s.type,
operator=schemas.SearchEventOperator.NOT_ON,
value=s.value))
result = sessions.search_table_of_individual_issues(project_id=project_id, data=search_data)
return result
def get_issues(project_id: int, user_id: int, data: schemas.CardSchema):
if data.is_predefined:
return not_supported()
if data.metric_of == schemas.MetricOfTable.ISSUES:
return __get_table_of_issues(project=project, user_id=user_id, data=data)
return __get_table_of_issues(project_id=project_id, user_id=user_id, data=data)
supported = {
schemas.MetricType.TIMESERIES: not_supported,
schemas.MetricType.TABLE: not_supported,
schemas.MetricType.HEAT_MAP: not_supported,
schemas.MetricType.PATH_ANALYSIS: not_supported,
schemas.MetricType.FUNNEL: __get_funnel_issues,
schemas.MetricType.INSIGHTS: not_supported,
schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_issues,
}
return supported.get(data.metric_type, not_supported)()
return supported.get(data.metric_type, not_supported)(project_id=project_id, data=data, user_id=user_id)
def get_global_card_info(data: schemas.CardSchema):
r = {"hideExcess": data.hide_excess, "compareTo": data.compare_to, "rows": data.rows}
return r
def get_path_analysis_card_info(data: schemas.CardPathAnalysis):
def __get_path_analysis_card_info(data: schemas.CardPathAnalysis):
r = {"start_point": [s.model_dump() for s in data.start_point],
"start_type": data.start_type,
"excludes": [e.model_dump() for e in data.excludes],
"rows": data.rows}
"hideExcess": data.hide_excess}
return r
def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSchema, dashboard=False):
def create_card(project_id, user_id, data: schemas.CardSchema, dashboard=False):
with pg_client.PostgresClient() as cur:
session_data = None
if data.metric_type == schemas.MetricType.HEAT_MAP:
if data.session_id is not None:
session_data = {"sessionId": data.session_id}
else:
session_data = get_heat_map_chart(project=project, user_id=user_id,
data=data, include_mobs=False)
session_data = __get_heat_map_chart(project_id=project_id, user_id=user_id,
data=data, include_mobs=False)
if session_data is not None:
session_data = {"sessionId": session_data["sessionId"]}
@ -233,12 +343,11 @@ def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSche
_data[f"index_{i}"] = i
_data[f"filter_{i}"] = s.filter.json()
series_len = len(data.series)
params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
"default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
params["card_info"] = get_global_card_info(data=data)
params = {"user_id": user_id, "project_id": project_id, **data.model_dump(), **_data}
params["default_config"] = json.dumps(data.default_config.model_dump())
params["card_info"] = None
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
params["card_info"] = {**params["card_info"], **get_path_analysis_card_info(data=data)}
params["card_info"] = json.dumps(params["card_info"])
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
view_type, metric_type, metric_of, metric_value,
@ -261,7 +370,7 @@ def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSche
r = cur.fetchone()
if dashboard:
return r["metric_id"]
return {"data": get_card(metric_id=r["metric_id"], project_id=project.project_id, user_id=user_id)}
return {"data": get_card(metric_id=r["metric_id"], project_id=project_id, user_id=user_id)}
def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
@ -298,18 +407,16 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
if i not in u_series_ids:
d_series_ids.append(i)
params["d_series_ids"] = tuple(d_series_ids)
params["card_info"] = None
params["session_data"] = json.dumps(metric["data"])
params["card_info"] = get_global_card_info(data=data)
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
params["card_info"] = {**params["card_info"], **get_path_analysis_card_info(data=data)}
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
elif data.metric_type == schemas.MetricType.HEAT_MAP:
if data.session_id is not None:
params["session_data"] = json.dumps({"sessionId": data.session_id})
elif metric.get("data") and metric["data"].get("sessionId"):
params["session_data"] = json.dumps({"sessionId": metric["data"]["sessionId"]})
params["card_info"] = json.dumps(params["card_info"])
with pg_client.PostgresClient() as cur:
sub_queries = []
if len(n_series) > 0:
@ -352,100 +459,6 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, include_series=False):
constraints = ["metrics.project_id = %(project_id)s", "metrics.deleted_at ISNULL"]
params = {
"project_id": project_id,
"user_id": user_id,
"offset": (data.page - 1) * data.limit,
"limit": data.limit,
}
if data.mine_only:
constraints.append("user_id = %(user_id)s")
else:
constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
if data.shared_only:
constraints.append("is_public")
if data.filter is not None:
if data.filter.type:
constraints.append("metrics.metric_type = %(filter_type)s")
params["filter_type"] = data.filter.type
if data.filter.query and len(data.filter.query) > 0:
constraints.append("(metrics.name ILIKE %(filter_query)s OR owner.owner_name ILIKE %(filter_query)s)")
params["filter_query"] = helper.values_for_operator(
value=data.filter.query, op=schemas.SearchEventOperator.CONTAINS
)
with pg_client.PostgresClient() as cur:
sub_join = ""
if include_series:
sub_join = """LEFT JOIN LATERAL (
SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
FROM metric_series
WHERE metric_series.metric_id = metrics.metric_id
AND metric_series.deleted_at ISNULL
) AS metric_series ON (TRUE)"""
sort_column = data.sort.field if data.sort.field is not None and len(data.sort.field) > 0 \
else "created_at"
# change ascend to asc and descend to desc
sort_order = data.sort.order.value if hasattr(data.sort.order, "value") else data.sort.order
if sort_order == "ascend":
sort_order = "asc"
elif sort_order == "descend":
sort_order = "desc"
query = cur.mogrify(
f"""SELECT count(1) OVER () AS total,metric_id, project_id, user_id, name, is_public, created_at, edited_at,
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
dashboards, owner_email, owner_name, default_config AS config, thumbnail
FROM metrics
{sub_join}
LEFT JOIN LATERAL (
SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public, name),'[]'::jsonb) AS dashboards
FROM (
SELECT DISTINCT dashboard_id, name, is_public
FROM dashboards
INNER JOIN dashboard_widgets USING (dashboard_id)
WHERE deleted_at ISNULL
AND dashboard_widgets.metric_id = metrics.metric_id
AND project_id = %(project_id)s
AND ((dashboards.user_id = %(user_id)s OR is_public))
) AS connected_dashboards
) AS connected_dashboards ON (TRUE)
LEFT JOIN LATERAL (
SELECT email AS owner_email, name AS owner_name
FROM users
WHERE deleted_at ISNULL
AND users.user_id = metrics.user_id
) AS owner ON (TRUE)
WHERE {" AND ".join(constraints)}
ORDER BY {sort_column} {sort_order}
LIMIT %(limit)s OFFSET %(offset)s;""",
params
)
cur.execute(query)
rows = cur.fetchall()
if len(rows) > 0:
total = rows[0]["total"]
if include_series:
for r in rows:
r.pop("total")
for s in r.get("series", []):
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
else:
for r in rows:
r.pop("total")
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
rows = helper.list_to_camel_case(rows)
else:
total = 0
return {"total": total, "list": rows}
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
constraints = ["metrics.project_id = %(project_id)s",
"metrics.deleted_at ISNULL"]
@ -538,16 +551,8 @@ def delete_card(project_id, metric_id, user_id):
return {"state": "success"}
def __get_global_attributes(row):
if row is None or row.get("cardInfo") is None:
return row
card_info = row.get("cardInfo", {})
row["compareTo"] = card_info["compareTo"] if card_info.get("compareTo") is not None else []
return row
def __get_path_analysis_attributes(row):
card_info = row.get("cardInfo", {})
card_info = row.pop("cardInfo")
row["excludes"] = card_info.get("excludes", [])
row["startPoint"] = card_info.get("startPoint", [])
row["startType"] = card_info.get("startType", "start")
@ -600,8 +605,6 @@ def get_card(metric_id, project_id, user_id, flatten: bool = True, include_data:
row = helper.dict_to_camel_case(row)
if row["metricType"] == schemas.MetricType.PATH_ANALYSIS:
row = __get_path_analysis_attributes(row=row)
row = __get_global_attributes(row=row)
row.pop("cardInfo")
return row
@ -643,7 +646,17 @@ def change_state(project_id, metric_id, user_id, status):
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
data: schemas.CardSessionsSchema):
data: schemas.CardSessionsSchema
# , range_value=None, start_date=None, end_date=None
):
# No need for this because UI is sending the full payload
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
# if card is None:
# return None
# metric: schemas.CardSchema = schemas.CardSchema(**card)
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
# if metric is None:
# return None
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
return None
for s in data.series:
@ -674,8 +687,8 @@ def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
"issue": issue}
def make_chart_from_card(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
raw_metric: dict = get_card(metric_id=metric_id, project_id=project.project_id, user_id=user_id, include_data=True)
def make_chart_from_card(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
raw_metric: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, include_data=True)
if raw_metric is None:
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="card not found")
@ -685,16 +698,20 @@ def make_chart_from_card(project: schemas.ProjectContext, user_id, metric_id, da
raw_metric["density"] = data.density
metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
if metric.metric_type == schemas.MetricType.HEAT_MAP:
if metric.is_predefined:
return custom_metrics_predefined.get_metric(key=metric.metric_of,
project_id=project_id,
data=data.model_dump())
elif metric.metric_type == schemas.MetricType.HEAT_MAP:
if raw_metric["data"] and raw_metric["data"].get("sessionId"):
return heatmaps.get_selected_session(project_id=project.project_id,
return heatmaps.get_selected_session(project_id=project_id,
session_id=raw_metric["data"]["sessionId"])
else:
return heatmaps.search_short_session(project_id=project.project_id,
return heatmaps.search_short_session(project_id=project_id,
data=schemas.HeatMapSessionsSearch(**metric.model_dump()),
user_id=user_id)
return get_chart(project=project, data=metric, user_id=user_id)
return get_chart(project_id=project_id, data=metric, user_id=user_id)
def card_exists(metric_id, project_id, user_id) -> bool:

View file

@ -0,0 +1,59 @@
import logging
from typing import Union
import schemas
from chalicelib.core import metrics
logger = logging.getLogger(__name__)
def get_metric(key: Union[schemas.MetricOfWebVitals, schemas.MetricOfErrors, \
schemas.MetricOfPerformance, schemas.MetricOfResources], project_id: int, data: dict):
supported = {schemas.MetricOfWebVitals.COUNT_SESSIONS: metrics.get_processed_sessions,
schemas.MetricOfWebVitals.AVG_IMAGE_LOAD_TIME: metrics.get_application_activity_avg_image_load_time,
schemas.MetricOfWebVitals.AVG_PAGE_LOAD_TIME: metrics.get_application_activity_avg_page_load_time,
schemas.MetricOfWebVitals.AVG_REQUEST_LOAD_TIME: metrics.get_application_activity_avg_request_load_time,
schemas.MetricOfWebVitals.AVG_DOM_CONTENT_LOAD_START: metrics.get_page_metrics_avg_dom_content_load_start,
schemas.MetricOfWebVitals.AVG_FIRST_CONTENTFUL_PIXEL: metrics.get_page_metrics_avg_first_contentful_pixel,
schemas.MetricOfWebVitals.AVG_VISITED_PAGES: metrics.get_user_activity_avg_visited_pages,
schemas.MetricOfWebVitals.AVG_SESSION_DURATION: metrics.get_user_activity_avg_session_duration,
schemas.MetricOfWebVitals.AVG_PAGES_DOM_BUILDTIME: metrics.get_pages_dom_build_time,
schemas.MetricOfWebVitals.AVG_PAGES_RESPONSE_TIME: metrics.get_pages_response_time,
schemas.MetricOfWebVitals.AVG_RESPONSE_TIME: metrics.get_top_metrics_avg_response_time,
schemas.MetricOfWebVitals.AVG_FIRST_PAINT: metrics.get_top_metrics_avg_first_paint,
schemas.MetricOfWebVitals.AVG_DOM_CONTENT_LOADED: metrics.get_top_metrics_avg_dom_content_loaded,
schemas.MetricOfWebVitals.AVG_TILL_FIRST_BYTE: metrics.get_top_metrics_avg_till_first_bit,
schemas.MetricOfWebVitals.AVG_TIME_TO_INTERACTIVE: metrics.get_top_metrics_avg_time_to_interactive,
schemas.MetricOfWebVitals.COUNT_REQUESTS: metrics.get_top_metrics_count_requests,
schemas.MetricOfWebVitals.AVG_TIME_TO_RENDER: metrics.get_time_to_render,
schemas.MetricOfWebVitals.AVG_USED_JS_HEAP_SIZE: metrics.get_memory_consumption,
schemas.MetricOfWebVitals.AVG_CPU: metrics.get_avg_cpu,
schemas.MetricOfWebVitals.AVG_FPS: metrics.get_avg_fps,
schemas.MetricOfErrors.IMPACTED_SESSIONS_BY_JS_ERRORS: metrics.get_impacted_sessions_by_js_errors,
schemas.MetricOfErrors.DOMAINS_ERRORS_4XX: metrics.get_domains_errors_4xx,
schemas.MetricOfErrors.DOMAINS_ERRORS_5XX: metrics.get_domains_errors_5xx,
schemas.MetricOfErrors.ERRORS_PER_DOMAINS: metrics.get_errors_per_domains,
schemas.MetricOfErrors.CALLS_ERRORS: metrics.get_calls_errors,
schemas.MetricOfErrors.ERRORS_PER_TYPE: metrics.get_errors_per_type,
schemas.MetricOfErrors.RESOURCES_BY_PARTY: metrics.get_resources_by_party,
schemas.MetricOfPerformance.SPEED_LOCATION: metrics.get_speed_index_location,
schemas.MetricOfPerformance.SLOWEST_DOMAINS: metrics.get_slowest_domains,
schemas.MetricOfPerformance.SESSIONS_PER_BROWSER: metrics.get_sessions_per_browser,
schemas.MetricOfPerformance.TIME_TO_RENDER: metrics.get_time_to_render,
schemas.MetricOfPerformance.IMPACTED_SESSIONS_BY_SLOW_PAGES: metrics.get_impacted_sessions_by_slow_pages,
schemas.MetricOfPerformance.MEMORY_CONSUMPTION: metrics.get_memory_consumption,
schemas.MetricOfPerformance.CPU: metrics.get_avg_cpu,
schemas.MetricOfPerformance.FPS: metrics.get_avg_fps,
schemas.MetricOfPerformance.CRASHES: metrics.get_crashes,
schemas.MetricOfPerformance.RESOURCES_VS_VISUALLY_COMPLETE: metrics.get_resources_vs_visually_complete,
schemas.MetricOfPerformance.PAGES_DOM_BUILDTIME: metrics.get_pages_dom_build_time,
schemas.MetricOfPerformance.PAGES_RESPONSE_TIME: metrics.get_pages_response_time,
schemas.MetricOfPerformance.PAGES_RESPONSE_TIME_DISTRIBUTION: metrics.get_pages_response_time_distribution,
schemas.MetricOfResources.MISSING_RESOURCES: metrics.get_missing_resources_trend,
schemas.MetricOfResources.SLOWEST_RESOURCES: metrics.get_slowest_resources,
schemas.MetricOfResources.RESOURCES_LOADING_TIME: metrics.get_resources_loading_time,
schemas.MetricOfResources.RESOURCE_TYPE_VS_RESPONSE_END: metrics.resource_type_vs_response_end,
schemas.MetricOfResources.RESOURCES_COUNT_BY_TYPE: metrics.get_resources_count_by_type,
schemas.MetricOfWebVitals.COUNT_USERS: metrics.get_unique_users, }
return supported.get(key, lambda *args, **kwargs: None)(project_id=project_id, **data)
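One pitfall in this dispatch-table-with-default pattern: the call site passes keyword arguments, so the fallback must accept them too (hence *args, **kwargs above; a bare *args lambda would raise TypeError on unknown keys). A minimal runnable version with a made-up handler:

def count_sessions(project_id: int, **data):
    return {"metric": "count_sessions", "project_id": project_id, **data}

SUPPORTED = {"count_sessions": count_sessions}

def get_metric(key: str, project_id: int, data: dict):
    # The fallback must mirror the handlers' signature or the call raises TypeError.
    handler = SUPPORTED.get(key, lambda *args, **kwargs: None)
    return handler(project_id=project_id, **data)

print(get_metric("count_sessions", 1, {"density": 7}))
print(get_metric("unknown_key", 1, {}))  # -> None instead of a TypeError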

View file

@ -1,7 +1,7 @@
import json
import schemas
from chalicelib.core.metrics import custom_metrics
from chalicelib.core import custom_metrics
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
@ -149,6 +149,30 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
return helper.dict_to_camel_case(row)
def get_widget(project_id, user_id, dashboard_id, widget_id):
with pg_client.PostgresClient() as cur:
pg_query = """SELECT metrics.*, metric_series.series
FROM dashboard_widgets
INNER JOIN dashboards USING (dashboard_id)
INNER JOIN metrics USING (metric_id)
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index), '[]'::jsonb) AS series
FROM metric_series
WHERE metric_series.metric_id = metrics.metric_id
AND metric_series.deleted_at ISNULL
) AS metric_series ON (TRUE)
WHERE dashboard_id = %(dashboard_id)s
AND widget_id = %(widget_id)s
AND (dashboards.is_public OR dashboards.user_id = %(userId)s)
AND dashboards.deleted_at IS NULL
AND metrics.deleted_at ISNULL
AND (metrics.project_id = %(projectId)s OR metrics.project_id ISNULL)
AND (metrics.is_public OR metrics.user_id = %(userId)s);"""
params = {"userId": user_id, "projectId": project_id, "dashboard_id": dashboard_id, "widget_id": widget_id}
cur.execute(cur.mogrify(pg_query, params))
row = cur.fetchone()
return helper.dict_to_camel_case(row)
def add_widget(project_id, user_id, dashboard_id, data: schemas.AddWidgetToDashboardPayloadSchema):
with pg_client.PostgresClient() as cur:
pg_query = """INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
@ -204,9 +228,9 @@ def pin_dashboard(project_id, user_id, dashboard_id):
return helper.dict_to_camel_case(row)
def create_metric_add_widget(project: schemas.ProjectContext, user_id, dashboard_id, data: schemas.CardSchema):
metric_id = custom_metrics.create_card(project=project, user_id=user_id, data=data, dashboard=True)
return add_widget(project_id=project.project_id, user_id=user_id, dashboard_id=dashboard_id,
def create_metric_add_widget(project_id, user_id, dashboard_id, data: schemas.CardSchema):
metric_id = custom_metrics.create_card(project_id=project_id, user_id=user_id, data=data, dashboard=True)
return add_widget(project_id=project_id, user_id=user_id, dashboard_id=dashboard_id,
data=schemas.AddWidgetToDashboardPayloadSchema(metricId=metric_id))
# def make_chart_widget(dashboard_id, project_id, user_id, widget_id, data: schemas.CardChartSchema):

View file

@ -1,8 +1,5 @@
import logging
from chalicelib.utils import pg_client
logger = logging.getLogger(__name__)
from chalicelib.utils import helper, pg_client
class DatabaseRequestHandler:

View file

@ -0,0 +1,714 @@
import json
import schemas
from chalicelib.core import sourcemaps, sessions
from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import __get_step_size
def get(error_id, family=False):
if family:
return get_batch([error_id])
with pg_client.PostgresClient() as cur:
# trying: return only 1 error, without event details
query = cur.mogrify(
# "SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
"SELECT * FROM public.errors WHERE error_id = %(error_id)s LIMIT 1;",
{"error_id": error_id})
cur.execute(query=query)
result = cur.fetchone()
if result is not None:
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
return helper.dict_to_camel_case(result)
def get_batch(error_ids):
if len(error_ids) == 0:
return []
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""
WITH RECURSIVE error_family AS (
SELECT *
FROM public.errors
WHERE error_id IN %(error_ids)s
UNION
SELECT child_errors.*
FROM public.errors AS child_errors
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
)
SELECT *
FROM error_family;""",
{"error_ids": tuple(error_ids)})
cur.execute(query=query)
errors = cur.fetchall()
for e in errors:
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
return helper.list_to_camel_case(errors)
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f'{o["name"]}@{v["version"]}',
"count": v["count"]
} for o in data for v in o["partition"]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o["name"],
"count": o["count"],
} for o in data
]
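A small worked example of the flattening above (the input is illustrative):

data = [
    {"name": "Chrome", "count": 10,
     "partition": [{"version": "126", "count": 7}, {"version": "125", "count": 3}]},
    {"name": "Firefox", "count": 4,
     "partition": [{"version": "127", "count": 4}]},
]
__flatten_sort_key_count_version(data)
# [{'name': 'Chrome', 'count': 10}, {'name': 'Firefox', 'count': 4}]
__flatten_sort_key_count_version(data, merge_nested=True)
# [{'name': 'Chrome@126', 'count': 7}, {'name': 'Firefox@127', 'count': 4}, {'name': 'Chrome@125', 'count': 3}]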
def __process_tags(row):
return [
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
{"name": "browser.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
{"name": "OS.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
{"name": "device",
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
{"name": "country", "partitions": row.pop("country_partition")}
]
def get_details(project_id, error_id, user_id, **data):
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="sessions.project_id")
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_session.append("error_id = %(error_id)s")
pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
endTime_arg_name="endDate30", project_key="errors.project_id")
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_err.append("error_id = %(error_id)s")
pg_sub_query30_err.append("source ='js_exception'")
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
pg_basic_query = __get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
density30 = int(data.get("density30", 30))
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_pg_query = f"""\
SELECT error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
last_session_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart24,
chart30,
custom_tags
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM public.errors
INNER JOIN events.errors AS s_errors USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_err)}
GROUP BY error_id, name, message) AS details
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
INNER JOIN (SELECT session_id AS last_session_id,
coalesce(custom_tags, '[]')::jsonb AS custom_tags
FROM events.errors
LEFT JOIN LATERAL (
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
FROM errors_tags
WHERE errors_tags.error_id = %(error_id)s
AND errors_tags.session_id = errors.session_id
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors
INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS version_details
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_details
GROUP BY count_per_os_details.name ) AS os_version_details
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_device_type = count_per_device_details.name
GROUP BY user_device
ORDER BY count DESC) AS count_per_device_v_details
GROUP BY count_per_device_details.name ) AS device_version_details
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query24)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
query = cur.mogrify(
f"""SELECT error_id, status, session_id, start_ts,
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
FALSE AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE pe.project_id = %(project_id)s
AND error_id = %(error_id)s
ORDER BY start_ts DESC
LIMIT 1;""",
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
cur.execute(query=query)
status = cur.fetchone()
if status is not None:
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
row["status"] = status.pop("status")
row["parent_error_id"] = status.pop("parent_error_id")
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["stack"] = []
row["last_hydrated_session"] = None
row["status"] = "untracked"
row["parent_error_id"] = None
row["favorite"] = False
row["viewed"] = False
return {"data": helper.dict_to_camel_case(row)}
def get_details_chart(project_id, error_id, user_id, **data):
pg_sub_query = __get_basic_constraints()
pg_sub_query.append("error_id = %(error_id)s")
pg_sub_query_chart = __get_basic_constraints(time_constraint=False, chart=True)
pg_sub_query_chart.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
if data.get("startDate") is None:
data["startDate"] = TimeUTC.now(-7)
else:
data["startDate"] = int(data["startDate"])
if data.get("endDate") is None:
data["endDate"] = TimeUTC.now()
else:
data["endDate"] = int(data["endDate"])
density = int(data.get("density", 7))
step_size = __get_step_size(data["startDate"], data["endDate"], density, factor=1)
params = {
"startDate": data['startDate'],
"endDate": data['endDate'],
"project_id": project_id,
"userId": user_id,
"step_size": step_size,
"error_id": error_id}
main_pg_query = f"""\
SELECT %(error_id)s AS error_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart
FROM (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
AND user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS count_per_version_details) AS browser_version_details
ON (TRUE)) AS browser_details) AS browser_details
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_query) AS partition
FROM (SELECT COALESCE(user_os_version, 'unknown') AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
AND user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_query
) AS os_version_query ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
AND user_device_type = count_per_device_details.name
GROUP BY user_device_type, user_device
ORDER BY count DESC) AS count_per_device_details
) AS device_version_details ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query_chart)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details ON (TRUE);"""
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
return {"data": helper.dict_to_camel_case(row)}
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", chart=False, step_size_name="step_size",
project_key="project_id"):
if project_key is None:
ch_sub_query = []
else:
ch_sub_query = [f"{project_key} =%(project_id)s"]
if time_constraint:
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
f"timestamp < %({endTime_arg_name})s"]
if chart:
ch_sub_query += [f"timestamp >= generated_timestamp",
f"timestamp < generated_timestamp + %({step_size_name})s"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
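For illustration, the constraint builder above yields lists like these (argument values are made up):

__get_basic_constraints(platform=schemas.PlatformType.MOBILE)
# ['project_id =%(project_id)s',
#  'timestamp >= %(startDate)s',
#  'timestamp < %(endDate)s',
#  "user_device_type = 'mobile'"]
__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
# ['project_id =%(project_id)s',
#  'timestamp >= generated_timestamp',
#  'timestamp < generated_timestamp + %(step_size24)s']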
def __get_sort_key(key):
return {
schemas.ErrorSort.OCCURRENCE: "max_datetime",
schemas.ErrorSort.USERS_COUNT: "users",
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
}.get(key, 'max_datetime')
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
empty_response = {
'total': 0,
'errors': []
}
platform = None
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
platform = f.value[0]
pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
"pe.project_id=%(project_id)s"]
# To ignore Script error
pg_sub_query.append("pe.message!='Script error.'")
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
if platform:
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
pg_sub_query_chart.append("errors.error_id =details.error_id")
statuses = []
error_ids = None
if data.startTimestamp is None:
data.startTimestamp = TimeUTC.now(-30)
if data.endTimestamp is None:
data.endTimestamp = TimeUTC.now(1)
if len(data.events) > 0 or len(data.filters) > 0:
print("-- searching for sessions before errors")
statuses = sessions.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
error_status=data.status)
if len(statuses) == 0:
return empty_response
error_ids = [e["errorId"] for e in statuses]
with pg_client.PostgresClient() as cur:
step_size = __get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
sort = __get_sort_key('datetime')
if data.sort is not None:
sort = __get_sort_key(data.sort)
order = schemas.SortOrderType.DESC
if data.order is not None:
order = data.order
extra_join = ""
params = {
"startDate": data.startTimestamp,
"endDate": data.endTimestamp,
"project_id": project_id,
"userId": user_id,
"step_size": step_size}
if data.status != schemas.ErrorStatus.ALL:
pg_sub_query.append("status = %(error_status)s")
params["error_status"] = data.status
if data.limit is not None and data.page is not None:
params["errors_offset"] = (data.page - 1) * data.limit
params["errors_limit"] = data.limit
else:
params["errors_offset"] = 0
params["errors_limit"] = 200
if error_ids is not None:
params["error_ids"] = tuple(error_ids)
pg_sub_query.append("error_id IN %(error_ids)s")
# if data.bookmarked:
# pg_sub_query.append("ufe.user_id = %(userId)s")
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
if data.query is not None and len(data.query) > 0:
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
params["error_query"] = helper.values_for_operator(value=data.query,
op=schemas.SearchEventOperator.CONTAINS)
main_pg_query = f"""SELECT full_count,
error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
chart
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
COUNT(DISTINCT session_id) AS sessions,
MAX(timestamp) AS max_datetime,
MIN(timestamp) AS min_datetime
FROM events.errors
INNER JOIN public.errors AS pe USING (error_id)
INNER JOIN public.sessions USING (session_id)
{extra_join}
WHERE {" AND ".join(pg_sub_query)}
GROUP BY error_id, name, message
ORDER BY {sort} {order}) AS details
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
) AS details
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
WHERE {" AND ".join(pg_sub_query_chart)}
) AS sessions ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
rows = cur.fetchall()
total = 0 if len(rows) == 0 else rows[0]["full_count"]
if total == 0:
rows = []
else:
if len(statuses) == 0:
query = cur.mogrify(
"""SELECT error_id,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE errors.error_id = ve.error_id
AND ve.user_id = %(user_id)s LIMIT 1), FALSE) AS viewed
FROM public.errors
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
{"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
"user_id": user_id})
cur.execute(query=query)
statuses = helper.list_to_camel_case(cur.fetchall())
statuses = {
s["errorId"]: s for s in statuses
}
for r in rows:
r.pop("full_count")
if r["error_id"] in statuses:
r["viewed"] = statuses[r["error_id"]]["viewed"]
else:
r["viewed"] = False
return {
'total': total,
'errors': helper.list_to_camel_case(rows)
}
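A quick note on the pagination defaults applied above (values illustrative):

# data.page=3, data.limit=50   -> errors_offset=100, errors_limit=50
# page or limit not provided   -> errors_offset=0,   errors_limit=200  (first 200 errors)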
def __save_stacktrace(error_id, data):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""UPDATE public.errors
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
WHERE error_id = %(error_id)s;""",
{"error_id": error_id, "data": json.dumps(data)})
cur.execute(query=query)
def get_trace(project_id, error_id):
error = get(error_id=error_id, family=False)
if error is None:
return {"errors": ["error not found"]}
if error.get("source", "") != "js_exception":
return {"errors": ["this source of errors doesn't have a sourcemap"]}
if error.get("payload") is None:
return {"errors": ["null payload"]}
if error.get("stacktrace") is not None:
return {"sourcemapUploaded": True,
"trace": error.get("stacktrace"),
"preparsed": True}
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
if all_exists:
__save_stacktrace(error_id=error_id, data=trace)
return {"sourcemapUploaded": all_exists,
"trace": trace,
"preparsed": False}
def get_sessions(start_date, end_date, project_id, user_id, error_id):
extra_constraints = ["s.project_id = %(project_id)s",
"s.start_ts >= %(startDate)s",
"s.start_ts <= %(endDate)s",
"e.error_id = %(error_id)s"]
if start_date is None:
start_date = TimeUTC.now(-7)
if end_date is None:
end_date = TimeUTC.now()
params = {
"startDate": start_date,
"endDate": end_date,
"project_id": project_id,
"userId": user_id,
"error_id": error_id}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
f"""SELECT s.project_id,
s.session_id::text AS session_id,
s.user_uuid,
s.user_id,
s.user_agent,
s.user_os,
s.user_browser,
s.user_device,
s.user_country,
s.start_ts,
s.duration,
s.events_count,
s.pages_count,
s.errors_count,
s.issue_types,
COALESCE((SELECT TRUE
FROM public.user_favorite_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
COALESCE((SELECT TRUE
FROM public.user_viewed_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
WHERE {" AND ".join(extra_constraints)}
ORDER BY s.start_ts DESC;""",
params)
cur.execute(query=query)
sessions_list = []
total = cur.rowcount
row = cur.fetchone()
while row is not None and len(sessions_list) < 100:
sessions_list.append(row)
row = cur.fetchone()
return {
'total': total,
'sessions': helper.list_to_camel_case(sessions_list)
}
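Note that the fetch loop above caps the returned list at 100 rows while total still reflects the full row count; for illustration:

# 250 matching sessions in the database ->
# {'total': 250, 'sessions': [ ...first 100 rows, ordered by start_ts DESC... ]}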
ACTION_STATE = {
"unsolve": 'unresolved',
"solve": 'resolved',
"ignore": 'ignored'
}
def change_state(project_id, user_id, error_id, action):
errors = get(error_id, family=True)
print(len(errors))
status = ACTION_STATE.get(action)
if errors is None or len(errors) == 0:
return {"errors": ["error not found"]}
if errors[0]["status"] == status:
return {"errors": [f"error is already {status}"]}
if errors[0]["status"] == ACTION_STATE["solve"] and status == ACTION_STATE["ignore"]:
return {"errors": [f"state transition not permitted {errors[0]['status']} -> {status}"]}
params = {
"userId": user_id,
"error_ids": tuple([e["errorId"] for e in errors]),
"status": status}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""UPDATE public.errors
SET status = %(status)s
WHERE error_id IN %(error_ids)s
RETURNING status""",
params)
cur.execute(query=query)
row = cur.fetchone()
if row is not None:
for e in errors:
e["status"] = row["status"]
return {"data": errors}

View file

@ -1,13 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
from . import errors_pg as errors_legacy
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
logger.info(">>> Using experimental error search")
from . import errors_ch as errors
else:
from . import errors_pg as errors

View file

@ -1,409 +0,0 @@
import schemas
from chalicelib.core import metadata
from chalicelib.core.errors import errors_legacy
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.errors.modules import sessions
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import helper, metrics_helper
from chalicelib.utils.TimeUTC import TimeUTC
def _multiple_values(values, value_key="value"):
query_values = {}
if values is not None and isinstance(values, list):
for i in range(len(values)):
k = f"{value_key}_{i}"
query_values[k] = values[i]
return query_values
def __get_sql_operator(op: schemas.SearchEventOperator):
return {
schemas.SearchEventOperator.IS: "=",
schemas.SearchEventOperator.IS_ANY: "IN",
schemas.SearchEventOperator.ON: "=",
schemas.SearchEventOperator.ON_ANY: "IN",
schemas.SearchEventOperator.IS_NOT: "!=",
schemas.SearchEventOperator.NOT_ON: "!=",
schemas.SearchEventOperator.CONTAINS: "ILIKE",
schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
}.get(op, "=")
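For reference, the mapping above resolves like this:

__get_sql_operator(schemas.SearchEventOperator.CONTAINS)      # 'ILIKE'
__get_sql_operator(schemas.SearchEventOperator.NOT_CONTAINS)  # 'NOT ILIKE'
__get_sql_operator(schemas.SearchEventOperator.IS_UNDEFINED)  # '=' (fallback for operators not in the map)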
def _isAny_opreator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator.ON_ANY, schemas.SearchEventOperator.IS_ANY]
def _isUndefined_operator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator.IS_UNDEFINED]
def __is_negation_operator(op: schemas.SearchEventOperator):
return op in [schemas.SearchEventOperator.IS_NOT,
schemas.SearchEventOperator.NOT_ON,
schemas.SearchEventOperator.NOT_CONTAINS]
def _multiple_conditions(condition, values, value_key="value", is_not=False):
query = []
for i in range(len(values)):
k = f"{value_key}_{i}"
query.append(condition.replace(value_key, k))
return "(" + (" AND " if is_not else " OR ").join(query) + ")"
def get(error_id, family=False):
return errors_legacy.get(error_id=error_id, family=family)
def get_batch(error_ids):
return errors_legacy.get_batch(error_ids=error_ids)
def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
table_name=None):
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
if table_name is not None:
table_name = table_name + "."
else:
table_name = ""
if type_condition:
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
if time_constraint:
ch_sub_query += [f"{table_name}created_at >= toDateTime(%({startTime_arg_name})s/1000)",
f"{table_name}created_at < toDateTime(%({endTime_arg_name})s/1000)"]
# if platform == schemas.PlatformType.MOBILE:
# ch_sub_query.append("user_device_type = 'mobile'")
# elif platform == schemas.PlatformType.DESKTOP:
# ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
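For illustration, the ClickHouse constraint builder above produces (arguments are made up):

__get_basic_constraints_events(table_name="events")
# ['project_id =toUInt16(%(project_id)s)',
#  "events.`$event_name`='ERROR'",
#  'events.created_at >= toDateTime(%(startDate)s/1000)',
#  'events.created_at < toDateTime(%(endDate)s/1000)']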
def __get_sort_key(key):
return {
schemas.ErrorSort.OCCURRENCE: "max_datetime",
schemas.ErrorSort.USERS_COUNT: "users",
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
}.get(key, 'max_datetime')
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startTimestamp)
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startTimestamp)
platform = None
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
platform = f.value[0]
ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
# ignore platform for errors table
ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")
# To ignore Script error
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'message') != 'Script error.'")
error_ids = None
if data.startTimestamp is None:
data.startTimestamp = TimeUTC.now(-7)
if data.endTimestamp is None:
data.endTimestamp = TimeUTC.now(1)
subquery_part = ""
params = {}
if len(data.events) > 0:
errors_condition_count = 0
for i, e in enumerate(data.events):
if e.type == schemas.EventType.ERROR:
errors_condition_count += 1
is_any = _isAny_opreator(e.operator)
op = __get_sql_operator(e.operator)
e_k = f"e_value{i}"
params = {**params, **_multiple_values(e.value, value_key=e_k)}
if not is_any and len(e.value) > 0 and e.value[1] not in [None, "*", ""]:
ch_sub_query.append(
_multiple_conditions(f"(message {op} %({e_k})s OR name {op} %({e_k})s)",
e.value, value_key=e_k))
if len(data.events) > errors_condition_count:
subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
errors_only=True,
project_id=project.project_id,
user_id=user_id,
issue=None,
favorite_only=False)
subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
params = {**params, **subquery_part_args}
if len(data.filters) > 0:
meta_keys = None
# include a sub-query of sessions inside the events query, in order to reduce the selected data
for i, f in enumerate(data.filters):
if not isinstance(f.value, list):
f.value = [f.value]
filter_type = f.type
f.value = helper.values_for_operator(value=f.value, op=f.operator)
f_k = f"f_value{i}"
params = {**params, f_k: f.value, **_multiple_values(f.value, value_key=f_k)}
op = __get_sql_operator(f.operator) \
if filter_type not in [schemas.FilterType.EVENTS_COUNT] else f.operator
is_any = _isAny_opreator(f.operator)
is_undefined = _isUndefined_operator(f.operator)
if not is_any and not is_undefined and len(f.value) == 0:
continue
is_not = False
if __is_negation_operator(f.operator):
is_not = True
if filter_type == schemas.FilterType.USER_BROWSER:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_browser)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_os)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_device)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_country)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.UTM_SOURCE]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.utm_source)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.utm_source)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.utm_source {op} toString(%({f_k})s)', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.UTM_MEDIUM]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.utm_medium)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.utm_medium)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.utm_medium {op} toString(%({f_k})s)', f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.UTM_CAMPAIGN]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.utm_campaign)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.utm_campaign)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f's.utm_campaign {op} toString(%({f_k})s)', f.value, is_not=is_not,
value_key=f_k))
elif filter_type == schemas.FilterType.DURATION:
if len(f.value) > 0 and f.value[0] is not None:
ch_sessions_sub_query.append("s.duration >= %(minDuration)s")
params["minDuration"] = f.value[0]
if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
ch_sessions_sub_query.append("s.duration <= %(maxDuration)s")
params["maxDuration"] = f.value[1]
elif filter_type == schemas.FilterType.REFERRER:
# extra_from += f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"
if is_any:
referrer_constraint = 'isNotNull(s.base_referrer)'
else:
referrer_constraint = _multiple_conditions(f"s.base_referrer {op} %({f_k})s", f.value,
is_not=is_not, value_key=f_k)
elif filter_type == schemas.FilterType.METADATA:
# get metadata list only if you need it
if meta_keys is None:
meta_keys = metadata.get(project_id=project.project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
if f.source in meta_keys.keys():
if is_any:
ch_sessions_sub_query.append(f"isNotNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
elif is_undefined:
ch_sessions_sub_query.append(f"isNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
else:
ch_sessions_sub_query.append(
_multiple_conditions(
f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} toString(%({f_k})s)",
f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_id)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.user_id)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f"s.user_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.user_anonymous_id)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.user_anonymous_id)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f"s.user_anonymous_id {op} toString(%({f_k})s)", f.value,
is_not=is_not,
value_key=f_k))
elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
if is_any:
ch_sessions_sub_query.append('isNotNull(s.rev_id)')
elif is_undefined:
ch_sessions_sub_query.append('isNull(s.rev_id)')
else:
ch_sessions_sub_query.append(
_multiple_conditions(f"s.rev_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
value_key=f_k))
elif filter_type == schemas.FilterType.PLATFORM:
# op = __get_sql_operator(f.operator)
ch_sessions_sub_query.append(
_multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
value_key=f_k))
# elif filter_type == schemas.FilterType.issue:
# if is_any:
# ch_sessions_sub_query.append("notEmpty(s.issue_types)")
# else:
# ch_sessions_sub_query.append(f"hasAny(s.issue_types,%({f_k})s)")
# # _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
# # value_key=f_k))
#
# if is_not:
# extra_constraints[-1] = f"not({extra_constraints[-1]})"
# ss_constraints[-1] = f"not({ss_constraints[-1]})"
elif filter_type == schemas.FilterType.EVENTS_COUNT:
ch_sessions_sub_query.append(
_multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
value_key=f_k))
with ch_client.ClickHouseClient() as ch:
step_size = metrics_helper.get_step_size(data.startTimestamp, data.endTimestamp, data.density)
sort = __get_sort_key('datetime')
if data.sort is not None:
sort = __get_sort_key(data.sort)
order = "DESC"
if data.order is not None:
order = data.order
params = {
**params,
"startDate": data.startTimestamp,
"endDate": data.endTimestamp,
"project_id": project.project_id,
"userId": user_id,
"step_size": step_size}
if data.limit is not None and data.page is not None:
params["errors_offset"] = (data.page - 1) * data.limit
params["errors_limit"] = data.limit
else:
params["errors_offset"] = 0
params["errors_limit"] = 200
# if data.bookmarked:
# cur.execute(cur.mogrify(f"""SELECT error_id
# FROM public.user_favorite_errors
# WHERE user_id = %(userId)s
# {"" if error_ids is None else "AND error_id IN %(error_ids)s"}""",
# {"userId": user_id, "error_ids": tuple(error_ids or [])}))
# error_ids = cur.fetchall()
# if len(error_ids) == 0:
# return empty_response
# error_ids = [e["error_id"] for e in error_ids]
if error_ids is not None:
params["error_ids"] = tuple(error_ids)
ch_sub_query.append("error_id IN %(error_ids)s")
main_ch_query = f"""\
SELECT details.error_id as error_id,
name, message, users, total,
sessions, last_occurrence, first_occurrence, chart
FROM (SELECT error_id,
JSONExtractString(toString(`$properties`), 'name') AS name,
JSONExtractString(toString(`$properties`), 'message') AS message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT events.session_id) AS sessions,
MAX(created_at) AS max_datetime,
MIN(created_at) AS min_datetime,
COUNT(DISTINCT error_id)
OVER() AS total
FROM {MAIN_EVENTS_TABLE} AS events
INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
FROM {MAIN_SESSIONS_TABLE} AS s
{subquery_part}
WHERE {" AND ".join(ch_sessions_sub_query)}) AS sessions
ON (events.session_id = sessions.session_id)
WHERE {" AND ".join(ch_sub_query)}
GROUP BY error_id, name, message
ORDER BY {sort} {order}
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
INNER JOIN (SELECT error_id,
toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
FROM {MAIN_EVENTS_TABLE}
WHERE project_id=%(project_id)s
AND `$event_name`='ERROR'
GROUP BY error_id) AS time_details
ON details.error_id=time_details.error_id
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
FROM (SELECT error_id,
gs.generate_series AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
LEFT JOIN {MAIN_EVENTS_TABLE} ON(TRUE)
WHERE {" AND ".join(ch_sub_query)}
AND created_at >= toDateTime(timestamp / 1000)
AND created_at < toDateTime((timestamp + %(step_size)s) / 1000)
GROUP BY error_id, timestamp
ORDER BY timestamp) AS sub_table
GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""
# print("------------")
# print(ch.format(main_ch_query, params))
# print("------------")
query = ch.format(query=main_ch_query, parameters=params)
rows = ch.execute(query=query)
total = rows[0]["total"] if len(rows) > 0 else 0
for r in rows:
r["chart"] = list(r["chart"])
for i in range(len(r["chart"])):
r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}
return {
'total': total,
'errors': helper.list_to_camel_case(rows)
}
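The post-processing loop above converts ClickHouse groupArray pairs into dicts; equivalently (data made up):

r = {"chart": [[1724800000000, 3], [1724803600000, 0]]}
r["chart"] = [{"timestamp": ts, "count": c} for ts, c in r["chart"]]
# [{'timestamp': 1724800000000, 'count': 3}, {'timestamp': 1724803600000, 'count': 0}]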
def get_trace(project_id, error_id):
return errors_legacy.get_trace(project_id=project_id, error_id=error_id)
def get_sessions(start_date, end_date, project_id, user_id, error_id):
return errors_legacy.get_sessions(start_date=start_date,
end_date=end_date,
project_id=project_id,
user_id=user_id,
error_id=error_id)

View file

@ -1,248 +0,0 @@
from chalicelib.core.errors.modules import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size
def __flatten_sort_key_count_version(data, merge_nested=False):
if data is None:
return []
return sorted(
[
{
"name": f'{o["name"]}@{v["version"]}',
"count": v["count"]
} for o in data for v in o["partition"]
],
key=lambda o: o["count"], reverse=True) if merge_nested else \
[
{
"name": o["name"],
"count": o["count"],
} for o in data
]
def __process_tags(row):
return [
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
{"name": "browser.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
{"name": "OS.ver",
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
{"name": "device",
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
{"name": "country", "partitions": row.pop("country_partition")}
]
def get_details(project_id, error_id, user_id, **data):
pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30",
project_key="sessions.project_id")
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_session.append("error_id = %(error_id)s")
pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30",
project_key="errors.project_id")
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_err.append("error_id = %(error_id)s")
pg_sub_query30_err.append("source ='js_exception'")
pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)
data["endDate24"] = TimeUTC.now()
data["startDate30"] = TimeUTC.now(-30)
data["endDate30"] = TimeUTC.now()
density24 = int(data.get("density24", 24))
step_size24 = get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
density30 = int(data.get("density30", 30))
step_size30 = get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
params = {
"startDate24": data['startDate24'],
"endDate24": data['endDate24'],
"startDate30": data['startDate30'],
"endDate30": data['endDate30'],
"project_id": project_id,
"userId": user_id,
"step_size24": step_size24,
"step_size30": step_size30,
"error_id": error_id}
main_pg_query = f"""\
SELECT error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
last_session_id,
browsers_partition,
os_partition,
device_partition,
country_partition,
chart24,
chart30
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT session_id) AS sessions
FROM public.errors
INNER JOIN events.errors AS s_errors USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_err)}
GROUP BY error_id, name, message) AS details
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
INNER JOIN (SELECT session_id AS last_session_id
FROM events.errors
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
FROM (SELECT *
FROM (SELECT user_browser AS name,
COUNT(session_id) AS count
FROM events.errors
INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_browser
ORDER BY count DESC) AS count_per_browser_query
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
FROM (SELECT user_browser_version AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_browser = count_per_browser_query.name
GROUP BY user_browser_version
ORDER BY count DESC) AS version_details
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
FROM (SELECT *
FROM (SELECT user_os AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_os
ORDER BY count DESC) AS count_per_os_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_os = count_per_os_details.name
GROUP BY user_os_version
ORDER BY count DESC) AS count_per_version_details
GROUP BY count_per_os_details.name ) AS os_version_details
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
FROM (SELECT *
FROM (SELECT user_device_type AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_device_type
ORDER BY count DESC) AS count_per_device_details
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
FROM (SELECT CASE
WHEN user_device = '' OR user_device ISNULL
THEN 'unknown'
ELSE user_device END AS version,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
AND sessions.user_device_type = count_per_device_details.name
GROUP BY user_device
ORDER BY count DESC) AS count_per_device_v_details
GROUP BY count_per_device_details.name ) AS device_version_details
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
FROM (SELECT user_country AS name,
COUNT(session_id) AS count
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30_session)}
GROUP BY user_country
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query24)}
) AS chart_details ON (TRUE)
GROUP BY generated_timestamp
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
row = cur.fetchone()
if row is None:
return {"errors": ["error not found"]}
row["tags"] = __process_tags(row)
query = cur.mogrify(
f"""SELECT error_id, status, session_id, start_ts,
parent_error_id,session_id, user_anonymous_id,
user_id, user_uuid, user_browser, user_browser_version,
user_os, user_os_version, user_device, payload,
FALSE AS favorite,
True AS viewed
FROM public.errors AS pe
INNER JOIN events.errors AS ee USING (error_id)
INNER JOIN public.sessions USING (session_id)
WHERE pe.project_id = %(project_id)s
AND error_id = %(error_id)s
ORDER BY start_ts DESC
LIMIT 1;""",
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
cur.execute(query=query)
status = cur.fetchone()
if status is not None:
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
row["status"] = status.pop("status")
row["parent_error_id"] = status.pop("parent_error_id")
row["favorite"] = status.pop("favorite")
row["viewed"] = status.pop("viewed")
row["last_hydrated_session"] = status
else:
row["stack"] = []
row["last_hydrated_session"] = None
row["status"] = "untracked"
row["parent_error_id"] = None
row["favorite"] = False
row["viewed"] = False
return {"data": helper.dict_to_camel_case(row)}

View file

@ -1,294 +0,0 @@
import json
from typing import List
import schemas
from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.sessions import sessions_search
from chalicelib.core.sourcemaps import sourcemaps
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size
def get(error_id, family=False) -> dict | List[dict]:
if family:
return get_batch([error_id])
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""SELECT *
FROM public.errors
WHERE error_id = %(error_id)s
LIMIT 1;""",
{"error_id": error_id})
cur.execute(query=query)
result = cur.fetchone()
if result is not None:
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
return helper.dict_to_camel_case(result)
def get_batch(error_ids):
if len(error_ids) == 0:
return []
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""
WITH RECURSIVE error_family AS (
SELECT *
FROM public.errors
WHERE error_id IN %(error_ids)s
UNION
SELECT child_errors.*
FROM public.errors AS child_errors
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
)
SELECT *
FROM error_family;""",
{"error_ids": tuple(error_ids)})
cur.execute(query=query)
errors = cur.fetchall()
for e in errors:
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
return helper.list_to_camel_case(errors)
def __get_sort_key(key):
return {
schemas.ErrorSort.OCCURRENCE: "max_datetime",
schemas.ErrorSort.USERS_COUNT: "users",
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
}.get(key, 'max_datetime')
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
empty_response = {
'total': 0,
'errors': []
}
platform = None
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
platform = f.value[0]
pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
"pe.project_id=%(project_id)s"]
# To ignore Script error
pg_sub_query.append("pe.message!='Script error.'")
pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
project_key=None)
if platform:
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
pg_sub_query_chart.append("errors.error_id =details.error_id")
statuses = []
error_ids = None
if data.startTimestamp is None:
data.startTimestamp = TimeUTC.now(-30)
if data.endTimestamp is None:
data.endTimestamp = TimeUTC.now(1)
if len(data.events) > 0 or len(data.filters) > 0:
print("-- searching for sessions before errors")
statuses = sessions_search.search_sessions(data=data, project=project, user_id=user_id, errors_only=True,
error_status=data.status)
if len(statuses) == 0:
return empty_response
error_ids = [e["errorId"] for e in statuses]
with pg_client.PostgresClient() as cur:
step_size = get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
sort = __get_sort_key('datetime')
if data.sort is not None:
sort = __get_sort_key(data.sort)
order = schemas.SortOrderType.DESC
if data.order is not None:
order = data.order
extra_join = ""
params = {
"startDate": data.startTimestamp,
"endDate": data.endTimestamp,
"project_id": project.project_id,
"userId": user_id,
"step_size": step_size}
if data.status != schemas.ErrorStatus.ALL:
pg_sub_query.append("status = %(error_status)s")
params["error_status"] = data.status
if data.limit is not None and data.page is not None:
params["errors_offset"] = (data.page - 1) * data.limit
params["errors_limit"] = data.limit
else:
params["errors_offset"] = 0
params["errors_limit"] = 200
if error_ids is not None:
params["error_ids"] = tuple(error_ids)
pg_sub_query.append("error_id IN %(error_ids)s")
# if data.bookmarked:
# pg_sub_query.append("ufe.user_id = %(userId)s")
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
if data.query is not None and len(data.query) > 0:
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
params["error_query"] = helper.values_for_operator(value=data.query,
op=schemas.SearchEventOperator.CONTAINS)
main_pg_query = f"""SELECT full_count,
error_id,
name,
message,
users,
sessions,
last_occurrence,
first_occurrence,
chart
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
FROM (SELECT error_id,
name,
message,
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
COUNT(DISTINCT session_id) AS sessions,
MAX(timestamp) AS max_datetime,
MIN(timestamp) AS min_datetime
FROM events.errors
INNER JOIN public.errors AS pe USING (error_id)
INNER JOIN public.sessions USING (session_id)
{extra_join}
WHERE {" AND ".join(pg_sub_query)}
GROUP BY error_id, name, message
ORDER BY {sort} {order}) AS details
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
) AS details
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
FROM (SELECT generated_timestamp AS timestamp,
COUNT(session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (SELECT DISTINCT session_id
FROM events.errors
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
WHERE {" AND ".join(pg_sub_query_chart)}
) AS sessions ON (TRUE)
GROUP BY timestamp
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
# print("--------------------")
# print(cur.mogrify(main_pg_query, params))
# print("--------------------")
cur.execute(cur.mogrify(main_pg_query, params))
rows = cur.fetchall()
total = 0 if len(rows) == 0 else rows[0]["full_count"]
if total == 0:
rows = []
else:
if len(statuses) == 0:
query = cur.mogrify(
"""SELECT error_id
FROM public.errors
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
{"project_id": project.project_id, "error_ids": tuple([r["error_id"] for r in rows]),
"user_id": user_id})
cur.execute(query=query)
statuses = helper.list_to_camel_case(cur.fetchall())
statuses = {
s["errorId"]: s for s in statuses
}
for r in rows:
r.pop("full_count")
return {
'total': total,
'errors': helper.list_to_camel_case(rows)
}
def __save_stacktrace(error_id, data):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""UPDATE public.errors
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
WHERE error_id = %(error_id)s;""",
{"error_id": error_id, "data": json.dumps(data)})
cur.execute(query=query)
def get_trace(project_id, error_id):
error = get(error_id=error_id, family=False)
if error is None:
return {"errors": ["error not found"]}
if error.get("source", "") != "js_exception":
return {"errors": ["this source of errors doesn't have a sourcemap"]}
if error.get("payload") is None:
return {"errors": ["null payload"]}
if error.get("stacktrace") is not None:
return {"sourcemapUploaded": True,
"trace": error.get("stacktrace"),
"preparsed": True}
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
if all_exists:
__save_stacktrace(error_id=error_id, data=trace)
return {"sourcemapUploaded": all_exists,
"trace": trace,
"preparsed": False}
def get_sessions(start_date, end_date, project_id, user_id, error_id):
extra_constraints = ["s.project_id = %(project_id)s",
"s.start_ts >= %(startDate)s",
"s.start_ts <= %(endDate)s",
"e.error_id = %(error_id)s"]
if start_date is None:
start_date = TimeUTC.now(-7)
if end_date is None:
end_date = TimeUTC.now()
params = {
"startDate": start_date,
"endDate": end_date,
"project_id": project_id,
"userId": user_id,
"error_id": error_id}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
f"""SELECT s.project_id,
s.session_id::text AS session_id,
s.user_uuid,
s.user_id,
s.user_agent,
s.user_os,
s.user_browser,
s.user_device,
s.user_country,
s.start_ts,
s.duration,
s.events_count,
s.pages_count,
s.errors_count,
s.issue_types,
COALESCE((SELECT TRUE
FROM public.user_favorite_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
COALESCE((SELECT TRUE
FROM public.user_viewed_sessions AS fs
WHERE s.session_id = fs.session_id
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
WHERE {" AND ".join(extra_constraints)}
ORDER BY s.start_ts DESC;""",
params)
cur.execute(query=query)
sessions_list = []
total = cur.rowcount
row = cur.fetchone()
while row is not None and len(sessions_list) < 100:
sessions_list.append(row)
row = cur.fetchone()
return {
'total': total,
'sessions': helper.list_to_camel_case(sessions_list)
}

View file

@ -1,11 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
from . import helper as errors_helper
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
import chalicelib.core.sessions.sessions_ch as sessions
else:
import chalicelib.core.sessions.sessions_pg as sessions

View file

@ -1,58 +0,0 @@
from typing import Optional
import schemas
from chalicelib.core.sourcemaps import sourcemaps
def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
chart: bool = False, step_size_name: str = "step_size",
project_key: Optional[str] = "project_id"):
if project_key is None:
ch_sub_query = []
else:
ch_sub_query = [f"{project_key} =%(project_id)s"]
if time_constraint:
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
f"timestamp < %({endTime_arg_name})s"]
if chart:
ch_sub_query += [f"timestamp >= generated_timestamp",
f"timestamp < generated_timestamp + %({step_size_name})s"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
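# Illustrative example (values assumed, not from the source): with the defaults,
# __get_basic_constraints(platform=schemas.PlatformType.MOBILE) yields
# ["project_id = %(project_id)s",
#  "timestamp >= %(startDate)s",
#  "timestamp < %(endDate)s",
#  "user_device_type = 'mobile'"]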
def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
table_name=None):
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
if table_name is not None:
table_name = table_name + "."
else:
table_name = ""
if type_condition:
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
if time_constraint:
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
def format_first_stack_frame(error):
error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
for s in error["stack"]:
for c in s.get("context", []):
for sci, sc in enumerate(c):
if isinstance(sc, str) and len(sc) > 1000:
c[sci] = sc[:1000]
# convert bytes to string:
if isinstance(s["filename"], bytes):
s["filename"] = s["filename"].decode("utf-8")
return error

View file

@ -0,0 +1,48 @@
from chalicelib.utils import pg_client
def add_favorite_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""INSERT INTO public.user_favorite_errors(user_id, error_id)
VALUES (%(userId)s,%(error_id)s);""",
{"userId": user_id, "error_id": error_id})
)
return {"errorId": error_id, "favorite": True}
def remove_favorite_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""DELETE FROM public.user_favorite_errors
WHERE
user_id = %(userId)s
AND error_id = %(error_id)s;""",
{"userId": user_id, "error_id": error_id})
)
return {"errorId": error_id, "favorite": False}
def favorite_error(project_id, user_id, error_id):
exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
if not exists:
return {"errors": ["cannot bookmark non-rehydrated errors"]}
if favorite:
return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
def error_exists_and_favorite(user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""SELECT errors.error_id AS exists, ufe.error_id AS favorite
FROM public.errors
LEFT JOIN (SELECT error_id FROM public.user_favorite_errors WHERE user_id = %(userId)s) AS ufe USING (error_id)
WHERE error_id = %(error_id)s;""",
{"userId": user_id, "error_id": error_id})
)
r = cur.fetchone()
if r is None:
return False, False
return True, r.get("favorite") is not None
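# Usage sketch (hypothetical IDs): for an existing error, favorite_error toggles
# the bookmark, e.g. favorite_error(project_id=1, user_id=7, error_id="abc")
# returns {"errorId": "abc", "favorite": True} on the first call and
# {"errorId": "abc", "favorite": False} on the next.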

View file

@ -0,0 +1,37 @@
from chalicelib.utils import pg_client
def add_viewed_error(project_id, user_id, error_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
VALUES (%(userId)s,%(error_id)s);""",
{"userId": user_id, "error_id": error_id})
)
def viewed_error_exists(user_id, error_id):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""SELECT
errors.error_id AS hydrated,
COALESCE((SELECT TRUE
FROM public.user_viewed_errors AS ve
WHERE ve.error_id = %(error_id)s
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
FROM public.errors
WHERE error_id = %(error_id)s""",
{"userId": user_id, "error_id": error_id})
cur.execute(
query=query
)
r = cur.fetchone()
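# A missing row means the error was never rehydrated into public.errors;
# returning True treats it as already viewed, presumably so viewed_error()
# skips inserting a dangling user_viewed_errors row.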
if r:
return r.get("viewed")
return True
def viewed_error(project_id, user_id, error_id):
if viewed_error_exists(user_id=user_id, error_id=error_id):
return None
return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)

View file

@ -1,10 +1,9 @@
from functools import cache
from typing import Optional
import schemas
from chalicelib.core import autocomplete
from chalicelib.core import issues
from chalicelib.core.autocomplete import autocomplete
from chalicelib.core.sessions import sessions_metas
from chalicelib.core import sessions_metas
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.event_filter_definition import SupportedFilter, Event
@ -103,7 +102,7 @@ def _search_tags(project_id, value, key=None, source=None):
with pg_client.PostgresClient() as cur:
query = f"""
SELECT public.tags.name,
'TAG' AS type
'{events.EventType.TAG.ui_type}' AS type
FROM public.tags
WHERE public.tags.project_id = %(project_id)s
ORDER BY SIMILARITY(public.tags.name, %(value)s) DESC
@ -138,57 +137,52 @@ class EventType:
column=None) # column=None because errors are searched by name or message
@cache
def supported_types():
return {
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
SUPPORTED_TYPES = {
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
query=autocomplete.__generic_query(
typename=EventType.LOCATION.ui_type)),
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
query=autocomplete.__generic_query(
typename=EventType.REQUEST.ui_type)),
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
query=autocomplete.__generic_query(
typename=EventType.GRAPHQL.ui_type)),
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
query=autocomplete.__generic_query(
typename=EventType.STATEACTION.ui_type)),
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
query=None),
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
query=None),
# MOBILE
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.LOCATION.ui_type)),
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
query=autocomplete.__generic_query(
typename=EventType.CUSTOM.ui_type)),
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
typename=EventType.CLICK_MOBILE.ui_type)),
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.SWIPE_MOBILE.ui_type)),
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.INPUT_MOBILE.ui_type)),
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.REQUEST.ui_type)),
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
query=autocomplete.__generic_query(
typename=EventType.GRAPHQL.ui_type)),
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
query=autocomplete.__generic_query(
typename=EventType.STATEACTION.ui_type)),
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
query=None),
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
typename=EventType.VIEW_MOBILE.ui_type)),
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.CUSTOM_MOBILE.ui_type)),
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.REQUEST_MOBILE.ui_type)),
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
query=None),
# MOBILE
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.CLICK_MOBILE.ui_type)),
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.SWIPE_MOBILE.ui_type)),
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.INPUT_MOBILE.ui_type)),
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.VIEW_MOBILE.ui_type)),
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(
get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.CUSTOM_MOBILE.ui_type)),
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(
get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
query=autocomplete.__generic_query(
typename=EventType.REQUEST_MOBILE.ui_type)),
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
query=None),
}
}
def get_errors_by_session_id(session_id, project_id):
@ -208,17 +202,20 @@ def search(text, event_type, project_id, source, key):
if not event_type:
return {"data": autocomplete.__get_autocomplete_table(text, project_id)}
if event_type in supported_types().keys():
rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
elif event_type + "_MOBILE" in supported_types().keys():
rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
elif event_type in sessions_metas.supported_types().keys():
if event_type in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
# for IOS events autocomplete
# if event_type + "_IOS" in SUPPORTED_TYPES.keys():
# rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key,source=source)
elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
return sessions_metas.search(text, event_type, project_id)
elif event_type.endswith("_IOS") \
and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
return sessions_metas.search(text, event_type, project_id)
elif event_type.endswith("_MOBILE") \
and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
and event_type[:-len("_MOBILE")] in sessions_metas.SUPPORTED_TYPES.keys():
return sessions_metas.search(text, event_type, project_id)
else:
return {"errors": ["unsupported event"]}

View file

@ -1,14 +1,11 @@
import json
import logging
from typing import Any, List, Dict, Optional
import schemas
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from typing import Any, List, Dict, Optional
from fastapi import HTTPException, status
logger = logging.getLogger(__name__)
import json
import logging
feature_flag_columns = (
"feature_flag_id",
@ -302,8 +299,7 @@ def create_conditions(feature_flag_id: int, conditions: List[schemas.FeatureFlag
with pg_client.PostgresClient() as cur:
params = [
(feature_flag_id, c.name, c.rollout_percentage,
json.dumps([filter_.model_dump() for filter_ in c.filters]))
(feature_flag_id, c.name, c.rollout_percentage, json.dumps([filter_.model_dump() for filter_ in c.filters]))
for c in conditions]
query = cur.mogrify(sql, params)
cur.execute(query)
@ -459,8 +455,7 @@ def create_variants(feature_flag_id: int, variants: List[schemas.FeatureFlagVari
"""
with pg_client.PostgresClient() as cur:
params = [(feature_flag_id, v.value, v.description, json.dumps(v.payload), v.rollout_percentage) for v in
variants]
params = [(feature_flag_id, v.value, v.description, json.dumps(v.payload), v.rollout_percentage) for v in variants]
query = cur.mogrify(sql, params)
cur.execute(query)
rows = cur.fetchall()

View file

@ -1,7 +1,7 @@
from typing import List
import schemas
from chalicelib.core.metrics.modules import significance
from chalicelib.core import significance
from chalicelib.utils import helper
from chalicelib.utils import sql_helper as sh
@ -35,6 +35,30 @@ def __fix_stages(f_events: List[schemas.SessionSearchEventSchema2]):
return events
# def get_top_insights_on_the_fly_widget(project_id, data: schemas.FunnelInsightsPayloadSchema):
def get_top_insights_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema,
metric_format: schemas.MetricExtendedFormatType):
data.events = filter_stages(__parse_events(data.events))
data.events = __fix_stages(data.events)
if len(data.events) == 0:
return {"stages": [], "totalDropDueToIssues": 0}
insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data,
project_id=project_id,
metric_format=metric_format)
insights = helper.list_to_camel_case(insights)
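# Cap totalDropDueToIssues at the observed drop between the first and the last
# stage for the chosen metric (sessions or users), so the issue-attributed drop
# can never exceed the funnel's actual drop; the cap is then attached to the
# last stage as dropDueToIssues.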
if len(insights) > 0:
if metric_format == schemas.MetricFormatType.SESSION_COUNT and total_drop_due_to_issues > (
insights[0]["sessionsCount"] - insights[-1]["sessionsCount"]):
total_drop_due_to_issues = insights[0]["sessionsCount"] - insights[-1]["sessionsCount"]
elif metric_format == schemas.MetricExtendedFormatType.USER_COUNT and total_drop_due_to_issues > (
insights[0]["usersCount"] - insights[-1]["usersCount"]):
total_drop_due_to_issues = insights[0]["usersCount"] - insights[-1]["usersCount"]
insights[-1]["dropDueToIssues"] = total_drop_due_to_issues
return {"stages": insights,
"totalDropDueToIssues": total_drop_due_to_issues}
# def get_issues_on_the_fly_widget(project_id, data: schemas.FunnelSearchPayloadSchema):
def get_issues_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchema):
data.events = filter_stages(data.events)
data.events = __fix_stages(data.events)
@ -45,16 +69,3 @@ def get_issues_on_the_fly_widget(project_id, data: schemas.CardSeriesFilterSchem
"issues": helper.dict_to_camel_case(
significance.get_issues_list(filter_d=data, project_id=project_id, first_stage=1,
last_stage=len(data.events)))}
def get_simple_funnel(project: schemas.ProjectContext, data: schemas.CardSeriesFilterSchema,
metric_format: schemas.MetricExtendedFormatType):
data.events = filter_stages(__parse_events(data.events))
data.events = __fix_stages(data.events)
if len(data.events) == 0:
return {"stages": [], "totalDropDueToIssues": 0}
insights = significance.get_simple_funnel(filter_d=data,
project=project,
metric_format=metric_format)
return {"stages": insights}

View file

@ -1,4 +1,4 @@
import logging
from urllib.parse import urlparse
import redis
import requests
@ -7,13 +7,11 @@ from decouple import config
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
logger = logging.getLogger(__name__)
def app_connection_string(name, port, path):
namespace = config("POD_NAMESPACE", default="app")
conn_string = config("CLUSTER_URL", default="svc.cluster.local")
return f"http://{name}.{namespace}.{conn_string}:{port}/{path}"
return f"http://{'.'.join(filter(None, [name, namespace, conn_string]))}:{port}/{path}"
HEALTH_ENDPOINTS = {
@ -27,6 +25,7 @@ HEALTH_ENDPOINTS = {
"http": app_connection_string("http-openreplay", 8888, "metrics"),
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
"peers": app_connection_string("peers-openreplay", 8888, "health"),
"sink": app_connection_string("sink-openreplay", 8888, "metrics"),
"sourcemapreader": app_connection_string(
"sourcemapreader-openreplay", 8888, "health"
@ -38,66 +37,78 @@ HEALTH_ENDPOINTS = {
def __check_database_pg(*_):
fail_response = {
"health": False,
"details": {"errors": ["Postgres health-check failed"]},
"details": {
"errors": ["Postgres health-check failed"]
}
}
with pg_client.PostgresClient() as cur:
try:
cur.execute("SHOW server_version;")
# server_version = cur.fetchone()
server_version = cur.fetchone()
except Exception as e:
logger.error("!! health failed: postgres not responding")
logger.exception(e)
print("!! health failed: postgres not responding")
print(str(e))
return fail_response
try:
cur.execute("SELECT openreplay_version() AS version;")
# schema_version = cur.fetchone()
schema_version = cur.fetchone()
except Exception as e:
logger.error("!! health failed: openreplay_version not defined")
logger.exception(e)
print("!! health failed: openreplay_version not defined")
print(str(e))
return fail_response
return {
"health": True,
"details": {
# "version": server_version["server_version"],
# "schema": schema_version["version"]
},
}
}
def __not_supported(*_):
return {"errors": ["not supported"]}
def __always_healthy(*_):
return {"health": True, "details": {}}
return {
"health": True,
"details": {}
}
def __check_be_service(service_name):
def fn(*_):
fail_response = {
"health": False,
"details": {"errors": ["server health-check failed"]},
"details": {
"errors": ["server health-check failed"]
}
}
try:
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
if results.status_code != 200:
logger.error(
f"!! issue with the {service_name}-health code:{results.status_code}"
)
logger.error(results.text)
print(f"!! issue with the {service_name}-health code:{results.status_code}")
print(results.text)
# fail_response["details"]["errors"].append(results.text)
return fail_response
except requests.exceptions.Timeout:
logger.error(f"!! Timeout getting {service_name}-health")
print(f"!! Timeout getting {service_name}-health")
# fail_response["details"]["errors"].append("timeout")
return fail_response
except Exception as e:
logger.error(f"!! Issue getting {service_name}-health response")
logger.exception(e)
print(f"!! Issue getting {service_name}-health response")
print(str(e))
try:
logger.error(results.text)
print(results.text)
# fail_response["details"]["errors"].append(results.text)
except Exception:
logger.error("couldn't get response")
print("couldn't get response")
# fail_response["details"]["errors"].append(str(e))
return fail_response
return {"health": True, "details": {}}
return {
"health": True,
"details": {}
}
return fn
@ -105,18 +116,18 @@ def __check_be_service(service_name):
def __check_redis(*_):
fail_response = {
"health": False,
"details": {"errors": ["server health-check failed"]},
"details": {"errors": ["server health-check failed"]}
}
if config("REDIS_STRING", default=None) is None:
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
return fail_response
try:
r = redis.from_url(config("REDIS_STRING"), socket_timeout=2)
r = redis.from_url(config("REDIS_STRING"))
r.ping()
except Exception as e:
logger.error("!! Issue getting redis-health response")
logger.exception(e)
print("!! Issue getting redis-health response")
print(str(e))
# fail_response["details"]["errors"].append(str(e))
return fail_response
@ -124,43 +135,53 @@ def __check_redis(*_):
"health": True,
"details": {
# "version": r.execute_command('INFO')['redis_version']
},
}
}
def __check_SSL(*_):
fail_response = {
"health": False,
"details": {"errors": ["SSL Certificate health-check failed"]},
"details": {
"errors": ["SSL Certificate health-check failed"]
}
}
try:
requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
except Exception as e:
logger.error("!! health failed: SSL Certificate")
logger.exception(e)
print("!! health failed: SSL Certificate")
print(str(e))
return fail_response
return {"health": True, "details": {}}
return {
"health": True,
"details": {}
}
def __get_sessions_stats(*_):
with pg_client.PostgresClient() as cur:
constraints = ["projects.deleted_at IS NULL"]
query = cur.mogrify(
f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
COALESCE(SUM(events_count),0) AS e_c
FROM public.projects_stats
INNER JOIN public.projects USING(project_id)
WHERE {" AND ".join(constraints)};"""
)
WHERE {" AND ".join(constraints)};""")
cur.execute(query)
row = cur.fetchone()
return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}
return {
"numberOfSessionsCaptured": row["s_c"],
"numberOfEventCaptured": row["e_c"]
}
def get_health(tenant_id=None):
health_map = {
"databases": {"postgres": __check_database_pg},
"ingestionPipeline": {"redis": __check_redis},
"databases": {
"postgres": __check_database_pg
},
"ingestionPipeline": {
"redis": __check_redis
},
"backendServices": {
"alerts": __check_be_service("alerts"),
"assets": __check_be_service("assets"),
@ -173,12 +194,13 @@ def get_health(tenant_id=None):
"http": __check_be_service("http"),
"ingress-nginx": __always_healthy,
"integrations": __check_be_service("integrations"),
"peers": __check_be_service("peers"),
"sink": __check_be_service("sink"),
"sourcemapreader": __check_be_service("sourcemapreader"),
"storage": __check_be_service("storage"),
"storage": __check_be_service("storage")
},
"details": __get_sessions_stats,
"ssl": __check_SSL,
"ssl": __check_SSL
}
return __process_health(health_map=health_map)
@ -190,16 +212,10 @@ def __process_health(health_map):
response.pop(parent_key)
elif isinstance(health_map[parent_key], dict):
for element_key in health_map[parent_key]:
if config(
f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
cast=bool,
default=False,
):
if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
response[parent_key].pop(element_key)
else:
response[parent_key][element_key] = health_map[parent_key][
element_key
]()
response[parent_key][element_key] = health_map[parent_key][element_key]()
else:
response[parent_key] = health_map[parent_key]()
return response
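# Skip mechanism sketch: setting e.g. SKIP_H_BACKENDSERVICES_ALERTS=true in the
# environment removes the "alerts" entry from the "backendServices" group before
# its health-check callable is invoked.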
@ -207,8 +223,7 @@ def __process_health(health_map):
def cron():
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
"""SELECT projects.project_id,
query = cur.mogrify("""SELECT projects.project_id,
projects.created_at,
projects.sessions_last_check_at,
projects.first_recorded_session_at,
@ -216,8 +231,7 @@ def cron():
FROM public.projects
LEFT JOIN public.projects_stats USING (project_id)
WHERE projects.deleted_at IS NULL
ORDER BY project_id;"""
)
ORDER BY project_id;""")
cur.execute(query)
rows = cur.fetchall()
for r in rows:
@ -238,24 +252,20 @@ def cron():
count_start_from = r["last_update_at"]
count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
params = {
"project_id": r["project_id"],
"start_ts": count_start_from,
"end_ts": TimeUTC.now(),
"sessions_count": 0,
"events_count": 0,
}
params = {"project_id": r["project_id"],
"start_ts": count_start_from,
"end_ts": TimeUTC.now(),
"sessions_count": 0,
"events_count": 0}
query = cur.mogrify(
"""SELECT COUNT(1) AS sessions_count,
query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
COALESCE(SUM(events_count),0) AS events_count
FROM public.sessions
WHERE project_id=%(project_id)s
AND start_ts>=%(start_ts)s
AND start_ts<=%(end_ts)s
AND duration IS NOT NULL;""",
params,
)
params)
cur.execute(query)
row = cur.fetchone()
if row is not None:
@ -263,68 +273,56 @@ def cron():
params["events_count"] = row["events_count"]
if insert:
query = cur.mogrify(
"""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
params,
)
params)
else:
query = cur.mogrify(
"""UPDATE public.projects_stats
query = cur.mogrify("""UPDATE public.projects_stats
SET sessions_count=sessions_count+%(sessions_count)s,
events_count=events_count+%(events_count)s,
last_update_at=(now() AT TIME ZONE 'utc'::text)
WHERE project_id=%(project_id)s;""",
params,
)
params)
cur.execute(query)
# This cron corrects the sessions & events counts every week.
def weekly_cron():
with pg_client.PostgresClient(long_query=True) as cur:
query = cur.mogrify(
"""SELECT project_id,
query = cur.mogrify("""SELECT project_id,
projects_stats.last_update_at
FROM public.projects
LEFT JOIN public.projects_stats USING (project_id)
WHERE projects.deleted_at IS NULL
ORDER BY project_id;"""
)
ORDER BY project_id;""")
cur.execute(query)
rows = cur.fetchall()
for r in rows:
if r["last_update_at"] is None:
continue
params = {
"project_id": r["project_id"],
"end_ts": TimeUTC.now(),
"sessions_count": 0,
"events_count": 0,
}
params = {"project_id": r["project_id"],
"end_ts": TimeUTC.now(),
"sessions_count": 0,
"events_count": 0}
query = cur.mogrify(
"""SELECT COUNT(1) AS sessions_count,
query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
COALESCE(SUM(events_count),0) AS events_count
FROM public.sessions
WHERE project_id=%(project_id)s
AND start_ts<=%(end_ts)s
AND duration IS NOT NULL;""",
params,
)
params)
cur.execute(query)
row = cur.fetchone()
if row is not None:
params["sessions_count"] = row["sessions_count"]
params["events_count"] = row["events_count"]
query = cur.mogrify(
"""UPDATE public.projects_stats
query = cur.mogrify("""UPDATE public.projects_stats
SET sessions_count=%(sessions_count)s,
events_count=%(events_count)s,
last_update_at=(now() AT TIME ZONE 'utc'::text)
WHERE project_id=%(project_id)s;""",
params,
)
params)
cur.execute(query)

View file

@ -1,35 +1,28 @@
import logging
import schemas
from chalicelib.core import sessions
from chalicelib.core.sessions import sessions_mobs
from chalicelib.core import sessions_mobs, sessions, events
from chalicelib.utils import pg_client, helper
from chalicelib.utils import sql_helper as sh
# from chalicelib.utils import sql_helper as sh
logger = logging.getLogger(__name__)
def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
if data.url is None or data.url == "":
return []
args = {"startDate": data.startTimestamp, "endDate": data.endTimestamp,
"project_id": project_id, "url": data.url}
constraints = ["sessions.project_id = %(project_id)s",
"(url = %(url)s OR path= %(url)s)",
"clicks.timestamp >= %(startDate)s",
"clicks.timestamp <= %(endDate)s",
"start_ts >= %(startDate)s",
"start_ts <= %(endDate)s",
"duration IS NOT NULL",
"normalized_x IS NOT NULL"]
if data.operator == schemas.SearchEventOperator.IS:
constraints.append("path= %(url)s")
else:
constraints.append("path ILIKE %(url)s")
args["url"] = helper.values_for_operator(data.url, data.operator)
query_from = "events.clicks INNER JOIN sessions USING (session_id)"
has_click_rage_filter = False
# TODO: is this used?
# has_click_rage_filter = False
# if len(data.filters) > 0:
# for i, f in enumerate(data.filters):
# if f.type == schemas.FilterType.issue and len(f.value) > 0:
@ -56,15 +49,15 @@ def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
# f.value, value_key=f_k))
# constraints.append(sh.multi_conditions(f"mis.type = %({f_k})s",
# f.value, value_key=f_k))
# TODO: change this once click-rage is fixed
# if data.click_rage and not has_click_rage_filter:
# constraints.append("""(issues.session_id IS NULL
# OR (issues.timestamp >= %(startDate)s
# AND issues.timestamp <= %(endDate)s
# AND mis.project_id = %(project_id)s
# AND mis.type='click_rage'))""")
# query_from += """LEFT JOIN events_common.issues USING (timestamp, session_id)
# LEFT JOIN issues AS mis USING (issue_id)"""
if data.click_rage and not has_click_rage_filter:
constraints.append("""(issues.session_id IS NULL
OR (issues.timestamp >= %(startDate)s
AND issues.timestamp <= %(endDate)s
AND mis.project_id = %(project_id)s
AND mis.type='click_rage'))""")
query_from += """LEFT JOIN events_common.issues USING (timestamp, session_id)
LEFT JOIN issues AS mis USING (issue_id)"""
with pg_client.PostgresClient() as cur:
query = cur.mogrify(f"""SELECT normalized_x, normalized_y
FROM {query_from}
@ -90,13 +83,8 @@ def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
args = {"session_id": session_id, "url": data.url}
constraints = ["session_id = %(session_id)s",
"(url = %(url)s OR path= %(url)s)",
"normalized_x IS NOT NULL"]
if data.operator == schemas.SearchEventOperator.IS:
constraints.append("path= %(url)s")
else:
constraints.append("path ILIKE %(url)s")
args["url"] = helper.values_for_operator(data.url, data.operator)
query_from = "events.clicks"
with pg_client.PostgresClient() as cur:
@ -122,13 +110,8 @@ def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatM
def get_selectors_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
args = {"session_id": session_id, "url": data.url}
constraints = ["session_id = %(session_id)s"]
if data.operator == schemas.SearchEventOperator.IS:
constraints.append("path= %(url)s")
else:
constraints.append("path ILIKE %(url)s")
args["url"] = helper.values_for_operator(data.url, data.operator)
constraints = ["session_id = %(session_id)s",
"(url = %(url)s OR path= %(url)s)"]
query_from = "events.clicks"
with pg_client.PostgresClient() as cur:
@ -160,93 +143,29 @@ s.start_ts,
s.duration"""
def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
start_time: int,
end_time: int) -> str | None:
full_args = {
"sessionId": session_id,
"projectId": project_id,
"start_time": start_time,
"end_time": end_time,
}
sub_condition = ["session_id = %(sessionId)s"]
if location_condition and len(location_condition.value) > 0:
f_k = "LOC"
op = sh.get_sql_operator(location_condition.operator)
full_args = {**full_args, **sh.multi_values(location_condition.value, value_key=f_k)}
sub_condition.append(
sh.multi_conditions(f'path {op} %({f_k})s', location_condition.value, is_not=False,
value_key=f_k))
with pg_client.PostgresClient() as cur:
main_query = cur.mogrify(f"""WITH paths AS (SELECT DISTINCT path
FROM events.clicks
WHERE {" AND ".join(sub_condition)})
SELECT path, COUNT(1) AS count
FROM events.clicks
INNER JOIN public.sessions USING (session_id)
INNER JOIN paths USING (path)
WHERE sessions.project_id = %(projectId)s
AND clicks.timestamp >= %(start_time)s
AND clicks.timestamp <= %(end_time)s
AND start_ts >= %(start_time)s
AND start_ts <= %(end_time)s
AND duration IS NOT NULL
GROUP BY path
ORDER BY count DESC
LIMIT 1;""", full_args)
logger.debug("--------------------")
logger.debug(main_query)
logger.debug("--------------------")
try:
cur.execute(main_query)
except Exception as err:
logger.warning("--------- CLICK MAP BEST URL SEARCH QUERY EXCEPTION -----------")
logger.warning(main_query.decode('UTF-8'))
logger.warning("--------- PAYLOAD -----------")
logger.warning(full_args)
logger.warning("--------------------")
raise err
url = cur.fetchone()
if url is None:
return None
return url["path"]
def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_id,
include_mobs: bool = True, exclude_sessions: list[str] = [],
_depth: int = 3):
no_platform = True
location_condition = None
no_click = True
no_location = True
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM:
no_platform = False
break
for f in data.events:
if f.type == schemas.EventType.LOCATION:
no_location = False
if len(f.value) == 0:
f.operator = schemas.SearchEventOperator.IS_ANY
location_condition = f.model_copy()
elif f.type == schemas.EventType.CLICK:
no_click = False
if len(f.value) == 0:
f.operator = schemas.SearchEventOperator.IS_ANY
if location_condition and not no_click:
break
if no_platform:
data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.PLATFORM,
value=[schemas.PlatformType.DESKTOP],
operator=schemas.SearchEventOperator.IS))
if not location_condition:
if no_location:
data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
value=[],
operator=schemas.SearchEventOperator.IS_ANY))
if no_click:
data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
value=[],
operator=schemas.SearchEventOperator.IS_ANY))
data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
value=[0],
@ -264,8 +183,7 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_i
main_query = cur.mogrify(f"""SELECT *
FROM (SELECT {SESSION_PROJECTION_COLS}
{query_part}
--ignoring the sort made the query faster (from 6s to 100ms)
--ORDER BY {data.sort} {data.order.value}
ORDER BY {data.sort} {data.order.value}
LIMIT 20) AS raw
ORDER BY random()
LIMIT 1;""", full_args)
@ -284,13 +202,6 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_i
session = cur.fetchone()
if session:
if not location_condition or location_condition.operator == schemas.SearchEventOperator.IS_ANY:
session["path"] = __get_1_url(project_id=project_id, session_id=session["session_id"],
location_condition=location_condition,
start_time=data.startTimestamp, end_time=data.endTimestamp)
else:
session["path"] = location_condition.value[0]
if include_mobs:
session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])

View file

@ -1,12 +1,12 @@
import schemas
from chalicelib.core.issue_tracking import base
from chalicelib.core.issue_tracking.github_issue import GithubIntegrationIssue
from chalicelib.core import integration_base
from chalicelib.core.integration_github_issue import GithubIntegrationIssue
from chalicelib.utils import pg_client, helper
PROVIDER = schemas.IntegrationType.GITHUB
class GitHubIntegration(base.BaseIntegration):
class GitHubIntegration(integration_base.BaseIntegration):
def __init__(self, tenant_id, user_id):
self.__tenant_id = tenant_id

View file

@ -1,12 +1,12 @@
from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue
from chalicelib.core.integration_base_issue import BaseIntegrationIssue
from chalicelib.utils import github_client_v3
from chalicelib.utils.github_client_v3 import github_formatters as formatter
class GithubIntegrationIssue(BaseIntegrationIssue):
def __init__(self, token):
self.__client = github_client_v3.githubV3Request(token)
super(GithubIntegrationIssue, self).__init__("GITHUB", token)
def __init__(self, integration_token):
self.__client = github_client_v3.githubV3Request(integration_token)
super(GithubIntegrationIssue, self).__init__("GITHUB", integration_token)
def get_current_user(self):
return formatter.user(self.__client.get("/user"))
@ -28,9 +28,9 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
return meta
def create_new_assignment(self, project_id, title, description, assignee,
def create_new_assignment(self, integration_project_id, title, description, assignee,
issue_type):
repoId = project_id
repoId = integration_project_id
assignees = [assignee]
labels = [str(issue_type)]
@ -59,11 +59,11 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
def get_by_ids(self, saved_issues):
results = []
for i in saved_issues:
results.append(self.get(project_id=i["integrationProjectId"], assignment_id=i["id"]))
results.append(self.get(integration_project_id=i["integrationProjectId"], assignment_id=i["id"]))
return {"issues": results}
def get(self, project_id, assignment_id):
repoId = project_id
def get(self, integration_project_id, assignment_id):
repoId = integration_project_id
issueNumber = assignment_id
issue = self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}")
issue = formatter.issue(issue)
@ -72,17 +72,17 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}/comments")]
return issue
def comment(self, project_id, assignment_id, comment):
repoId = project_id
def comment(self, integration_project_id, assignment_id, comment):
repoId = integration_project_id
issueNumber = assignment_id
commentCreated = self.__client.post(f"/repositories/{repoId}/issues/{issueNumber}/comments",
body={"body": comment})
return formatter.comment(commentCreated)
def get_metas(self, project_id):
def get_metas(self, integration_project_id):
current_user = self.get_current_user()
try:
users = self.__client.get(f"/repositories/{project_id}/collaborators")
users = self.__client.get(f"/repositories/{integration_project_id}/collaborators")
except Exception as e:
users = []
users = [formatter.user(u) for u in users]
@ -92,7 +92,7 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
return {"provider": self.provider.lower(),
'users': users,
'issueTypes': [formatter.label(l) for l in
self.__client.get(f"/repositories/{project_id}/labels")]
self.__client.get(f"/repositories/{integration_project_id}/labels")]
}
def get_projects(self):

View file

@ -1,6 +1,6 @@
import schemas
from chalicelib.core.issue_tracking import base
from chalicelib.core.issue_tracking.jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.core import integration_base
from chalicelib.core.integration_jira_cloud_issue import JIRACloudIntegrationIssue
from chalicelib.utils import pg_client, helper
PROVIDER = schemas.IntegrationType.JIRA
@ -10,7 +10,7 @@ def obfuscate_string(string):
return "*" * (len(string) - 4) + string[-4:]
class JIRAIntegration(base.BaseIntegration):
class JIRAIntegration(integration_base.BaseIntegration):
def __init__(self, tenant_id, user_id):
self.__tenant_id = tenant_id
# TODO: enable super-constructor when OAuth is done
@ -41,7 +41,6 @@ class JIRAIntegration(base.BaseIntegration):
except Exception as e:
self._issue_handler = None
self.integration["valid"] = False
return {"errors": ["Something went wrong, please check your JIRA credentials."]}
return self._issue_handler
# TODO: remove this once jira-oauth is done
@ -50,8 +49,8 @@ class JIRAIntegration(base.BaseIntegration):
cur.execute(
cur.mogrify(
"""SELECT username, token, url
FROM public.jira_cloud
WHERE user_id = %(user_id)s;""",
FROM public.jira_cloud
WHERE user_id=%(user_id)s;""",
{"user_id": self._user_id})
)
data = helper.dict_to_camel_case(cur.fetchone())
@ -95,9 +94,10 @@ class JIRAIntegration(base.BaseIntegration):
def add(self, username, token, url, obfuscate=False):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(""" \
INSERT INTO public.jira_cloud(username, token, user_id, url)
VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
cur.mogrify("""\
INSERT INTO public.jira_cloud(username, token, user_id,url)
VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
RETURNING username, token, url;""",
{"user_id": self._user_id, "username": username,
"token": token, "url": url})
)
@ -111,10 +111,9 @@ class JIRAIntegration(base.BaseIntegration):
def delete(self):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(""" \
DELETE
FROM public.jira_cloud
WHERE user_id = %(user_id)s;""",
cur.mogrify("""\
DELETE FROM public.jira_cloud
WHERE user_id=%(user_id)s;""",
{"user_id": self._user_id})
)
return {"state": "success"}
@ -125,7 +124,7 @@ class JIRAIntegration(base.BaseIntegration):
changes={
"username": data.username,
"token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
else self.integration["token"],
else self.integration.token,
"url": str(data.url)
},
obfuscate=True

View file

@ -1,5 +1,5 @@
from chalicelib.utils import jira_client
from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue
from chalicelib.core.integration_base_issue import BaseIntegrationIssue
class JIRACloudIntegrationIssue(BaseIntegrationIssue):
@ -9,8 +9,8 @@ class JIRACloudIntegrationIssue(BaseIntegrationIssue):
self._client = jira_client.JiraManager(self.url, self.username, token, None)
super(JIRACloudIntegrationIssue, self).__init__("JIRA", token)
def create_new_assignment(self, project_id, title, description, assignee, issue_type):
self._client.set_jira_project_id(project_id)
def create_new_assignment(self, integration_project_id, title, description, assignee, issue_type):
self._client.set_jira_project_id(integration_project_id)
data = {
'summary': title,
'description': description,
@ -28,26 +28,26 @@ class JIRACloudIntegrationIssue(BaseIntegrationIssue):
projects_map[i["integrationProjectId"]].append(i["id"])
results = []
for project_id in projects_map:
self._client.set_jira_project_id(project_id)
for integration_project_id in projects_map:
self._client.set_jira_project_id(integration_project_id)
jql = 'labels = OpenReplay'
if len(projects_map[project_id]) > 0:
jql += f" AND ID IN ({','.join(projects_map[project_id])})"
if len(projects_map[integration_project_id]) > 0:
jql += f" AND ID IN ({','.join(projects_map[integration_project_id])})"
issues = self._client.get_issues(jql, offset=0)
results += issues
return {"issues": results}
def get(self, project_id, assignment_id):
self._client.set_jira_project_id(project_id)
def get(self, integration_project_id, assignment_id):
self._client.set_jira_project_id(integration_project_id)
return self._client.get_issue_v3(assignment_id)
def comment(self, project_id, assignment_id, comment):
self._client.set_jira_project_id(project_id)
def comment(self, integration_project_id, assignment_id, comment):
self._client.set_jira_project_id(integration_project_id)
return self._client.add_comment_v3(assignment_id, comment)
def get_metas(self, project_id):
def get_metas(self, integration_project_id):
meta = {}
self._client.set_jira_project_id(project_id)
self._client.set_jira_project_id(integration_project_id)
meta['issueTypes'] = self._client.get_issue_types()
meta['users'] = self._client.get_assignable_users()
return {"provider": self.provider.lower(), **meta}

View file

@ -1,5 +1,4 @@
import schemas
from chalicelib.core.modules import TENANT_CONDITION
from chalicelib.utils import pg_client
@ -52,13 +51,10 @@ def get_global_integrations_status(tenant_id, user_id, project_id):
AND provider='elasticsearch')) AS {schemas.IntegrationType.ELASTICSEARCH.value},
EXISTS((SELECT 1
FROM public.webhooks
WHERE type='slack' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.SLACK.value},
WHERE type='slack' AND deleted_at ISNULL)) AS {schemas.IntegrationType.SLACK.value},
EXISTS((SELECT 1
FROM public.webhooks
WHERE type='msteams' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.MS_TEAMS.value},
EXISTS((SELECT 1
FROM public.integrations
WHERE project_id=%(project_id)s AND provider='dynatrace')) AS {schemas.IntegrationType.DYNATRACE.value};""",
WHERE type='msteams' AND deleted_at ISNULL)) AS {schemas.IntegrationType.MS_TEAMS.value};""",
{"user_id": user_id, "tenant_id": tenant_id, "project_id": project_id})
)
current_integrations = cur.fetchone()

View file

@ -1,7 +1,7 @@
from chalicelib.core.issue_tracking import github, jira_cloud
from chalicelib.core import integration_github, integration_jira_cloud
from chalicelib.utils import pg_client
SUPPORTED_TOOLS = [github.PROVIDER, jira_cloud.PROVIDER]
SUPPORTED_TOOLS = [integration_github.PROVIDER, integration_jira_cloud.PROVIDER]
def get_available_integrations(user_id):
@ -23,7 +23,7 @@ def get_available_integrations(user_id):
def __get_default_integration(user_id):
current_integrations = get_available_integrations(user_id)
return github.PROVIDER if current_integrations["github"] else jira_cloud.PROVIDER if \
return integration_github.PROVIDER if current_integrations["github"] else integration_jira_cloud.PROVIDER if \
current_integrations["jira"] else None
@ -35,11 +35,11 @@ def get_integration(tenant_id, user_id, tool=None, for_delete=False):
tool = tool.upper()
if tool not in SUPPORTED_TOOLS:
return {"errors": [f"issue tracking tool not supported yet, available: {SUPPORTED_TOOLS}"]}, None
if tool == jira_cloud.PROVIDER:
integration = jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
if tool == integration_jira_cloud.PROVIDER:
integration = integration_jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
if not for_delete and integration.integration is not None and not integration.integration.get("valid", True):
return {"errors": ["JIRA: connexion issue/unauthorized"]}, integration
return None, integration
elif tool == github.PROVIDER:
return None, github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
elif tool == integration_github.PROVIDER:
return None, integration_github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
return {"errors": ["lost integration"]}, None

View file

@ -1,5 +1,32 @@
from chalicelib.utils import pg_client, helper
ISSUE_TYPES = ['click_rage', 'dead_click', 'excessive_scrolling', 'bad_request', 'missing_resource', 'memory', 'cpu',
'slow_resource', 'slow_page_load', 'crash', 'ml_cpu', 'ml_memory', 'ml_dead_click', 'ml_click_rage',
'ml_mouse_thrashing', 'ml_excessive_scrolling', 'ml_slow_resources', 'custom', 'js_exception',
'custom_event_error', 'js_error']
ORDER_QUERY = """\
(CASE WHEN type = 'js_exception' THEN 0
WHEN type = 'bad_request' THEN 1
WHEN type = 'missing_resource' THEN 2
WHEN type = 'click_rage' THEN 3
WHEN type = 'dead_click' THEN 4
WHEN type = 'memory' THEN 5
WHEN type = 'cpu' THEN 6
WHEN type = 'crash' THEN 7
ELSE -1 END)::INTEGER
"""
NAME_QUERY = """\
(CASE WHEN type = 'js_exception' THEN 'Errors'
WHEN type = 'bad_request' THEN 'Bad Requests'
WHEN type = 'missing_resource' THEN 'Missing Images'
WHEN type = 'click_rage' THEN 'Click Rage'
WHEN type = 'dead_click' THEN 'Dead Clicks'
WHEN type = 'memory' THEN 'High Memory'
WHEN type = 'cpu' THEN 'High CPU'
WHEN type = 'crash' THEN 'Crashes'
ELSE type::text END)::text
"""
def get(project_id, issue_id):
with pg_client.PostgresClient() as cur:
@ -35,6 +62,20 @@ def get_by_session_id(session_id, project_id, issue_type=None):
return helper.list_to_camel_case(cur.fetchall())
def get_types_by_project(project_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""SELECT type,
{ORDER_QUERY}>=0 AS visible,
{ORDER_QUERY} AS order,
{NAME_QUERY} AS name
FROM (SELECT DISTINCT type
FROM public.issues
WHERE project_id = %(project_id)s) AS types
ORDER BY "order";""", {"project_id": project_id}))
return helper.list_to_camel_case(cur.fetchall())
def get_all_types():
return [
{

View file

@ -1,10 +1,6 @@
import logging
from chalicelib.core.sessions import sessions_mobs, sessions_devtool
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
logger = logging.getLogger(__name__)
from chalicelib.core import sessions_mobs, sessions_devtool
class Actions:
@ -154,23 +150,23 @@ def get_scheduled_jobs():
def execute_jobs():
jobs = get_scheduled_jobs()
for job in jobs:
logger.info(f"Executing jobId:{job['jobId']}")
print(f"Executing jobId:{job['jobId']}")
try:
if job["action"] == Actions.DELETE_USER_DATA:
session_ids = __get_session_ids_by_user_ids(project_id=job["projectId"],
user_ids=[job["referenceId"]])
if len(session_ids) > 0:
logger.info(f"Deleting {len(session_ids)} sessions")
print(f"Deleting {len(session_ids)} sessions")
__delete_sessions_by_session_ids(session_ids=session_ids)
__delete_session_mobs_by_session_ids(session_ids=session_ids, project_id=job["projectId"])
else:
raise Exception(f"The action '{job['action']}' not supported.")
job["status"] = JobStatus.COMPLETED
logger.info(f"Job completed {job['jobId']}")
print(f"Job completed {job['jobId']}")
except Exception as e:
job["status"] = JobStatus.FAILED
job["errors"] = str(e)
logger.error(f"Job failed {job['jobId']}")
print(f"Job failed {job['jobId']}")
update(job["jobId"], job)

View file

@ -1,5 +1,6 @@
from chalicelib.core import log_tools
import requests
from chalicelib.core.log_tools import log_tools
from schemas import schemas
IN_TY = "bugsnag"

View file

@ -1,5 +1,5 @@
import boto3
from chalicelib.core.log_tools import log_tools
from chalicelib.core import log_tools
from schemas import schemas
IN_TY = "cloudwatch"
@ -19,6 +19,16 @@ def __find_groups(client, token):
return response["logGroups"] + __find_groups(client, response["nextToken"])
def __make_stream_filter(start_time, end_time):
def __valid_stream(stream):
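# Keep only streams whose [firstEventTimestamp, lastEventTimestamp] window
# overlaps [start_time, end_time]; streams that end before the window starts
# or begin after it ends are filtered out.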
return "firstEventTimestamp" in stream and not (
stream['firstEventTimestamp'] <= start_time and stream["lastEventTimestamp"] <= start_time
or stream['firstEventTimestamp'] >= end_time and stream["lastEventTimestamp"] >= end_time
)
return __valid_stream
def __find_streams(project_id, log_group, client, token, stream_filter):
d_args = {"logGroupName": log_group, "orderBy": 'LastEventTime', 'limit': 50}
if token is not None and len(token) > 0:

View file

@ -1,4 +1,4 @@
from chalicelib.core.log_tools import log_tools
from chalicelib.core import log_tools
from schemas import schemas
IN_TY = "datadog"

View file

@ -1,7 +1,8 @@
import logging
from chalicelib.core.log_tools import log_tools
from elasticsearch import Elasticsearch
from chalicelib.core import log_tools
from schemas import schemas
logger = logging.getLogger(__name__)

View file

@ -1,4 +1,4 @@
from chalicelib.core.log_tools import log_tools
from chalicelib.core import log_tools
from schemas import schemas
IN_TY = "newrelic"

View file

@ -1,4 +1,4 @@
from chalicelib.core.log_tools import log_tools
from chalicelib.core import log_tools
from schemas import schemas
IN_TY = "rollbar"

View file

@ -1,5 +1,5 @@
import requests
from chalicelib.core.log_tools import log_tools
from chalicelib.core import log_tools
from schemas import schemas
IN_TY = "sentry"

View file

@ -1,4 +1,4 @@
from chalicelib.core.log_tools import log_tools
from chalicelib.core import log_tools
from schemas import schemas
IN_TY = "stackdriver"

View file

@ -1,4 +1,4 @@
from chalicelib.core.log_tools import log_tools
from chalicelib.core import log_tools
from schemas import schemas
IN_TY = "sumologic"

View file

@ -1,7 +1,5 @@
import json
from chalicelib.core.modules import TENANT_CONDITION
from chalicelib.utils import pg_client, helper
import json
EXCEPT = ["jira_server", "jira_cloud"]
@ -62,6 +60,20 @@ def get(project_id, integration):
return helper.dict_to_camel_case(helper.flatten_nested_dicts(r))
def get_all_by_type(integration):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
"""\
SELECT integrations.*
FROM public.integrations INNER JOIN public.projects USING(project_id)
WHERE provider = %(provider)s AND projects.deleted_at ISNULL;""",
{"provider": integration})
)
r = cur.fetchall()
return helper.list_to_camel_case(r, flatten=True)
def edit(project_id, integration, changes):
if "projectId" in changes:
changes.pop("project_id")
@ -96,11 +108,11 @@ def get_all_by_tenant(tenant_id, integration):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(
f"""SELECT integrations.*
"""SELECT integrations.*
FROM public.integrations INNER JOIN public.projects USING(project_id)
WHERE provider = %(provider)s AND {TENANT_CONDITION}
WHERE provider = %(provider)s
AND projects.deleted_at ISNULL;""",
{"tenant_id": tenant_id, "provider": integration})
{"provider": integration})
)
r = cur.fetchall()
return helper.list_to_camel_case(r, flatten=True)

View file

@ -98,23 +98,17 @@ def __edit(project_id, col_index, colname, new_name):
if col_index not in list(old_metas.keys()):
return {"errors": ["custom field not found"]}
if old_metas[col_index]["key"] != new_name:
with pg_client.PostgresClient() as cur:
with pg_client.PostgresClient() as cur:
if old_metas[col_index]["key"] != new_name:
query = cur.mogrify(f"""UPDATE public.projects
SET {colname} = %(value)s
WHERE project_id = %(project_id)s
AND deleted_at ISNULL
RETURNING {colname},
(SELECT {colname} FROM projects WHERE project_id = %(project_id)s) AS old_{colname};""",
RETURNING {colname};""",
{"project_id": project_id, "value": new_name})
cur.execute(query=query)
row = cur.fetchone()
new_name = row[colname]
old_name = row['old_' + colname]
new_name = cur.fetchone()[colname]
old_metas[col_index]["key"] = new_name
projects.rename_metadata_condition(project_id=project_id,
old_metadata_key=old_name,
new_metadata_key=new_name)
return {"data": old_metas[col_index]}
@ -127,8 +121,8 @@ def edit(tenant_id, project_id, index: int, new_name: str):
def delete(tenant_id, project_id, index: int):
index = int(index)
old_segments = get(project_id)
old_indexes = [k["index"] for k in old_segments]
if index not in old_indexes:
old_segments = [k["index"] for k in old_segments]
if index not in old_segments:
return {"errors": ["custom field not found"]}
with pg_client.PostgresClient() as cur:
@ -138,8 +132,7 @@ def delete(tenant_id, project_id, index: int):
WHERE project_id = %(project_id)s AND deleted_at ISNULL;""",
{"project_id": project_id})
cur.execute(query=query)
projects.delete_metadata_condition(project_id=project_id,
metadata_key=old_segments[old_indexes.index(index)]["key"])
return {"data": get(project_id)}
@ -194,6 +187,33 @@ def search(tenant_id, project_id, key, value):
return {"data": [k[key] for k in value]}
def get_available_keys(project_id):
all_metas = get(project_id=project_id)
return [k["key"] for k in all_metas]
def get_by_session_id(project_id, session_id):
all_metas = get(project_id=project_id)
if len(all_metas) == 0:
return []
keys = {index_to_colname(k["index"]): k["key"] for k in all_metas}
with pg_client.PostgresClient() as cur:
query = cur.mogrify(f"""SELECT {",".join(keys.keys())}
FROM public.sessions
WHERE project_id= %(project_id)s
AND session_id=%(session_id)s;""",
{"session_id": session_id, "project_id": project_id})
cur.execute(query=query)
session_metas = cur.fetchall()
results = []
for m in session_metas:
r = {}
for k in m.keys():
r[keys[k]] = m[k]
results.append(r)
return results
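
To make the remapping in get_by_session_id concrete: the query selects the physical metadata_N columns, and the loop swaps each column name for the key the user configured. A standalone sketch with invented sample data (index_to_colname is assumed to produce "metadata_<index>"):

keys = {"metadata_1": "plan", "metadata_2": "region"}              # colname -> user key
session_metas = [{"metadata_1": "pro", "metadata_2": "eu-west"}]   # rows from public.sessions

results = []
for m in session_metas:
    # Replace each physical column name with its user-defined key.
    results.append({keys[k]: v for k, v in m.items()})

print(results)  # [{'plan': 'pro', 'region': 'eu-west'}]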
def get_keys_by_projects(project_ids):
if project_ids is None or len(project_ids) == 0:
return {}

File diff suppressed because it is too large

View file

@ -1,10 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
logger.info(">>> Using experimental metrics")
else:
pass

View file

@ -1,11 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
logger.info(">>> Using experimental heatmaps")
from .heatmaps_ch import *
else:
from .heatmaps import *

View file

@ -1,385 +0,0 @@
import logging
from decouple import config
import schemas
from chalicelib.core import events
from chalicelib.core.metrics.modules import sessions, sessions_mobs
from chalicelib.utils import sql_helper as sh
from chalicelib.utils import pg_client, helper, ch_client, exp_ch_helper
logger = logging.getLogger(__name__)
def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
if data.url is None or data.url == "":
return []
args = {"startDate": data.startTimestamp, "endDate": data.endTimestamp,
"project_id": project_id, "url": data.url}
constraints = [
"main_events.project_id = toUInt16(%(project_id)s)",
"main_events.created_at >= toDateTime(%(startDate)s / 1000)",
"main_events.created_at <= toDateTime(%(endDate)s / 1000)",
"main_events.`$event_name` = 'CLICK'",
"isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
]
if data.operator == schemas.SearchEventOperator.IS:
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
else:
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
args["url"] = helper.values_for_operator(data.url, data.operator)
query_from = f"{exp_ch_helper.get_main_events_table(data.startTimestamp)} AS main_events"
# TODO: is this used?
# has_click_rage_filter = False
# if len(data.filters) > 0:
# for i, f in enumerate(data.filters):
# if f.type == schemas.FilterType.issue and len(f.value) > 0:
# has_click_rage_filter = True
# query_from += """INNER JOIN events_common.issues USING (timestamp, session_id)
# INNER JOIN issues AS mis USING (issue_id)
# INNER JOIN LATERAL (
# SELECT COUNT(1) AS real_count
# FROM events.clicks AS sc
# INNER JOIN sessions as ss USING (session_id)
# WHERE ss.project_id = 2
# AND (sc.url = %(url)s OR sc.path = %(url)s)
# AND sc.timestamp >= %(startDate)s
# AND sc.timestamp <= %(endDate)s
# AND ss.start_ts >= %(startDate)s
# AND ss.start_ts <= %(endDate)s
# AND sc.selector = clicks.selector) AS r_clicks ON (TRUE)"""
# constraints += ["mis.project_id = %(project_id)s",
# "issues.timestamp >= %(startDate)s",
# "issues.timestamp <= %(endDate)s"]
# f_k = f"issue_value{i}"
# args = {**args, **sh.multi_values(f.value, value_key=f_k)}
# constraints.append(sh.multi_conditions(f"%({f_k})s = ANY (issue_types)",
# f.value, value_key=f_k))
# constraints.append(sh.multi_conditions(f"mis.type = %({f_k})s",
# f.value, value_key=f_k))
# TODO: change this once click-rage is fixed
# if data.click_rage and not has_click_rage_filter:
# constraints.append("""(issues_t.session_id IS NULL
# OR (issues_t.datetime >= toDateTime(%(startDate)s/1000)
# AND issues_t.datetime <= toDateTime(%(endDate)s/1000)
# AND issues_t.project_id = toUInt16(%(project_id)s)
# AND issues_t.event_type = 'ISSUE'
# AND issues_t.project_id = toUInt16(%(project_id)s)
# AND mis.project_id = toUInt16(%(project_id)s)
# AND mis.type='click_rage'))""")
# query_from += """ LEFT JOIN experimental.events AS issues_t ON (main_events.session_id=issues_t.session_id)
# LEFT JOIN experimental.issues AS mis ON (issues_t.issue_id=mis.issue_id)"""
with ch_client.ClickHouseClient() as cur:
query = cur.format(query=f"""SELECT
JSON_VALUE(CAST(`$properties` AS String), '$.normalized_x') AS normalized_x,
JSON_VALUE(CAST(`$properties` AS String), '$.normalized_y') AS normalized_y
FROM {query_from}
WHERE {" AND ".join(constraints)}
LIMIT 500;""",
parameters=args)
logger.debug("---------")
logger.debug(query)
logger.debug("---------")
try:
rows = cur.execute(query=query)
except Exception as err:
logger.warning("--------- HEATMAP 2 SEARCH QUERY EXCEPTION CH -----------")
logger.warning(query)
logger.warning("--------- PAYLOAD -----------")
logger.warning(data)
logger.warning("--------------------")
raise err
return helper.list_to_camel_case(rows)
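
For orientation, with the IS operator the query assembled above should render roughly as below. This is a reconstruction from the constraints list, assuming get_main_events_table resolves to product_analytics.events as it does in __get_1_url further down; it is not captured output:

SELECT JSON_VALUE(CAST(`$properties` AS String), '$.normalized_x') AS normalized_x,
       JSON_VALUE(CAST(`$properties` AS String), '$.normalized_y') AS normalized_y
FROM product_analytics.events AS main_events
WHERE main_events.project_id = toUInt16(%(project_id)s)
  AND main_events.created_at >= toDateTime(%(startDate)s / 1000)
  AND main_events.created_at <= toDateTime(%(endDate)s / 1000)
  AND main_events.`$event_name` = 'CLICK'
  AND isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))
  AND JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s
LIMIT 500;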
def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
args = {"project_id": project_id, "session_id": session_id, "url": data.url}
constraints = [
"main_events.project_id = toUInt16(%(project_id)s)",
"main_events.session_id = %(session_id)s",
"main_events.`$event_name`='CLICK'",
"isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
]
if data.operator == schemas.SearchEventOperator.IS:
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
else:
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
args["url"] = helper.values_for_operator(data.url, data.operator)
query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"
with ch_client.ClickHouseClient() as cur:
query = cur.format(query=f"""SELECT main_events.normalized_x AS normalized_x,
main_events.normalized_y AS normalized_y
FROM {query_from}
WHERE {" AND ".join(constraints)};""",
parameters=args)
logger.debug("---------")
logger.debug(query)
logger.debug("---------")
try:
rows = cur.execute(query=query)
except Exception as err:
logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
logger.warning(query)
logger.warning("--------- PAYLOAD -----------")
logger.warning(data)
logger.warning("--------------------")
raise err
return helper.list_to_camel_case(rows)
def get_selectors_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
args = {"project_id": project_id, "session_id": session_id, "url": data.url}
constraints = ["main_events.project_id = toUInt16(%(project_id)s)",
"main_events.session_id = %(session_id)s",
"main_events.`$event_name`='CLICK'"]
if data.operator == schemas.SearchEventOperator.IS:
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
else:
constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
args["url"] = helper.values_for_operator(data.url, data.operator)
query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"
with ch_client.ClickHouseClient() as cur:
query = cur.format(query=f"""SELECT CAST(`$properties`.selector AS String) AS selector,
COUNT(1) AS count
FROM {query_from}
WHERE {" AND ".join(constraints)}
GROUP BY 1
ORDER BY count DESC;""",
parameters=args)
logger.debug("---------")
logger.debug(query)
logger.debug("---------")
try:
rows = cur.execute(query=query)
except Exception as err:
logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
logger.warning(query)
logger.warning("--------- PAYLOAD -----------")
logger.warning(data)
logger.warning("--------------------")
raise err
return helper.list_to_camel_case(rows)
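
The result is a per-selector click tally for the session. After list_to_camel_case, rows come back shaped roughly like this (values invented):

rows = [{"selector": "button#checkout", "count": 42},
        {"selector": "a.nav-home", "count": 7}]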
# use CH
SESSION_PROJECTION_COLS = """s.project_id,
s.session_id AS session_id,
toUnixTimestamp(s.datetime)*1000 AS start_ts,
s.duration AS duration"""
def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
start_time: int,
end_time: int) -> str | None:
full_args = {
"sessionId": session_id,
"projectId": project_id,
"start_time": start_time,
"end_time": end_time,
}
sub_condition = ["session_id = %(sessionId)s", "`$event_name` = 'CLICK'", "project_id = %(projectId)s"]
if location_condition and len(location_condition.value) > 0:
f_k = "LOC"
op = sh.get_sql_operator(location_condition.operator)
full_args = {**full_args, **sh.multi_values(location_condition.value, value_key=f_k)}
sub_condition.append(
sh.multi_conditions(f'path {op} %({f_k})s', location_condition.value, is_not=False,
value_key=f_k))
with ch_client.ClickHouseClient() as cur:
main_query = cur.format(query=f"""WITH paths AS (
SELECT DISTINCT
JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS url_path
FROM product_analytics.events
WHERE {" AND ".join(sub_condition)}
)
SELECT
paths.url_path,
COUNT(*) AS count
FROM product_analytics.events
INNER JOIN paths
ON JSON_VALUE(CAST(product_analytics.events.`$properties` AS String), '$.url_path') = paths.url_path
WHERE `$event_name` = 'CLICK'
AND project_id = %(projectId)s
AND created_at >= toDateTime(%(start_time)s / 1000)
AND created_at <= toDateTime(%(end_time)s / 1000)
GROUP BY paths.url_path
ORDER BY count DESC
LIMIT 1;""",
parameters=full_args)
logger.debug("--------------------")
logger.debug(main_query)
logger.debug("--------------------")
try:
url = cur.execute(query=main_query)
except Exception as err:
logger.warning("--------- CLICK MAP BEST URL SEARCH QUERY EXCEPTION CH-----------")
logger.warning(main_query)
logger.warning("--------- PAYLOAD -----------")
logger.warning(full_args)
logger.warning("--------------------")
raise err
if url is None or len(url) == 0:
return None
return url[0]["url_path"]
def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_id,
include_mobs: bool = True, exclude_sessions: list[str] = [],
_depth: int = 3):
no_platform = True
location_condition = None
no_click = True
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM:
no_platform = False
break
for f in data.events:
if f.type == schemas.EventType.LOCATION:
if len(f.value) == 0:
f.operator = schemas.SearchEventOperator.IS_ANY
location_condition = f.model_copy()
elif f.type == schemas.EventType.CLICK:
no_click = False
if len(f.value) == 0:
f.operator = schemas.SearchEventOperator.IS_ANY
if location_condition and not no_click:
break
if no_platform:
data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.PLATFORM,
value=[schemas.PlatformType.DESKTOP],
operator=schemas.SearchEventOperator.IS))
if not location_condition:
data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
value=[],
operator=schemas.SearchEventOperator.IS_ANY))
if no_click:
data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
value=[],
operator=schemas.SearchEventOperator.IS_ANY))
data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
value=[0],
operator=schemas.MathOperator.GREATER))
full_args, query_part = sessions.search_query_parts_ch(data=data, error_status=None, errors_only=False,
favorite_only=data.bookmarked, issue=None,
project_id=project_id, user_id=user_id)
full_args["exclude_sessions"] = tuple(exclude_sessions)
if len(exclude_sessions) > 0:
query_part += "\n AND session_id NOT IN (%(exclude_sessions)s)"
with ch_client.ClickHouseClient() as cur:
data.order = schemas.SortOrderType.DESC
data.sort = 'duration'
main_query = cur.format(query=f"""SELECT *
FROM (SELECT {SESSION_PROJECTION_COLS}
{query_part}
-- ORDER BY {data.sort} {data.order.value}
LIMIT 20) AS raw
ORDER BY rand()
LIMIT 1;""",
parameters=full_args)
logger.debug("--------------------")
logger.debug(main_query)
logger.debug("--------------------")
try:
session = cur.execute(query=main_query)
except Exception as err:
logger.warning("--------- CLICK MAP SHORT SESSION SEARCH QUERY EXCEPTION CH -----------")
logger.warning(main_query)
logger.warning("--------- PAYLOAD -----------")
logger.warning(data.model_dump_json())
logger.warning("--------------------")
raise err
if len(session) > 0:
session = session[0]
if not location_condition or location_condition.operator == schemas.SearchEventOperator.IS_ANY:
session["path"] = __get_1_url(project_id=project_id, session_id=session["session_id"],
location_condition=location_condition,
start_time=data.startTimestamp, end_time=data.endTimestamp)
else:
session["path"] = location_condition.value[0]
if include_mobs:
session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
if _depth > 0 and len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
return search_short_session(data=data, project_id=project_id, user_id=user_id,
include_mobs=include_mobs,
exclude_sessions=exclude_sessions + [session["session_id"]],
_depth=_depth - 1)
elif _depth == 0 and len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
logger.info("couldn't find an existing replay after 3 iterations for heatmap")
session['events'] = events.get_by_session_id(project_id=project_id, session_id=session["session_id"],
event_type=schemas.EventType.LOCATION)
else:
return None
return helper.dict_to_camel_case(session)
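
search_short_session is a depth-limited retry: pick a random qualifying session, and if it turns out to have no replay files, exclude it and search again, at most three times. The same pattern in isolation (names are illustrative, not from this codebase):

def pick_with_retry(search, has_assets, exclude=None, depth=3):
    # search(exclude) returns a candidate or None; has_assets() checks replay files.
    exclude = exclude or []
    candidate = search(exclude)
    if candidate is None:
        return None
    if has_assets(candidate) or depth == 0:
        # At depth 0 the candidate is returned even without assets,
        # mirroring the fallback branch above.
        return candidate
    return pick_with_retry(search, has_assets, exclude + [candidate], depth - 1)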
def get_selected_session(project_id, session_id):
with ch_client.ClickHouseClient() as cur:
main_query = cur.format(query=f"""SELECT {SESSION_PROJECTION_COLS}
FROM experimental.sessions AS s
WHERE session_id=%(session_id)s;""",
parameters={"session_id": session_id})
logger.debug("--------------------")
logger.debug(main_query)
logger.debug("--------------------")
try:
session = cur.execute(query=main_query)
except Exception as err:
logger.warning("--------- CLICK MAP GET SELECTED SESSION QUERY EXCEPTION -----------")
logger.warning(main_query)
raise err
if len(session) > 0:
session = session[0]
else:
session = None
if session:
session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
if len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
session["_issue"] = "mob file not found"
logger.info("can't find selected mob file for heatmap")
session['events'] = get_page_events(session_id=session["session_id"], project_id=project_id)
return helper.dict_to_camel_case(session)
def get_page_events(session_id, project_id):
with ch_client.ClickHouseClient() as cur:
query = cur.format(query=f"""SELECT
event_id as message_id,
toUnixTimestamp(created_at)*1000 AS timestamp,
JSON_VALUE(CAST(`$properties` AS String), '$.url_host') AS host,
JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS path,
JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS value,
JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS url,
'LOCATION' AS type
FROM product_analytics.events
WHERE session_id = %(session_id)s
AND `$event_name`='LOCATION'
AND project_id= %(project_id)s
ORDER BY created_at,message_id;""",
parameters={"session_id": session_id, "project_id": project_id})
rows = cur.execute(query=query)
rows = helper.list_to_camel_case(rows)
return rows

View file

@ -1,12 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
import chalicelib.core.sessions.sessions_ch as sessions
else:
import chalicelib.core.sessions.sessions_pg as sessions
from chalicelib.core.sessions import sessions_mobs

View file

@ -1,10 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
from .significance import *
if config("EXP_METRICS", cast=bool, default=False):
from .significance_ch import *

View file

@ -1,276 +0,0 @@
import logging
from typing import List
from psycopg2.extras import RealDictRow
import schemas
from chalicelib.utils import ch_client
from chalicelib.utils import exp_ch_helper
from chalicelib.utils import helper
from chalicelib.utils import sql_helper as sh
from chalicelib.core import events, metadata
logger = logging.getLogger(__name__)
def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas.ProjectContext,
metric_format: schemas.MetricExtendedFormatType) -> List[RealDictRow]:
stages: List[schemas.SessionSearchEventSchema2] = filter_d.events
filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters
platform = project.platform
constraints = ["e.project_id = %(project_id)s",
"e.created_at >= toDateTime(%(startTimestamp)s/1000)",
"e.created_at <= toDateTime(%(endTimestamp)s/1000)",
"e.`$event_name` IN %(eventTypes)s"]
full_args = {"project_id": project.project_id, "startTimestamp": filter_d.startTimestamp,
"endTimestamp": filter_d.endTimestamp}
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(timestamp=filter_d.startTimestamp,
platform=platform)
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(filter_d.startTimestamp)
full_args["MAIN_EVENTS_TABLE"] = MAIN_EVENTS_TABLE
full_args["MAIN_SESSIONS_TABLE"] = MAIN_SESSIONS_TABLE
n_stages_query = []
n_stages_query_not = []
event_types = []
values = {}
has_filters = False
if len(filters) > 0:
meta_keys = None
for i, f in enumerate(filters):
if len(f.value) == 0:
continue
has_filters = True
f.value = helper.values_for_operator(value=f.value, op=f.operator)
op = sh.get_sql_operator(f.operator)
filter_type = f.type
f_k = f"f_value{i}"
values = {**values,
**sh.multi_values(f.value, value_key=f_k)}
is_not = False
if sh.is_negation_operator(f.operator):
is_not = True
if filter_type == schemas.FilterType.USER_BROWSER:
constraints.append(
sh.multi_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
constraints.append(
sh.multi_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
constraints.append(
sh.multi_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
constraints.append(
sh.multi_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type == schemas.FilterType.DURATION:
if len(f.value) > 0 and f.value[0] is not None:
constraints.append('s.duration >= %(minDuration)s')
values["minDuration"] = f.value[0]
if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
constraints.append('s.duration <= %(maxDuration)s')
values["maxDuration"] = f.value[1]
elif filter_type == schemas.FilterType.REFERRER:
constraints.append(
sh.multi_conditions(f"s.base_referrer {op} %({f_k})s", f.value, is_not=is_not, value_key=f_k))
elif filter_type == events.EventType.METADATA.ui_type:
if meta_keys is None:
meta_keys = metadata.get(project_id=project.project_id)
meta_keys = {m["key"]: m["index"] for m in meta_keys}
if f.source in meta_keys.keys():
constraints.append(
sh.multi_conditions(
f's.{metadata.index_to_colname(meta_keys[f.source])} {op} %({f_k})s', f.value,
is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
constraints.append(
sh.multi_conditions(f's.user_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
constraints.append(
sh.multi_conditions(f's.user_anonymous_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
constraints.append(
sh.multi_conditions(f's.rev_id {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
i = -1
for s in stages:
if s.operator is None:
s.operator = schemas.SearchEventOperator.IS
if not isinstance(s.value, list):
s.value = [s.value]
is_any = sh.isAny_opreator(s.operator)
if not is_any and isinstance(s.value, list) and len(s.value) == 0:
continue
i += 1
op = sh.get_sql_operator(s.operator)
is_not = False
if sh.is_negation_operator(s.operator):
is_not = True
op = sh.reverse_sql_operator(op)
specific_condition = None
e_k = f"e_value{i}"
event_type = s.type
next_event_type = exp_ch_helper.get_event_type(event_type, platform=platform)
if event_type == events.EventType.CLICK.ui_type:
if platform == "web":
next_col_name = events.EventType.CLICK.column
if not is_any:
if schemas.ClickEventExtraOperator.has_value(s.operator):
specific_condition = sh.multi_conditions(f"selector {op} %({e_k})s", s.value, value_key=e_k)
else:
next_col_name = events.EventType.CLICK_MOBILE.column
elif event_type == events.EventType.INPUT.ui_type:
next_col_name = events.EventType.INPUT.column
elif event_type == events.EventType.LOCATION.ui_type:
next_col_name = 'url_path'
elif event_type == events.EventType.CUSTOM.ui_type:
next_col_name = events.EventType.CUSTOM.column
# IOS --------------
elif event_type == events.EventType.CLICK_MOBILE.ui_type:
next_col_name = events.EventType.CLICK_MOBILE.column
elif event_type == events.EventType.INPUT_MOBILE.ui_type:
next_col_name = events.EventType.INPUT_MOBILE.column
elif event_type == events.EventType.VIEW_MOBILE.ui_type:
next_col_name = events.EventType.VIEW_MOBILE.column
elif event_type == events.EventType.CUSTOM_MOBILE.ui_type:
next_col_name = events.EventType.CUSTOM_MOBILE.column
else:
logger.warning(f"=================UNDEFINED:{event_type}")
continue
values = {**values, **sh.multi_values(helper.values_for_operator(value=s.value, op=s.operator), value_key=e_k)}
if next_event_type not in event_types:
event_types.append(next_event_type)
full_args[f"event_type_{i}"] = next_event_type
n_stages_query.append(f"`$event_name`=%(event_type_{i})s")
if is_not:
n_stages_query_not.append(n_stages_query[-1] + " AND " +
(sh.multi_conditions(
f"JSON_VALUE(CAST(`$properties` AS String), '$.{next_col_name}') {op} %({e_k})s",
s.value,
is_not=is_not,
value_key=e_k
) if not specific_condition else specific_condition))
elif not is_any:
n_stages_query[-1] += " AND " + (
sh.multi_conditions(
f"JSON_VALUE(CAST(`$properties` AS String), '$.{next_col_name}') {op} %({e_k})s",
s.value,
is_not=is_not,
value_key=e_k
) if not specific_condition else specific_condition)
full_args = {"eventTypes": event_types, **full_args, **values}
n_stages = len(n_stages_query)
if n_stages == 0:
return []
extra_from = ""
if has_filters or metric_format == schemas.MetricExtendedFormatType.USER_COUNT:
extra_from = f"INNER JOIN {MAIN_SESSIONS_TABLE} AS s ON (e.session_id=s.session_id)"
constraints += ["s.project_id = %(project_id)s",
"s.datetime >= toDateTime(%(startTimestamp)s/1000)",
"s.datetime <= toDateTime(%(endTimestamp)s/1000)"]
if metric_format == schemas.MetricExtendedFormatType.SESSION_COUNT:
group_by = 'e.session_id'
else:
constraints.append("isNotNull(s.user_id)")
group_by = 's.user_id'
if len(n_stages_query_not) > 0:
value_conditions_not_base = ["project_id = %(project_id)s",
"created_at >= toDateTime(%(startTimestamp)s/1000)",
"created_at <= toDateTime(%(endTimestamp)s/1000)"]
_value_conditions_not = []
value_conditions_not = []
for c in n_stages_query_not:
_p = c % full_args
if _p not in _value_conditions_not:
_value_conditions_not.append(_p)
value_conditions_not.append(c)
extra_from += f"""LEFT ANTI JOIN (SELECT DISTINCT sub.session_id
FROM {MAIN_EVENTS_TABLE} AS sub
WHERE {' AND '.join(value_conditions_not_base)}
AND ({' OR '.join([c for c in value_conditions_not])})
) AS sub ON(e.session_id=sub.session_id)"""
del _value_conditions_not
del value_conditions_not
del value_conditions_not_base
sequences = []
projections = []
for i, s in enumerate(n_stages_query):
projections.append(f"coalesce(SUM(T{i + 1}),0) AS stage{i + 1}")
if i == 0:
sequences.append(f"anyIf(1,{s}) AS T1")
else:
pattern = ""
conditions = []
j = 0
while j <= i:
pattern += f"(?{j + 1})"
conditions.append(n_stages_query[j])
j += 1
sequences.append(f"sequenceMatch('{pattern}')(toDateTime(e.created_at), {','.join(conditions)}) AS T{i + 1}")
n_stages_query = f"""
SELECT {",".join(projections)}
FROM (SELECT {",".join(sequences)}
FROM {MAIN_EVENTS_TABLE} AS e {extra_from}
WHERE {" AND ".join(constraints)}
GROUP BY {group_by}) AS raw;"""
with ch_client.ClickHouseClient() as cur:
query = cur.format(query=n_stages_query, parameters=full_args)
logger.debug("---------------------------------------------------")
logger.debug(query)
logger.debug("---------------------------------------------------")
try:
row = cur.execute(query=query)
except Exception as err:
logger.warning("--------- SIMPLE FUNNEL SEARCH QUERY EXCEPTION CH-----------")
logger.warning(query)
logger.warning("--------- PAYLOAD -----------")
logger.warning(filter_d.model_dump_json())
logger.warning("--------------------")
raise err
stages_list = []
row = row[0]
for i, stage in enumerate(stages):
count = row[f"stage{i + 1}"]
drop = None
if i != 0:
base_count = row[f"stage{i}"]
if base_count == 0:
drop = 0
elif base_count > 0:
drop = int(100 * (base_count - count) / base_count)
stages_list.append(
{"value": stage.value,
"type": stage.type,
"operator": stage.operator,
"dropPct": drop,
"count": count
}
)
return stages_list
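
The funnel counting above hinges on ClickHouse's sequenceMatch: for stage i, the pattern '(?1)(?2)...(?i)' requires all earlier stage conditions to have matched in order within each group (session or user). A minimal sketch of the pattern and projection construction, mirroring the loop above with placeholder stage conditions:

stage_conditions = ["`$event_name`=%(event_type_0)s",
                    "`$event_name`=%(event_type_1)s",
                    "`$event_name`=%(event_type_2)s"]
sequences, projections = [], []
for i, cond in enumerate(stage_conditions):
    projections.append(f"coalesce(SUM(T{i + 1}),0) AS stage{i + 1}")
    if i == 0:
        # First stage: a plain conditional match.
        sequences.append(f"anyIf(1,{cond}) AS T1")
    else:
        # Later stages: ordered-sequence match over stages 1..i.
        pattern = "".join(f"(?{j + 1})" for j in range(i + 1))
        sequences.append(f"sequenceMatch('{pattern}')(toDateTime(e.created_at), "
                         f"{','.join(stage_conditions[:i + 1])}) AS T{i + 1}")
# T3 uses pattern '(?1)(?2)(?3)': stages 1, 2 and 3 must occur in that order.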

View file

@ -1,10 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
logger.info(">>> Using experimental product-analytics")
from .product_analytics_ch import *
else:
from .product_analytics import *

View file

@ -1,14 +0,0 @@
from chalicelib.utils.ch_client import ClickHouseClient
def search_events(project_id: int, data: dict):
with ClickHouseClient() as ch_client:
r = ch_client.format(
"""SELECT *
FROM taha.events
WHERE project_id=%(project_id)s
ORDER BY created_at;""",
parameters={"project_id": project_id})
x = ch_client.execute(r)
return x

View file

@ -1,6 +0,0 @@
TENANT_CONDITION = "TRUE"
MOB_KEY = ""
def get_file_key(project_id, session_id):
return {}
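
This stub is the single-tenant placeholder: TENANT_CONDITION is spliced into SQL f-strings (see get_all_by_tenant above) and collapses to TRUE in the OSS build, while the EE build presumably overrides this module with a real tenant filter. A sketch of the splice (the EE value shown is an assumption):

TENANT_CONDITION = "TRUE"  # OSS: no tenant scoping
# EE is assumed to override this module, e.g. with something like:
# TENANT_CONDITION = "projects.tenant_id = %(tenant_id)s"

query = f"""SELECT integrations.*
            FROM public.integrations INNER JOIN public.projects USING (project_id)
            WHERE provider = %(provider)s AND {TENANT_CONDITION}
              AND projects.deleted_at ISNULL;"""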

View file

@ -1,86 +1,56 @@
import logging
from time import time
from typing import List
import schemas
from chalicelib.core import metadata
from chalicelib.utils import helper
from chalicelib.core.metrics import __get_constraints, __get_constraint_values
from chalicelib.utils import helper, dev
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils import sql_helper as sh
from time import time
import logging
logger = logging.getLogger(__name__)
def __transform_journey(rows, reverse_path=False):
total_100p = 0
number_of_step1 = 0
for r in rows:
if r["event_number_in_session"] > 1:
break
number_of_step1 += 1
total_100p += r["sessions_count"]
# for i in range(number_of_step1):
# rows[i]["value"] = 100 / number_of_step1
# for i in range(number_of_step1, len(rows)):
for i in range(len(rows)):
rows[i]["value"] = rows[i]["sessions_count"] * 100 / total_100p
nodes = []
nodes_values = []
links = []
drops = []
max_depth = 0
for r in rows:
r["value"] = r["sessions_count"] * 100 / total_100p
source = f"{r['event_number_in_session'] - 1}_{r['event_type']}_{r['e_value']}"
source = f"{r['event_number_in_session']}_{r['event_type']}_{r['e_value']}"
if source not in nodes:
nodes.append(source)
nodes_values.append({"depth": r['event_number_in_session'] - 1,
"name": r['e_value'],
"eventType": r['event_type'],
"id": len(nodes_values)})
target = f"{r['event_number_in_session']}_{r['next_type']}_{r['next_value']}"
if target not in nodes:
nodes.append(target)
nodes_values.append({"depth": r['event_number_in_session'],
"name": r['next_value'],
"eventType": r['next_type'],
"id": len(nodes_values)})
sr_idx = nodes.index(source)
tg_idx = nodes.index(target)
link = {"eventType": r['event_type'], "sessionsCount": r["sessions_count"], "value": r["value"]}
if not reverse_path:
link["source"] = sr_idx
link["target"] = tg_idx
else:
link["source"] = tg_idx
link["target"] = sr_idx
links.append(link)
max_depth = r['event_number_in_session']
if r["next_type"] == "DROP":
for d in drops:
if d["depth"] == r['event_number_in_session']:
d["sessions_count"] += r["sessions_count"]
break
else:
drops.append({"depth": r['event_number_in_session'], "sessions_count": r["sessions_count"]})
for i in range(len(drops)):
if drops[i]["depth"] < max_depth:
source = f"{drops[i]['depth']}_DROP_None"
target = f"{drops[i]['depth'] + 1}_DROP_None"
sr_idx = nodes.index(source)
if i < len(drops) - 1 and drops[i]["depth"] + 1 == drops[i + 1]["depth"]:
tg_idx = nodes.index(target)
else:
nodes_values.append({"name": r['e_value'], "eventType": r['event_type'],
"avgTimeFromPrevious": 0, "sessionsCount": 0})
if r['next_value']:
target = f"{r['event_number_in_session'] + 1}_{r['next_type']}_{r['next_value']}"
if target not in nodes:
nodes.append(target)
nodes_values.append({"depth": drops[i]["depth"] + 1,
"name": None,
"eventType": "DROP",
"id": len(nodes_values)})
tg_idx = len(nodes) - 1
nodes_values.append({"name": r['next_value'], "eventType": r['next_type'],
"avgTimeFromPrevious": 0, "sessionsCount": 0})
link = {"eventType": "DROP",
"sessionsCount": drops[i]["sessions_count"],
"value": drops[i]["sessions_count"] * 100 / total_100p}
sr_idx = nodes.index(source)
tg_idx = nodes.index(target)
if r["avg_time_from_previous"] is not None:
nodes_values[tg_idx]["avgTimeFromPrevious"] += r["avg_time_from_previous"] * r["sessions_count"]
nodes_values[tg_idx]["sessionsCount"] += r["sessions_count"]
link = {"eventType": r['event_type'], "sessionsCount": r["sessions_count"],
"value": r["value"], "avgTimeFromPrevious": r["avg_time_from_previous"]}
if not reverse_path:
link["source"] = sr_idx
link["target"] = tg_idx
@ -88,10 +58,13 @@ def __transform_journey(rows, reverse_path=False):
link["source"] = tg_idx
link["target"] = sr_idx
links.append(link)
for n in nodes_values:
if n["sessionsCount"] > 0:
n["avgTimeFromPrevious"] = n["avgTimeFromPrevious"] / n["sessionsCount"]
else:
n["avgTimeFromPrevious"] = None
n.pop("sessionsCount")
if reverse_path:
for n in nodes_values:
n["depth"] = max_depth - n["depth"]
return {"nodes": nodes_values,
"links": sorted(links, key=lambda x: (x["source"], x["target"]), reverse=False)}
@ -433,9 +406,7 @@ WITH sub_sessions AS (SELECT session_id {sub_sessions_extra_projection}
{"UNION ALL".join(projection_query)};"""
params = {"project_id": project_id, "startTimestamp": data.startTimestamp,
"endTimestamp": data.endTimestamp, "density": data.density,
# This is ignored because UI will take care of it
# "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
"eventThresholdNumberInGroup": 8,
"eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
**extra_values}
query = cur.mogrify(pg_query, params)
_now = time()

View file

@ -1,7 +1,6 @@
import json
import logging
from collections import Counter
from typing import Optional, List
from collections import Counter
from fastapi import HTTPException, status
@ -10,8 +9,6 @@ from chalicelib.core import users
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
logger = logging.getLogger(__name__)
def __exists_by_name(name: str, exclude_id: Optional[int]) -> bool:
with pg_client.PostgresClient() as cur:
@ -413,6 +410,7 @@ def update_project_conditions(project_id, conditions):
create_project_conditions(project_id, to_be_created)
if to_be_updated:
print(to_be_updated)
update_project_condition(project_id, to_be_updated)
return get_conditions(project_id)
@ -427,45 +425,3 @@ def get_projects_ids(tenant_id):
cur.execute(query=query)
rows = cur.fetchall()
return [r["project_id"] for r in rows]
def delete_metadata_condition(project_id, metadata_key):
sql = """\
UPDATE public.projects_conditions
SET filters=(SELECT COALESCE(jsonb_agg(elem), '[]'::jsonb)
FROM jsonb_array_elements(filters) AS elem
WHERE NOT (elem ->> 'type' = 'metadata'
AND elem ->> 'source' = %(metadata_key)s))
WHERE project_id = %(project_id)s
AND jsonb_typeof(filters) = 'array'
AND EXISTS (SELECT 1
FROM jsonb_array_elements(filters) AS elem
WHERE elem ->> 'type' = 'metadata'
AND elem ->> 'source' = %(metadata_key)s);"""
with pg_client.PostgresClient() as cur:
query = cur.mogrify(sql, {"project_id": project_id, "metadata_key": metadata_key})
cur.execute(query)
def rename_metadata_condition(project_id, old_metadata_key, new_metadata_key):
sql = """\
UPDATE public.projects_conditions
SET filters = (SELECT jsonb_agg(CASE
WHEN elem ->> 'type' = 'metadata' AND elem ->> 'source' = %(old_metadata_key)s
THEN elem || ('{"source": "'||%(new_metadata_key)s||'"}')::jsonb
ELSE elem END)
FROM jsonb_array_elements(filters) AS elem)
WHERE project_id = %(project_id)s
AND jsonb_typeof(filters) = 'array'
AND EXISTS (SELECT 1
FROM jsonb_array_elements(filters) AS elem
WHERE elem ->> 'type' = 'metadata'
AND elem ->> 'source' = %(old_metadata_key)s);"""
with pg_client.PostgresClient() as cur:
query = cur.mogrify(sql, {"project_id": project_id, "old_metadata_key": old_metadata_key,
"new_metadata_key": new_metadata_key})
cur.execute(query)
# TODO: make project conditions use metadata-column-name instead of metadata-key
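
To make the two jsonb rewrites above concrete, here is the before/after shape of projects_conditions.filters when a metadata key is renamed (sample values invented):

before = [{"type": "metadata", "source": "plan", "value": ["pro"]},
          {"type": "user_country", "value": ["FR"]}]
# rename_metadata_condition(old_metadata_key='plan', new_metadata_key='subscription') yields:
after = [{"type": "metadata", "source": "subscription", "value": ["pro"]},
         {"type": "user_country", "value": ["FR"]}]
# delete_metadata_condition(metadata_key='plan') would instead drop the first element.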

Some files were not shown because too many files have changed in this diff