Compare commits
121 commits
main...timepicker
| Author | SHA1 | Date |
|---|---|---|
|  | ce53dd3a92 |  |
|  | 502e1135f1 |  |
|  | fc542f4671 |  |
|  | 1ac90a6059 |  |
|  | 41b9e38085 |  |
|  | 6e7a3b22ab |  |
|  | a319146519 |  |
|  | 075beaf5dc |  |
|  | 7385ed957f |  |
|  | 87085ee191 |  |
|  | e20043a639 |  |
|  | 26630b7454 |  |
|  | c499114c78 |  |
|  | 83897cb89c |  |
|  | 47411e7f2b |  |
|  | 6bf370b25f |  |
|  | 5ec3855491 |  |
|  | d95cfd9ff6 |  |
|  | f0d6d8ea12 |  |
|  | 9c57f3db32 |  |
|  | fa59621482 |  |
|  | e4ae3c8ba4 |  |
|  | af8cfacd98 |  |
|  | a2ad342a8f |  |
|  | d108e1db82 |  |
|  | 51496ae5e2 |  |
|  | 6b26f85d6e |  |
|  | 0ca9327c7b |  |
|  | 8914399cad |  |
|  | 885c20827e |  |
|  | 8888399359 |  |
|  | eca379feed |  |
|  | f9ec7bce68 |  |
|  | 3975962f70 |  |
|  | 0780b17a1a |  |
|  | 4b34eceb15 |  |
|  | 917dad581d |  |
|  | 72e42dfd5f |  |
|  | a9a5829000 |  |
|  | 1af02eb2c3 |  |
|  | ac519a0a10 |  |
|  | 14f72dbbd7 |  |
|  | 8326b913f1 |  |
|  | fdbed5139c |  |
|  | 76b0ae7b0d |  |
|  | 1a4893bde8 |  |
|  | 9e0f309453 |  |
|  | 262392f32a |  |
|  | f8a18006d3 |  |
|  | ddaaa1be71 |  |
|  | 0417f2c756 |  |
|  | af6dc4b839 |  |
|  | cceb4f0c25 |  |
|  | 1f16816252 |  |
|  | 77c084cac0 |  |
|  | c8775f3c15 |  |
|  | d52a47e2cc |  |
|  | 82854d014d |  |
|  | 65d83e91c5 |  |
|  | df67acc78f |  |
|  | e5997c662f |  |
|  | 9ae88e62fd |  |
|  | e73bd5fef5 |  |
|  | 93dc49bf41 |  |
|  | c2a3853bd9 |  |
|  | 5cca953a16 |  |
|  | 9367c977ca |  |
|  | 30dba23530 |  |
|  | 21c30af4f0 |  |
|  | da617747dc |  |
|  | ac2d12d95f |  |
|  | aa7b3fd617 |  |
|  | 94b541c758 |  |
|  | 45ef98b163 |  |
|  | 35eb7d4152 |  |
|  | 7c23521cb8 |  |
|  | 8a8df0a8cb |  |
|  | e54f62a0e6 |  |
|  | 0bdb416594 |  |
|  | 3484da2f60 |  |
|  | 2f164708e7 |  |
|  | c66296a050 |  |
|  | a1cf508cb3 |  |
|  | 38594319f0 |  |
|  | c963ec5e91 |  |
|  | c04090a778 |  |
|  | fc48ba4149 |  |
|  | 04db322e54 |  |
|  | c9ea3651db |  |
|  | 1dc63bf88b |  |
|  | 2fb7b3d542 |  |
|  | 57a21eb31d |  |
|  | e9a1a8c4eb |  |
|  | 14191c1de4 |  |
|  | 7e52c97d62 |  |
|  | 1cdb9bd06d |  |
|  | e7ad4c8bd0 |  |
|  | 29d69e5b24 |  |
|  | 2e5517509b |  |
|  | c95a4f6770 |  |
|  | 8af7d1a263 |  |
|  | 332cbb3516 |  |
|  | 1b564f53d5 |  |
|  | 1aa3b4b4e5 |  |
|  | d531b5da7e |  |
|  | e173591d88 |  |
|  | 359ecc85af |  |
|  | f0e8100283 |  |
|  | 251d727375 |  |
|  | b00a90484e |  |
|  | ce0686eec3 |  |
|  | 34232ed23c |  |
|  | 954bfbf8f7 |  |
|  | c0197cdfeb |  |
|  | 12ab110e0e |  |
|  | f48808f42e |  |
|  | b080a98764 |  |
|  | dd885c65ac |  |
|  | 0ad2836650 |  |
|  | 20b76a0ed9 |  |
|  | 884f3499ef |  |
2860 changed files with 72721 additions and 115740 deletions
.github/workflows/assist-stats.yaml (vendored, 5 changed lines)

@@ -15,7 +15,7 @@ on:
 - "!assist-stats/*-dev.sh"
 - "!assist-stats/requirements-*.txt"

-name: Build and Deploy Assist Stats ee
+name: Build and Deploy Assist Stats

 jobs:
 deploy:
@@ -123,9 +123,8 @@ jobs:
 tag: ${IMAGE_TAG}
 EOF

-export IMAGE_TAG=${IMAGE_TAG}
 # Update changed image tag
-yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml
+sed -i "/assist-stats/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

 cat /tmp/image_override.yaml
 # Deploy command
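For context on the change above: the `main` side rewrites the image tag with a structured `yq` expression, while the `timepicker` side patches a line a fixed number of rows below the matching service name with `sed`. A minimal sketch of the contrast — the file layout here is assumed for illustration, not taken from the repo's real charts, and the `n;n` offset is adjusted to this smaller example:

```bash
#!/usr/bin/env bash
# Hypothetical override file, two lines between the service key and its tag.
cat <<'EOF' >/tmp/image_override.yaml
utilities:
  apiCrons:
    assiststats:
      image:
        tag: "old"
EOF

# Structured update: addresses the value by path, independent of line positions.
IMAGE_TAG="main_abc123" yq '.utilities.apiCrons.assiststats.image.tag = strenv(IMAGE_TAG)' -i /tmp/image_override.yaml

# Positional update: jump two lines past the "assiststats" match and overwrite
# whatever is there, so it silently breaks if the YAML gains or loses a line.
IMAGE_TAG="main_abc123"
sed -i "/assiststats/{n;n;s/.*/        tag: \"${IMAGE_TAG}\"/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
```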
.github/workflows/crons-ee.yaml (vendored, 41 changed lines)

@@ -100,32 +100,33 @@ jobs:
 docker push $DOCKER_REPO/$image:$IMAGE_TAG
 done
 - name: Creating old image input
-env:
-# We're not passing -ee flag, because helm will add that.
-IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
 run: |
-cd scripts/helmcharts/
-cat <<EOF>/tmp/image_override.yaml
-image: &image
-tag: "${IMAGE_TAG}"
-utilities:
-apiCrons:
-assiststats:
-image: *image
-report:
-image: *image
-sessionsCleaner:
-image: *image
-projectsStats:
-image: *image
-fixProjectsStats:
-image: *image
+#
+# Create yaml with existing image tags
+#
+kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
+tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
+
+echo > /tmp/image_override.yaml
+
+for line in `cat /tmp/image_tag.txt`;
+do
+image_array=($(echo "$line" | tr ':' '\n'))
+cat <<EOF >> /tmp/image_override.yaml
+${image_array[0]}:
+image:
+# We've to strip off the -ee, as helm will append it.
+tag: `echo ${image_array[1]} | cut -d '-' -f 1`
 EOF
+done

 - name: Deploy to kubernetes
 run: |
 cd scripts/helmcharts/

+# Update changed image tag
+sed -i "/crons/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

 cat /tmp/image_override.yaml
 # Deploy command
 mkdir -p /tmp/charts
@@ -135,6 +136,8 @@ jobs:
 helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
 env:
 DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
+# We're not passing -ee flag, because helm will add that.
+IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
 ENVIRONMENT: staging

 - name: Alert slack
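The "Creating old image input" step on the `timepicker` side rebuilds an override file from whatever images are currently deployed in the cluster. A rough sketch of the intermediate data it produces — the image name and tag below are illustrative, not real values from the cluster:

```bash
#!/usr/bin/env bash
# Assumed input: one entry from /tmp/image_tag.txt, which the workflow fills from
#   kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}"
line="chalice:v1.22.0-ee"

# Split "name:tag" into an array -> ("chalice" "v1.22.0-ee")
image_array=($(echo "$line" | tr ':' '\n'))

# Append a per-service override; the -ee suffix is stripped because the helm
# chart appends it again on the EE deployment.
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
  image:
    tag: $(echo "${image_array[1]}" | cut -d '-' -f 1)
EOF

cat /tmp/image_override.yaml
```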
.github/workflows/patch-build-old.yaml (vendored, 189 changed lines, file removed)

@@ -1,189 +0,0 @@
-# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
-
-on:
-workflow_dispatch:
-inputs:
-services:
-description: 'Comma separated names of services to build(in small letters).'
-required: true
-default: 'chalice,frontend'
-tag:
-description: 'Tag to update.'
-required: true
-type: string
-branch:
-description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
-required: true
-type: string
-
-name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag
-
-jobs:
-deploy:
-name: Build Patch from old tag
-runs-on: ubuntu-latest
-env:
-DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
-DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
-steps:
-- name: Checkout
-uses: actions/checkout@v2
-with:
-fetch-depth: 4
-ref: ${{ github.event.inputs.tag }}
-
-- name: Set Remote with GITHUB_TOKEN
-run: |
-git config --unset http.https://github.com/.extraheader
-git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
-
-- name: Create backup tag with timestamp
-run: |
-set -e # Exit immediately if a command exits with a non-zero status
-TIMESTAMP=$(date +%Y%m%d%H%M%S)
-BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
-echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
-echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
-git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
-git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
-echo "Created backup tag: $BACKUP_TAG"
-
-# Get the oldest commit date from the last 3 commits in raw format
-OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
-echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
-# Add 1 second to the timestamp
-NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
-echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV
-
-
-- name: Setup yq
-uses: mikefarah/yq@master
-
-# Configure AWS credentials for the first registry
-- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
-uses: aws-actions/configure-aws-credentials@v1
-with:
-aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
-aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
-aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
-
-- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
-id: login-ecr-arm
-run: |
-aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
-aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
-
-- uses: depot/setup-action@v1
-- name: Get HEAD Commit ID
-run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
-- name: Define Branch Name
-run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV
-
-- name: Build
-id: build-image
-env:
-DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
-DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
-MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
-MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
-MSAAS_REPO_FOLDER: /tmp/msaas
-run: |
-set -exo pipefail
-git config --local user.email "action@github.com"
-git config --local user.name "GitHub Action"
-git checkout -b $BRANCH_NAME
-working_dir=$(pwd)
-function image_version(){
-local service=$1
-chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-current_version=$(yq eval '.AppVersion' $chart_path)
-new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
-echo $new_version
-# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
-}
-function clone_msaas() {
-[ -d $MSAAS_REPO_FOLDER ] || {
-git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
-cd $MSAAS_REPO_FOLDER
-cd openreplay && git fetch origin && git checkout $INPUT_TAG
-git log -1
-cd $MSAAS_REPO_FOLDER
-bash git-init.sh
-git checkout
-}
-}
-function build_managed() {
-local service=$1
-local version=$2
-echo building managed
-clone_msaas
-if [[ $service == 'chalice' ]]; then
-cd $MSAAS_REPO_FOLDER/openreplay/api
-else
-cd $MSAAS_REPO_FOLDER/openreplay/$service
-fi
-IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
-}
-# Checking for backend images
-ls backend/cmd >> /tmp/backend.txt
-echo Services: "${{ github.event.inputs.services }}"
-IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
-BUILD_SCRIPT_NAME="build.sh"
-# Build FOSS
-for SERVICE in "${SERVICES[@]}"; do
-# Check if service is backend
-if grep -q $SERVICE /tmp/backend.txt; then
-cd backend
-foss_build_args="nil $SERVICE"
-ee_build_args="ee $SERVICE"
-else
-[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
-[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
-ee_build_args="ee"
-fi
-version=$(image_version $SERVICE)
-echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
-IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
-if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
-IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-else
-build_managed $SERVICE $version
-fi
-cd $working_dir
-chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
-yq eval ".AppVersion = \"$version\"" -i $chart_path
-git add $chart_path
-git commit -m "Increment $SERVICE chart version"
-done
-
-- name: Change commit timestamp
-run: |
-# Convert the timestamp to a date format git can understand
-NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
-echo "Setting commit date to: $NEW_DATE"
-
-# Amend the commit with the new date
-GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
-
-# Verify the change
-git log -1 --pretty=format:"Commit now dated: %cD"
-
-# git tag and push
-git tag $INPUT_TAG -f
-git push origin $INPUT_TAG -f
-
-
-# - name: Debug Job
-# if: ${{ failure() }}
-# uses: mxschmitt/action-tmate@v3
-# env:
-# DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
-# DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
-# MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
-# MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
-# MSAAS_REPO_FOLDER: /tmp/msaas
-# with:
-# limit-access-to-actor: true
.github/workflows/patch-build.yaml (vendored, 247 changed lines)

@@ -2,6 +2,7 @@

 on:
 workflow_dispatch:
+description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
 inputs:
 services:
 description: 'Comma separated names of services to build(in small letters).'
@@ -19,20 +20,12 @@ jobs:
 DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
 steps:
 - name: Checkout
-uses: actions/checkout@v4
+uses: actions/checkout@v2
 with:
-fetch-depth: 0
+fetch-depth: 1
-token: ${{ secrets.GITHUB_TOKEN }}
 - name: Rebase with main branch, to make sure the code has latest main changes
-if: github.ref != 'refs/heads/main'
 run: |
-git remote -v
-git config --global user.email "action@github.com"
-git config --global user.name "GitHub Action"
-git config --global rebase.autoStash true
-git fetch origin main:main
-git rebase main
-git log -3
+git pull --rebase origin main

 - name: Downloading yq
 run: |
@@ -55,8 +48,6 @@ jobs:
 aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

 - uses: depot/setup-action@v1
-env:
-DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
 - name: Get HEAD Commit ID
 run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
 - name: Define Branch Name
@@ -74,168 +65,79 @@ jobs:
 MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
 MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
 MSAAS_REPO_FOLDER: /tmp/msaas
-SERVICES_INPUT: ${{ github.event.inputs.services }}
 run: |
-#!/bin/bash
-set -euo pipefail
-
-# Configuration
-readonly WORKING_DIR=$(pwd)
-readonly BUILD_SCRIPT_NAME="build.sh"
-readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
-
-# Initialize git configuration
-setup_git() {
-git config --local user.email "action@github.com"
-git config --local user.name "GitHub Action"
-git checkout -b "$BRANCH_NAME"
+set -exo pipefail
+git config --local user.email "action@github.com"
+git config --local user.name "GitHub Action"
+git checkout -b $BRANCH_NAME
+working_dir=$(pwd)
+function image_version(){
+local service=$1
+chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
+current_version=$(yq eval '.AppVersion' $chart_path)
+new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
+echo $new_version
+# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
 }
-
-# Get and increment image version
-image_version() {
-local service=$1
-local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-local current_version new_version
-
-current_version=$(yq eval '.AppVersion' "$chart_path")
-new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
-echo "$new_version"
-
+function clone_msaas() {
+[ -d $MSAAS_REPO_FOLDER ] || {
+git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
+cd $MSAAS_REPO_FOLDER
+git log -1
+cd openreplay && git fetch origin && git checkout main # This have to be changed to specific tag
+git log -1
+cd $MSAAS_REPO_FOLDER
+bash git-init.sh
+git checkout
+}
 }
-
-# Clone MSAAS repository if not exists
-clone_msaas() {
-if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
-git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
-cd "$MSAAS_REPO_FOLDER"
-cd openreplay && git fetch origin && git checkout main
-git log -1
-cd "$MSAAS_REPO_FOLDER"
-bash git-init.sh
-git checkout
-fi
+function build_managed() {
+local service=$1
+local version=$2
+echo building managed
+clone_msaas
+if [[ $service == 'chalice' ]]; then
+cd $MSAAS_REPO_FOLDER/openreplay/api
+else
+cd $MSAAS_REPO_FOLDER/openreplay/$service
+fi
+IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
 }
-
-# Build managed services
-build_managed() {
-local service=$1
-local version=$2
-
-echo "Building managed service: $service"
-clone_msaas
-
-if [[ $service == 'chalice' ]]; then
-cd "$MSAAS_REPO_FOLDER/openreplay/api"
-else
-cd "$MSAAS_REPO_FOLDER/openreplay/$service"
-fi
-
-local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
-
-echo "Executing: $build_cmd"
-if ! eval "$build_cmd" 2>&1; then
-echo "Build failed for $service"
-exit 1
-fi
-}
-
-# Build service with given arguments
-build_service() {
-local service=$1
-local version=$2
-local build_args=$3
-local build_script=${4:-$BUILD_SCRIPT_NAME}
-
-local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
-echo "Executing: $command"
-eval "$command"
-}
+# Checking for backend images
+ls backend/cmd >> /tmp/backend.txt
+echo Services: "${{ github.event.inputs.services }}"
+IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
+BUILD_SCRIPT_NAME="build.sh"
+# Build FOSS
+for SERVICE in "${SERVICES[@]}"; do
+# Check if service is backend
+if grep -q $SERVICE /tmp/backend.txt; then
+cd backend
+foss_build_args="nil $SERVICE"
+ee_build_args="ee $SERVICE"
+else
+[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
+[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
+ee_build_args="ee"
+fi
+version=$(image_version $SERVICE)
+echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
+IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
+if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
+IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+else
+build_managed $SERVICE $version
+fi
+cd $working_dir
+chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
+yq eval ".AppVersion = \"$version\"" -i $chart_path
+git add $chart_path
+git commit -m "Increment $SERVICE chart version"
+git push --set-upstream origin $BRANCH_NAME
+done

-# Update chart version and commit changes
-update_chart_version() {
-local service=$1
-local version=$2
-local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-
-# Ensure we're in the original working directory/repository
-cd "$WORKING_DIR"
-yq eval ".AppVersion = \"$version\"" -i "$chart_path"
-git add "$chart_path"
-git commit -m "Increment $service chart version to $version"
-git push --set-upstream origin "$BRANCH_NAME"
-cd -
-}
-
-# Main execution
-main() {
-setup_git
-
-# Get backend services list
-ls backend/cmd >"$BACKEND_SERVICES_FILE"
-
-# Parse services input (fix for GitHub Actions syntax)
-echo "Services: ${SERVICES_INPUT:-$1}"
-IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
-
-# Process each service
-for service in "${services[@]}"; do
-echo "Processing service: $service"
-cd "$WORKING_DIR"
-
-local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
-
-# Determine build configuration based on service type
-if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
-# Backend service
-cd backend
-foss_build_args="nil $service"
-ee_build_args="ee $service"
-else
-# Non-backend service
-case "$service" in
-chalice | alerts | crons)
-cd "$WORKING_DIR/api"
-;;
-*)
-cd "$service"
-;;
-esac
-
-# Special build scripts for alerts/crons
-if [[ $service == 'alerts' || $service == 'crons' ]]; then
-build_script="build_${service}.sh"
-fi
-
-ee_build_args="ee"
-fi
-
-# Get version and build
-local version
-version=$(image_version "$service")
-
-# Build FOSS and EE versions
-build_service "$service" "$version" "$foss_build_args"
-build_service "$service" "${version}-ee" "$ee_build_args"
-
-# Build managed version for specific services
-if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
-echo "Nothing to build in managed for service $service"
-else
-build_managed "$service" "$version"
-fi
-
-# Update chart and commit
-update_chart_version "$service" "$version"
-done
-cd "$WORKING_DIR"
-
-# Cleanup
-rm -f "$BACKEND_SERVICES_FILE"
-}
-
-echo "Working directory: $WORKING_DIR"
-# Run main function with all arguments
-main "$SERVICES_INPUT"

 - name: Create Pull Request
 uses: repo-sync/pull-request@v2
@@ -246,7 +148,8 @@ jobs:
 pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
 pr_body: |
 This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
-Once this PR is merged, tag update job will run automatically.
+Once this PR is merged, To update the latest tag, run the following workflow.
+https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml

 # - name: Debug Job
 # if: ${{ failure() }}
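Both sides of `image_version` bump the chart's `AppVersion` with the same `awk` one-liner; a standalone sketch of that bump, using an example version string:

```bash
#!/usr/bin/env bash
current_version="1.22.3"   # example value; the workflow reads it from Chart.yaml with yq
# Increment the last dot-separated field: 1.22.3 -> 1.22.4
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
echo "$new_version"        # prints 1.22.4
```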
@@ -1,4 +1,4 @@
-# This action will push the assist changes to aws
+# This action will push the peers changes to aws
 on:
 workflow_dispatch:
 inputs:
@@ -10,9 +10,12 @@ on:
 branches:
 - dev
 paths:
-- "ee/assist-server/**"
+- "ee/peers/**"
+- "peers/**"
+- "!peers/.gitignore"
+- "!peers/*-dev.sh"

-name: Build and Deploy Assist-Server EE
+name: Build and Deploy Peers EE

 jobs:
 deploy:
@@ -53,7 +56,12 @@ jobs:
 kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
 id: setcontext

-- name: Building and Pushing Assist-Server image
+# Caching docker images
+- uses: satackey/action-docker-layer-caching@v0.0.11
+# Ignore the failure of a step and avoid terminating the job.
+continue-on-error: true
+
+- name: Building and Pushing peers image
 id: build-image
 env:
 DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
@@ -61,11 +69,11 @@ jobs:
 ENVIRONMENT: staging
 run: |
 skip_security_checks=${{ github.event.inputs.skip_security_checks }}
-cd assist-server
+cd peers
 PUSH_IMAGE=0 bash -x ./build.sh ee
 [[ "x$skip_security_checks" == "xtrue" ]] || {
 curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
-images=("assist-server")
+images=("peers")
 for image in ${images[*]};do
 ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
 done
@@ -76,7 +84,7 @@ jobs:
 } && {
 echo "Skipping Security Checks"
 }
-images=("assist-server")
+images=("peers")
 for image in ${images[*]};do
 docker push $DOCKER_REPO/$image:$IMAGE_TAG
 done
@@ -100,23 +108,43 @@ jobs:
 tag: `echo ${image_array[1]} | cut -d '-' -f 1`
 EOF
 done

 - name: Deploy to kubernetes
 run: |
-pwd
 cd scripts/helmcharts/

 # Update changed image tag
-sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+sed -i "/peers/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

 cat /tmp/image_override.yaml
 # Deploy command
 mkdir -p /tmp/charts
-mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
+mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
 rm -rf openreplay/charts/*
 mv /tmp/charts/* openreplay/charts/
 helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
 env:
 DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-# We're not passing -ee flag, because helm will add that.
 IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
 ENVIRONMENT: staging

+- name: Alert slack
+if: ${{ failure() }}
+uses: rtCamp/action-slack-notify@v2
+env:
+SLACK_CHANNEL: ee
+SLACK_TITLE: "Failed ${{ github.workflow }}"
+SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
+SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
+SLACK_USERNAME: "OR Bot"
+SLACK_MESSAGE: "Build failed :bomb:"
+
+# - name: Debug Job
+# # if: ${{ failure() }}
+# uses: mxschmitt/action-tmate@v3
+# env:
+# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
+# IMAGE_TAG: ${{ github.sha }}-ee
+# ENVIRONMENT: staging
+# with:
+# iimit-access-to-actor: true
.github/workflows/peers.yaml (vendored, normal file, 148 added lines)

@@ -0,0 +1,148 @@
+# This action will push the peers changes to aws
+on:
+workflow_dispatch:
+inputs:
+skip_security_checks:
+description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
+required: false
+default: "false"
+push:
+branches:
+- dev
+paths:
+- "peers/**"
+- "!peers/.gitignore"
+- "!peers/*-dev.sh"
+
+name: Build and Deploy Peers
+
+jobs:
+deploy:
+name: Deploy
+runs-on: ubuntu-latest
+
+steps:
+- name: Checkout
+uses: actions/checkout@v2
+with:
+# We need to diff with old commit
+# to see which workers got changed.
+fetch-depth: 2
+
+- uses: ./.github/composite-actions/update-keys
+with:
+assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
+assist_key: ${{ secrets.ASSIST_KEY }}
+domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
+jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
+jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
+jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
+jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
+license_key: ${{ secrets.OSS_LICENSE_KEY }}
+minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
+minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
+pg_password: ${{ secrets.OSS_PG_PASSWORD }}
+registry_url: ${{ secrets.OSS_REGISTRY_URL }}
+name: Update Keys
+
+- name: Docker login
+run: |
+docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
+
+- uses: azure/k8s-set-context@v1
+with:
+method: kubeconfig
+kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
+id: setcontext
+
+# Caching docker images
+- uses: satackey/action-docker-layer-caching@v0.0.11
+# Ignore the failure of a step and avoid terminating the job.
+continue-on-error: true
+
+- name: Building and Pushing peers image
+id: build-image
+env:
+DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
+ENVIRONMENT: staging
+run: |
+skip_security_checks=${{ github.event.inputs.skip_security_checks }}
+cd peers
+PUSH_IMAGE=0 bash -x ./build.sh
+[[ "x$skip_security_checks" == "xtrue" ]] || {
+curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
+images=("peers")
+for image in ${images[*]};do
+./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+done
+err_code=$?
+[[ $err_code -ne 0 ]] && {
+exit $err_code
+}
+} && {
+echo "Skipping Security Checks"
+}
+images=("peers")
+for image in ${images[*]};do
+docker push $DOCKER_REPO/$image:$IMAGE_TAG
+done
+- name: Creating old image input
+run: |
+#
+# Create yaml with existing image tags
+#
+kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
+tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
+
+echo > /tmp/image_override.yaml
+
+for line in `cat /tmp/image_tag.txt`;
+do
+image_array=($(echo "$line" | tr ':' '\n'))
+cat <<EOF >> /tmp/image_override.yaml
+${image_array[0]}:
+image:
+tag: ${image_array[1]}
+EOF
+done
+
+- name: Deploy to kubernetes
+run: |
+cd scripts/helmcharts/
+
+# Update changed image tag
+sed -i "/peers/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+
+cat /tmp/image_override.yaml
+# Deploy command
+mkdir -p /tmp/charts
+mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
+rm -rf openreplay/charts/*
+mv /tmp/charts/* openreplay/charts/
+helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
+env:
+DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
+ENVIRONMENT: staging
+
+- name: Alert slack
+if: ${{ failure() }}
+uses: rtCamp/action-slack-notify@v2
+env:
+SLACK_CHANNEL: foss
+SLACK_TITLE: "Failed ${{ github.workflow }}"
+SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
+SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
+SLACK_USERNAME: "OR Bot"
+SLACK_MESSAGE: "Build failed :bomb:"
+
+# - name: Debug Job
+# # if: ${{ failure() }}
+# uses: mxschmitt/action-tmate@v3
+# env:
+# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
+# IMAGE_TAG: ${{ github.sha }}-ee
+# ENVIRONMENT: staging
+# with:
+# limit-access-to-actor: true
.github/workflows/release-deployment.yaml (vendored, 31 changed lines)

@@ -4,7 +4,7 @@ on:
 workflow_dispatch:
 inputs:
 services:
-description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
+description: 'Comma-separated list of services to deploy'
 required: true
 branch:
 description: 'Branch to deploy (defaults to dev)'
@@ -14,7 +14,7 @@ on:
 env:
 IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
 DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
-DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
+DOCKER_REPO_OSS: ${{ secrets.OSS_REGISTRY_URL }}

 jobs:
 deploy:
@@ -26,13 +26,13 @@ jobs:
 ref: ${{ github.event.inputs.branch }}
 - name: Docker login
 run: |
-docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
+docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

 - name: Set image tag with branch info
 run: |
 SHORT_SHA=$(git rev-parse --short HEAD)
-echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
-echo "Using image tag: $IMAGE_TAG"
+echo "IMAGE_TAG=${IMAGE_TAG}-${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
+echo "Using image tag: ${IMAGE_TAG}-${{ github.event.inputs.branch }}-${SHORT_SHA}"

 - uses: depot/setup-action@v1

@@ -40,34 +40,24 @@ jobs:
 run: |
 # Parse the comma-separated services list into an array
 IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
-working_dir=$(pwd)

 # Define backend services (consider moving this to workflow inputs or repo config)
 ls backend/cmd >> /tmp/backend.txt
-BUILD_SCRIPT_NAME="build.sh"

 for SERVICE in "${SERVICES[@]}"; do
 # Check if service is backend
 if grep -q $SERVICE /tmp/backend.txt; then
-cd $working_dir/backend
+cd backend
 foss_build_args="nil $SERVICE"
 ee_build_args="ee $SERVICE"
 else
-cd $working_dir
 [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
 [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
 ee_build_args="ee"
 fi
-{
-echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
-}&
-{
-echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
-IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
-}&
+echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
 done
-wait

 - uses: azure/k8s-set-context@v1
 name: Using ee release cluster
@@ -82,7 +72,7 @@ jobs:
 for SERVICE in "${SERVICES[@]}"; do
 SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
 echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
-kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
+kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${{ secrets.RELEASE_OSS_REGISTRY }}/$SERVICE:${IMAGE_TAG}
 done

 - uses: azure/k8s-set-context@v1
@@ -98,6 +88,5 @@ jobs:
 for SERVICE in "${SERVICES[@]}"; do
 SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
 echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
-echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
-kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
+kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${{ secrets.RELEASE_OSS_REGISTRY }}/$SERVICE:${IMAGE_TAG}
 done
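The `main` side of the build step above runs the FOSS and EE builds as background subshells and then waits for all of them. A minimal sketch of that `{ ... } &` / `wait` pattern, with placeholder commands standing in for the real `build.sh` invocations:

```bash
#!/usr/bin/env bash
for SERVICE in frontend chalice sink; do
  {
    echo "building $SERVICE (foss)"   # placeholder for the FOSS build.sh call
    sleep 1
  } &
  {
    echo "building $SERVICE (ee)"     # placeholder for the EE build.sh call
    sleep 1
  } &
done
wait   # block until every background build has finished
```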
.github/workflows/sourcemaps-reader-ee.yaml (vendored, 9 changed lines)

@@ -1,4 +1,4 @@
-# This action will push the sourcemapreader changes to ee
+# This action will push the sourcemapreader changes to aws
 on:
 workflow_dispatch:
 inputs:
@@ -10,12 +10,11 @@ on:
 branches:
 - dev
 paths:
-- "ee/sourcemap-reader/**"
 - "sourcemap-reader/**"
 - "!sourcemap-reader/.gitignore"
 - "!sourcemap-reader/*-dev.sh"

-name: Build and Deploy sourcemap-reader EE
+name: Build and Deploy sourcemap-reader

 jobs:
 deploy:
@@ -64,7 +63,7 @@ jobs:
 - name: Building and Pushing sourcemaps-reader image
 id: build-image
 env:
-DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
+DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
 IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
 ENVIRONMENT: staging
 run: |
@@ -132,7 +131,7 @@ jobs:
 if: ${{ failure() }}
 uses: rtCamp/action-slack-notify@v2
 env:
-SLACK_CHANNEL: ee
+SLACK_CHANNEL: foss
 SLACK_TITLE: "Failed ${{ github.workflow }}"
 SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
 SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
.github/workflows/update-tag.yaml (vendored, 47 changed lines)

@@ -1,42 +1,35 @@
 on:
-pull_request:
-types: [closed]
-branches:
-- main
-name: Release tag update --force
+workflow_dispatch:
+description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
+inputs:
+services:
+description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
+required: true
+default: "false"
+
+name: Force Push tag with main branch HEAD
+
 jobs:
 deploy:
 name: Build Patch from main
 runs-on: ubuntu-latest
-if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
+env:
+DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
+DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
 steps:
 - name: Checkout
 uses: actions/checkout@v2

-- name: Get latest release tag using GitHub API
-id: get-latest-tag
-run: |
-LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-"https://api.github.com/repos/${{ github.repository }}/releases/latest" \
-| jq -r .tag_name)
-
-# Fallback to git command if API doesn't return a tag
-if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
-echo "Not found latest tag"
-exit 100
-fi
-
-echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
-echo "Latest tag: $LATEST_TAG"
-
 - name: Set Remote with GITHUB_TOKEN
 run: |
 git config --unset http.https://github.com/.extraheader
-git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}
+git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git

 - name: Push main branch to tag
 run: |
+git fetch --tags
 git checkout main
-echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
-git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
+git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
+# - name: Debug Job
+# if: ${{ failure() }}
+# uses: mxschmitt/action-tmate@v3
+# with:
+# limit-access-to-actor: true
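The two sides resolve "the latest release tag" differently: `main` asks the GitHub releases API and force-pushes the `main` HEAD to that tag, while `timepicker` picks the highest `v*` tag by version sort. A small sketch of the tag-sorting variant, runnable in any clone once tags are fetched:

```bash
#!/usr/bin/env bash
git fetch --tags

# Highest semver-style tag, e.g. v1.22.0 (version sort, newest first).
latest_tag=$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1)
echo "latest tag resolved to: ${latest_tag}"

# The workflow then force-moves that tag to the current HEAD of main:
#   git push origin HEAD:refs/tags/${latest_tag} --force
```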
LICENSE (2 changed lines)

@@ -1,4 +1,4 @@
-Copyright (c) 2021-2025 Asayer, Inc dba OpenReplay
+Copyright (c) 2021-2024 Asayer, Inc dba OpenReplay

 OpenReplay monorepo uses multiple licenses. Portions of this software are licensed as follows:
 - All content that resides under the "ee/" directory of this repository, is licensed under the license defined in "ee/LICENSE".
@@ -1,17 +1,10 @@
-FROM python:3.12-alpine AS builder
-LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
-LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
+FROM python:3.11-alpine
+LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
+LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"

-RUN apk add --no-cache build-base
-WORKDIR /work
-COPY requirements.txt ./requirements.txt
-RUN pip install --no-cache-dir --upgrade uv && \
-export UV_SYSTEM_PYTHON=true && \
-uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
-uv pip install --no-cache-dir --upgrade -r requirements.txt
-
-FROM python:3.12-alpine
 ARG GIT_SHA
+LABEL GIT_SHA=$GIT_SHA
+
+RUN apk add --no-cache build-base tini
 ARG envarg
 # Add Tini
 # Startup daemon
@@ -21,11 +14,19 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
 PRIVATE_ENDPOINTS=false \
 ENTERPRISE_BUILD=${envarg} \
 GIT_SHA=$GIT_SHA
-COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
-COPY --from=builder /usr/local/bin /usr/local/bin
 WORKDIR /work
+COPY requirements.txt ./requirements.txt
+RUN pip install --no-cache-dir --upgrade uv
+RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
+RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system
+
 COPY . .
-RUN apk add --no-cache tini && mv env.default .env
+RUN mv env.default .env

+RUN adduser -u 1001 openreplay -D
+USER 1001
+
 ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["./entrypoint.sh"]
+CMD ./entrypoint.sh
@@ -1,4 +1,4 @@
-FROM python:3.12-alpine
+FROM python:3.11-alpine
 LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
 LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
 ARG GIT_SHA
api/Pipfile (25 changed lines)

@@ -4,26 +4,23 @@ verify_ssl = true
 name = "pypi"

 [packages]
-urllib3 = "==2.3.0"
+urllib3 = "==1.26.16"
 requests = "==2.32.3"
-boto3 = "==1.36.12"
-pyjwt = "==2.10.1"
+boto3 = "==1.35.60"
+pyjwt = "==2.9.0"
 psycopg2-binary = "==2.9.10"
-psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
-clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
-clickhouse-connect = "==0.8.15"
-elasticsearch = "==8.17.1"
+psycopg = {extras = ["pool", "binary"], version = "==3.2.3"}
+elasticsearch = "==8.16.0"
 jira = "==3.8.0"
-cachetools = "==5.5.1"
-fastapi = "==0.115.8"
-uvicorn = {extras = ["standard"], version = "==0.34.0"}
+cachetools = "==5.5.0"
+fastapi = "==0.115.5"
+uvicorn = {extras = ["standard"], version = "==0.32.0"}
 python-decouple = "==3.8"
-pydantic = {extras = ["email"], version = "==2.10.6"}
-apscheduler = "==3.11.0"
-redis = "==5.2.1"
+pydantic = {extras = ["email"], version = "==2.9.2"}
+apscheduler = "==3.10.4"
+redis = "==5.2.0"

 [dev-packages]

 [requires]
 python_version = "3.12"
-python_full_version = "3.12.8"
10
api/app.py
10
api/app.py
|
|
@ -13,16 +13,17 @@ from psycopg.rows import dict_row
|
||||||
from starlette.responses import StreamingResponse
|
from starlette.responses import StreamingResponse
|
||||||
|
|
||||||
from chalicelib.utils import helper
|
from chalicelib.utils import helper
|
||||||
from chalicelib.utils import pg_client, ch_client
|
from chalicelib.utils import pg_client
|
||||||
from crons import core_crons, core_dynamic_crons
|
from crons import core_crons, core_dynamic_crons
|
||||||
from routers import core, core_dynamic
|
from routers import core, core_dynamic
|
||||||
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics
|
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot
|
||||||
|
|
||||||
loglevel = config("LOGLEVEL", default=logging.WARNING)
|
loglevel = config("LOGLEVEL", default=logging.WARNING)
|
||||||
print(f">Loglevel set to: {loglevel}")
|
print(f">Loglevel set to: {loglevel}")
|
||||||
logging.basicConfig(level=loglevel)
|
logging.basicConfig(level=loglevel)
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
class ORPYAsyncConnection(AsyncConnection):
|
class ORPYAsyncConnection(AsyncConnection):
|
||||||
|
|
||||||
def __init__(self, *args, **kwargs):
|
def __init__(self, *args, **kwargs):
|
||||||
|
|
@ -38,7 +39,6 @@ async def lifespan(app: FastAPI):
|
||||||
|
|
||||||
app.schedule = AsyncIOScheduler()
|
app.schedule = AsyncIOScheduler()
|
||||||
await pg_client.init()
|
await pg_client.init()
|
||||||
await ch_client.init()
|
|
||||||
app.schedule.start()
|
app.schedule.start()
|
||||||
|
|
||||||
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
|
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
|
||||||
|
|
@ -128,7 +128,3 @@ app.include_router(usability_tests.app_apikey)
|
||||||
app.include_router(spot.public_app)
|
app.include_router(spot.public_app)
|
||||||
app.include_router(spot.app)
|
app.include_router(spot.app)
|
||||||
app.include_router(spot.app_apikey)
|
app.include_router(spot.app_apikey)
|
||||||
|
|
||||||
app.include_router(product_anaytics.public_app)
|
|
||||||
app.include_router(product_anaytics.app)
|
|
||||||
app.include_router(product_anaytics.app_apikey)
|
|
||||||
|
|
|
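Note on the api/app.py hunks above: main opens a ClickHouse pool alongside PostgreSQL inside the FastAPI lifespan, while the timepicker branch only initializes PostgreSQL. A minimal, self-contained sketch of that lifespan pattern follows; the `_FakePool` stand-ins replace the real chalicelib.utils.pg_client / ch_client modules and are assumptions for illustration only.

from contextlib import asynccontextmanager

from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI


class _FakePool:
    # Stand-in for chalicelib.utils.pg_client / ch_client (assumption for this sketch).
    async def init(self): ...
    async def terminate(self): ...


pg_client = _FakePool()
ch_client = _FakePool()


@asynccontextmanager
async def lifespan(app: FastAPI):
    # Startup: create the scheduler and open the connection pools.
    app.schedule = AsyncIOScheduler()
    await pg_client.init()   # PostgreSQL pool (both branches)
    await ch_client.init()   # ClickHouse pool (present on main only)
    app.schedule.start()
    yield
    # Shutdown: stop scheduled jobs and release the pools.
    app.schedule.shutdown(wait=False)
    await pg_client.terminate()


app = FastAPI(lifespan=lifespan)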
@@ -5,14 +5,14 @@ from apscheduler.schedulers.asyncio import AsyncIOScheduler
 from decouple import config
 from fastapi import FastAPI

-from chalicelib.core.alerts import alerts_processor
+from chalicelib.core import alerts_processor
 from chalicelib.utils import pg_client


 @asynccontextmanager
 async def lifespan(app: FastAPI):
     # Startup
-    ap_logger.info(">>>>> starting up <<<<<")
+    logging.info(">>>>> starting up <<<<<")
     await pg_client.init()
     app.schedule.start()
     app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",

@@ -27,22 +27,14 @@ async def lifespan(app: FastAPI):
     yield

     # Shutdown
-    ap_logger.info(">>>>> shutting down <<<<<")
+    logging.info(">>>>> shutting down <<<<<")
     app.schedule.shutdown(wait=False)
     await pg_client.terminate()


-loglevel = config("LOGLEVEL", default=logging.INFO)
-print(f">Loglevel set to: {loglevel}")
-logging.basicConfig(level=loglevel)
-ap_logger = logging.getLogger('apscheduler')
-ap_logger.setLevel(loglevel)

 app = FastAPI(root_path=config("root_path", default="/alerts"), docs_url=config("docs_url", default=""),
               redoc_url=config("redoc_url", default=""), lifespan=lifespan)
+logging.info("============= ALERTS =============")
-app.schedule = AsyncIOScheduler()
-ap_logger.info("============= ALERTS =============")


 @app.get("/")

@@ -58,8 +50,17 @@ async def get_health_status():
     }}

+app.schedule = AsyncIOScheduler()

+loglevel = config("LOGLEVEL", default=logging.INFO)
+print(f">Loglevel set to: {loglevel}")
+logging.basicConfig(level=loglevel)
+ap_logger = logging.getLogger('apscheduler')
+ap_logger.setLevel(loglevel)
+app.schedule = AsyncIOScheduler()

 if config("LOCAL_DEV", default=False, cast=bool):
     @app.get('/trigger', tags=["private"])
     async def trigger_main_cron():
-        ap_logger.info("Triggering main cron")
+        logging.info("Triggering main cron")
         alerts_processor.process()
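The hunks above wire alerts_processor.process onto an AsyncIOScheduler interval job inside the alerts service. A minimal runnable sketch of that scheduling pattern is shown below; the job body and the 3-minute interval are illustrative, since the real trigger arguments are not visible in this hunk.

import asyncio
import logging

from apscheduler.schedulers.asyncio import AsyncIOScheduler

logging.basicConfig(level=logging.INFO)
ap_logger = logging.getLogger("apscheduler")


def process():
    # Placeholder for alerts_processor.process().
    ap_logger.info("processing alerts")


async def main():
    schedule = AsyncIOScheduler()
    # Run process() every 3 minutes (interval chosen only for the demo).
    schedule.add_job(id="alerts_processor", func=process, trigger="interval", minutes=3)
    schedule.start()
    await asyncio.sleep(600)   # keep the event loop alive long enough to see a few runs
    schedule.shutdown(wait=False)


if __name__ == "__main__":
    asyncio.run(main())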
@@ -45,6 +45,8 @@ class JWTAuth(HTTPBearer):
             raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                 detail="Invalid authentication scheme.")
         jwt_payload = authorizers.jwt_authorizer(scheme=credentials.scheme, token=credentials.credentials)
+        logger.info("------ jwt_payload ------")
+        logger.info(jwt_payload)
         auth_exists = jwt_payload is not None and users.auth_exists(user_id=jwt_payload.get("userId", -1),
                                                                     jwt_iat=jwt_payload.get("iat", 100))
         if jwt_payload is None \

@@ -118,7 +120,8 @@ class JWTAuth(HTTPBearer):
             jwt_payload = None
         else:
             jwt_payload = authorizers.jwt_refresh_authorizer(scheme="Bearer", token=request.cookies["spotRefreshToken"])
+            logger.info("__process_spot_refresh_call")
+            logger.info(jwt_payload)
         if jwt_payload is None or jwt_payload.get("jti") is None:
             logger.warning("Null spotRefreshToken's payload, or null JTI.")
             raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
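For context, the JWTAuth hunks above add payload logging around authorizers.jwt_authorizer inside an HTTPBearer dependency. A compact sketch of that dependency shape follows; decode_token() is a hypothetical stand-in for the real authorizer, and the extra checks (users.auth_exists, refresh cookies) are omitted.

import logging

from fastapi import HTTPException, Request, status
from fastapi.security import HTTPBearer

logger = logging.getLogger(__name__)


def decode_token(scheme: str, token: str) -> dict | None:
    # Placeholder for authorizers.jwt_authorizer(); returns the decoded claims or None.
    return {"userId": 1, "iat": 0} if token else None


class JWTAuth(HTTPBearer):
    async def __call__(self, request: Request) -> dict:
        credentials = await super().__call__(request)
        if credentials.scheme != "Bearer":
            raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST,
                                detail="Invalid authentication scheme.")
        jwt_payload = decode_token(scheme=credentials.scheme, token=credentials.credentials)
        logger.debug("jwt payload: %s", jwt_payload)
        if jwt_payload is None:
            raise HTTPException(status_code=status.HTTP_403_FORBIDDEN,
                                detail="Invalid or expired token.")
        return jwt_payload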
@@ -7,8 +7,8 @@ from decouple import config

 import schemas
 from chalicelib.core import notifications, webhook
-from chalicelib.core.collaborations.collaboration_msteams import MSTeams
+from chalicelib.core.collaboration_msteams import MSTeams
-from chalicelib.core.collaborations.collaboration_slack import Slack
+from chalicelib.core.collaboration_slack import Slack
 from chalicelib.utils import pg_client, helper, email_helper, smtp
 from chalicelib.utils.TimeUTC import TimeUTC
@@ -1,10 +0,0 @@
-import logging
-
-from decouple import config
-
-logger = logging.getLogger(__name__)
-if config("EXP_ALERTS", cast=bool, default=False):
-    logging.info(">>> Using experimental alerts")
-    from . import alerts_processor_ch as alerts_processor
-else:
-    from . import alerts_processor as alerts_processor

@@ -1,3 +0,0 @@
-TENANT_ID = "-1"
-
-from . import helpers as alert_helpers
@@ -1,74 +0,0 @@
-import decimal
-import logging
-
-import schemas
-from chalicelib.utils.TimeUTC import TimeUTC
-
-logger = logging.getLogger(__name__)
-# This is the frequency of execution for each threshold
-TimeInterval = {
-    15: 3,
-    30: 5,
-    60: 10,
-    120: 20,
-    240: 30,
-    1440: 60,
-}
-
-
-def __format_value(x):
-    if x % 1 == 0:
-        x = int(x)
-    else:
-        x = round(x, 2)
-    return f"{x:,}"
-
-
-def can_check(a) -> bool:
-    now = TimeUTC.now()
-
-    repetitionBase = a["options"]["currentPeriod"] \
-        if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
-           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
-        else a["options"]["previousPeriod"]
-
-    if TimeInterval.get(repetitionBase) is None:
-        logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
-        return False
-
-    return (a["options"]["renotifyInterval"] <= 0 or
-            a["options"].get("lastNotification") is None or
-            a["options"]["lastNotification"] <= 0 or
-            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
-        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
-
-
-def generate_notification(alert, result):
-    left = __format_value(result['value'])
-    right = __format_value(alert['query']['right'])
-    return {
-        "alertId": alert["alertId"],
-        "tenantId": alert["tenantId"],
-        "title": alert["name"],
-        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
-        "buttonText": "Check metrics for more details",
-        "buttonUrl": f"/{alert['projectId']}/metrics",
-        "imageUrl": None,
-        "projectId": alert["projectId"],
-        "projectName": alert["projectName"],
-        "options": {"source": "ALERT", "sourceId": alert["alertId"],
-                    "sourceMeta": alert["detectionMethod"],
-                    "message": alert["options"]["message"], "projectId": alert["projectId"],
-                    "data": {"title": alert["name"],
-                             "limitValue": alert["query"]["right"],
-                             "actualValue": float(result["value"]) \
-                                 if isinstance(result["value"], decimal.Decimal) \
-                                 else result["value"],
-                             "operator": alert["query"]["operator"],
-                             "trigger": alert["query"]["left"],
-                             "alertId": alert["alertId"],
-                             "detectionMethod": alert["detectionMethod"],
-                             "currentPeriod": alert["options"]["currentPeriod"],
-                             "previousPeriod": alert["options"]["previousPeriod"],
-                             "createdAt": TimeUTC.now()}},
-    }
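The removed helpers module above gates alert evaluation with can_check(): an alert is only evaluated when its renotify window has elapsed and the elapsed time since creation lands on the check frequency that TimeInterval maps from its detection period. A small worked example of that arithmetic follows; the timestamps and the alert dict are made-up test data, and repetition_base is computed inline because the detection method here is not CHANGE.

# Worked example of the can_check() gating logic, using the same TimeInterval mapping.
TimeInterval = {15: 3, 30: 5, 60: 10, 120: 20, 240: 30, 1440: 60}

now = 1_700_000_000_000                          # "current" epoch millis (illustrative)
alert = {
    "createdAt": now - 10 * 60 * 1000,           # created 10 minutes ago
    "detectionMethod": "threshold",              # not CHANGE, so previousPeriod is used
    "options": {"currentPeriod": 15, "previousPeriod": 30,
                "renotifyInterval": 5,           # minutes
                "lastNotification": now - 10 * 60 * 1000},
}

repetition_base = alert["options"]["previousPeriod"]          # 30 -> checked every 5 minutes
renotify_ok = ((now - alert["options"]["lastNotification"])
               > alert["options"]["renotifyInterval"] * 60 * 1000)
on_schedule = ((now - alert["createdAt"]) % (TimeInterval[repetition_base] * 60 * 1000)) < 60 * 1000
print(renotify_ok and on_schedule)   # True: last notified 10 min ago, and 10 min is a multiple of 5 min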
@@ -1,10 +1,9 @@
-from chalicelib.core.alerts.modules import TENANT_ID
 from chalicelib.utils import pg_client, helper


 def get_all_alerts():
     with pg_client.PostgresClient(long_query=True) as cur:
-        query = f"""SELECT {TENANT_ID} AS tenant_id,
+        query = """SELECT -1 AS tenant_id,
                            alert_id,
                            projects.project_id,
                            projects.name AS project_name,
@@ -1,16 +1,16 @@
+import decimal
 import logging

 from pydantic_core._pydantic_core import ValidationError

 import schemas
-from chalicelib.core.alerts import alerts, alerts_listener
-from chalicelib.core.alerts.modules import alert_helpers
-from chalicelib.core.sessions import sessions_pg as sessions
+from chalicelib.core import alerts
+from chalicelib.core import alerts_listener
+from chalicelib.core import sessions
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC

 logger = logging.getLogger(__name__)

 LeftToDb = {
     schemas.AlertColumn.PERFORMANCE__DOM_CONTENT_LOADED__AVERAGE: {
         "table": "events.pages INNER JOIN public.sessions USING(session_id)",

@@ -46,6 +46,35 @@ LeftToDb = {
         "formula": "COUNT(DISTINCT session_id)", "condition": "source!='js_exception'", "joinSessions": False},
 }

+# This is the frequency of execution for each threshold
+TimeInterval = {
+    15: 3,
+    30: 5,
+    60: 10,
+    120: 20,
+    240: 30,
+    1440: 60,
+}
+
+
+def can_check(a) -> bool:
+    now = TimeUTC.now()
+
+    repetitionBase = a["options"]["currentPeriod"] \
+        if a["detectionMethod"] == schemas.AlertDetectionMethod.CHANGE \
+           and a["options"]["currentPeriod"] > a["options"]["previousPeriod"] \
+        else a["options"]["previousPeriod"]
+
+    if TimeInterval.get(repetitionBase) is None:
+        logger.error(f"repetitionBase: {repetitionBase} NOT FOUND")
+        return False
+
+    return (a["options"]["renotifyInterval"] <= 0 or
+            a["options"].get("lastNotification") is None or
+            a["options"]["lastNotification"] <= 0 or
+            ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
+        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
+
+
 def Build(a):
     now = TimeUTC.now()

@@ -132,12 +161,11 @@ def Build(a):


 def process():
-    logger.info("> processing alerts on PG")
     notifications = []
     all_alerts = alerts_listener.get_all_alerts()
     with pg_client.PostgresClient() as cur:
         for alert in all_alerts:
-            if alert_helpers.can_check(alert):
+            if can_check(alert):
                 query, params = Build(alert)
                 try:
                     query = cur.mogrify(query, params)

@@ -153,7 +181,7 @@ def process():
                     result = cur.fetchone()
                     if result["valid"]:
                         logger.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
-                        notifications.append(alert_helpers.generate_notification(alert, result))
+                        notifications.append(generate_notification(alert, result))
                 except Exception as e:
                     logger.error(
                         f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")

@@ -167,3 +195,42 @@ def process():
                 WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
     if len(notifications) > 0:
         alerts.process_notifications(notifications)
+
+
+def __format_value(x):
+    if x % 1 == 0:
+        x = int(x)
+    else:
+        x = round(x, 2)
+    return f"{x:,}"
+
+
+def generate_notification(alert, result):
+    left = __format_value(result['value'])
+    right = __format_value(alert['query']['right'])
+    return {
+        "alertId": alert["alertId"],
+        "tenantId": alert["tenantId"],
+        "title": alert["name"],
+        "description": f"{alert['seriesName']} = {left} ({alert['query']['operator']} {right}).",
+        "buttonText": "Check metrics for more details",
+        "buttonUrl": f"/{alert['projectId']}/metrics",
+        "imageUrl": None,
+        "projectId": alert["projectId"],
+        "projectName": alert["projectName"],
+        "options": {"source": "ALERT", "sourceId": alert["alertId"],
+                    "sourceMeta": alert["detectionMethod"],
+                    "message": alert["options"]["message"], "projectId": alert["projectId"],
+                    "data": {"title": alert["name"],
+                             "limitValue": alert["query"]["right"],
+                             "actualValue": float(result["value"]) \
+                                 if isinstance(result["value"], decimal.Decimal) \
+                                 else result["value"],
+                             "operator": alert["query"]["operator"],
+                             "trigger": alert["query"]["left"],
+                             "alertId": alert["alertId"],
+                             "detectionMethod": alert["detectionMethod"],
+                             "currentPeriod": alert["options"]["currentPeriod"],
+                             "previousPeriod": alert["options"]["previousPeriod"],
+                             "createdAt": TimeUTC.now()}},
+    }
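Tying the alerts_processor hunks together: process() fetches all alerts, filters them with can_check(), builds and runs one query per eligible alert, and turns valid results into notifications. The sketch below compresses that control flow with the database and notification calls passed in as stand-ins; it is an illustration of the shape, not the real function.

# Control-flow sketch of process(); build_query/run_query/make_notification/notify
# are stand-ins for Build(), the pg_client cursor calls, generate_notification()
# and alerts.process_notifications().
def process(all_alerts, can_check, build_query, run_query, make_notification, notify):
    notifications = []
    for alert in all_alerts:
        if not can_check(alert):
            continue
        query, params = build_query(alert)
        try:
            result = run_query(query, params)
        except Exception:
            continue                      # the real code logs the failing alert and moves on
        if result and result.get("valid"):
            notifications.append(make_notification(alert, result))
    if notifications:
        notify(notifications)
    return notifications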
@@ -1,4 +1,3 @@
-import logging
 from os import access, R_OK
 from os.path import exists as path_exists, getsize

@@ -11,8 +10,6 @@ import schemas
 from chalicelib.core import projects
 from chalicelib.utils.TimeUTC import TimeUTC

-logger = logging.getLogger(__name__)
-
 ASSIST_KEY = config("ASSIST_KEY")
 ASSIST_URL = config("ASSIST_URL") % ASSIST_KEY

@@ -55,21 +52,21 @@ def __get_live_sessions_ws(project_id, data):
         results = requests.post(ASSIST_URL + config("assist") + f"/{project_key}",
                                 json=data, timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            logger.error(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
-            logger.error(results.text)
+            print(f"!! issue with the peer-server code:{results.status_code} for __get_live_sessions_ws")
+            print(results.text)
             return {"total": 0, "sessions": []}
         live_peers = results.json().get("data", [])
     except requests.exceptions.Timeout:
-        logger.error("!! Timeout getting Assist response")
+        print("!! Timeout getting Assist response")
         live_peers = {"total": 0, "sessions": []}
     except Exception as e:
-        logger.error("!! Issue getting Live-Assist response")
-        logger.exception(e)
-        logger.error("expected JSON, received:")
+        print("!! Issue getting Live-Assist response")
+        print(str(e))
+        print("expected JSON, received:")
         try:
-            logger.error(results.text)
+            print(results.text)
         except:
-            logger.error("couldn't get response")
+            print("couldn't get response")
         live_peers = {"total": 0, "sessions": []}
     _live_peers = live_peers
     if "sessions" in live_peers:

@@ -105,8 +102,8 @@ def get_live_session_by_id(project_id, session_id):
         results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
                                timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            logger.error(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
-            logger.error(results.text)
+            print(f"!! issue with the peer-server code:{results.status_code} for get_live_session_by_id")
+            print(results.text)
             return None
         results = results.json().get("data")
         if results is None:

@@ -114,16 +111,16 @@ def get_live_session_by_id(project_id, session_id):
         results["live"] = True
         results["agentToken"] = __get_agent_token(project_id=project_id, project_key=project_key, session_id=session_id)
     except requests.exceptions.Timeout:
-        logger.error("!! Timeout getting Assist response")
+        print("!! Timeout getting Assist response")
         return None
     except Exception as e:
-        logger.error("!! Issue getting Assist response")
-        logger.exception(e)
-        logger.error("expected JSON, received:")
+        print("!! Issue getting Assist response")
+        print(str(e))
+        print("expected JSON, received:")
         try:
-            logger.error(results.text)
+            print(results.text)
         except:
-            logger.error("couldn't get response")
+            print("couldn't get response")
         return None
     return results

@@ -135,21 +132,21 @@ def is_live(project_id, session_id, project_key=None):
         results = requests.get(ASSIST_URL + config("assistList") + f"/{project_key}/{session_id}",
                                timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            logger.error(f"!! issue with the peer-server code:{results.status_code} for is_live")
-            logger.error(results.text)
+            print(f"!! issue with the peer-server code:{results.status_code} for is_live")
+            print(results.text)
             return False
         results = results.json().get("data")
     except requests.exceptions.Timeout:
-        logger.error("!! Timeout getting Assist response")
+        print("!! Timeout getting Assist response")
         return False
     except Exception as e:
-        logger.error("!! Issue getting Assist response")
-        logger.exception(e)
-        logger.error("expected JSON, received:")
+        print("!! Issue getting Assist response")
+        print(str(e))
+        print("expected JSON, received:")
         try:
-            logger.error(results.text)
+            print(results.text)
         except:
-            logger.error("couldn't get response")
+            print("couldn't get response")
         return False
     return str(session_id) == results

@@ -164,21 +161,21 @@ def autocomplete(project_id, q: str, key: str = None):
             ASSIST_URL + config("assistList") + f"/{project_key}/autocomplete",
             params=params, timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            logger.error(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
-            logger.error(results.text)
+            print(f"!! issue with the peer-server code:{results.status_code} for autocomplete")
+            print(results.text)
             return {"errors": [f"Something went wrong wile calling assist:{results.text}"]}
         results = results.json().get("data", [])
     except requests.exceptions.Timeout:
-        logger.error("!! Timeout getting Assist response")
+        print("!! Timeout getting Assist response")
         return {"errors": ["Assist request timeout"]}
     except Exception as e:
-        logger.error("!! Issue getting Assist response")
-        logger.exception(e)
-        logger.error("expected JSON, received:")
+        print("!! Issue getting Assist response")
+        print(str(e))
+        print("expected JSON, received:")
         try:
-            logger.error(results.text)
+            print(results.text)
         except:
-            logger.error("couldn't get response")
+            print("couldn't get response")
         return {"errors": ["Something went wrong wile calling assist"]}
     for r in results:
         r["type"] = __change_keys(r["type"])

@@ -242,24 +239,24 @@ def session_exists(project_id, session_id):
         results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
                                timeout=config("assistTimeout", cast=int, default=5))
         if results.status_code != 200:
-            logger.error(f"!! issue with the peer-server code:{results.status_code} for session_exists")
-            logger.error(results.text)
+            print(f"!! issue with the peer-server code:{results.status_code} for session_exists")
+            print(results.text)
             return None
         results = results.json().get("data")
         if results is None:
             return False
         return True
     except requests.exceptions.Timeout:
-        logger.error("!! Timeout getting Assist response")
+        print("!! Timeout getting Assist response")
         return False
     except Exception as e:
-        logger.error("!! Issue getting Assist response")
-        logger.exception(e)
-        logger.error("expected JSON, received:")
+        print("!! Issue getting Assist response")
+        print(str(e))
+        print("expected JSON, received:")
         try:
-            logger.error(results.text)
+            print(results.text)
         except:
-            logger.error("couldn't get response")
+            print("couldn't get response")
         return False
@@ -37,7 +37,8 @@ def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
         logger.debug("! JWT Expired signature")
         return None
     except BaseException as e:
-        logger.warning("! JWT Base Exception", exc_info=e)
+        logger.warning("! JWT Base Exception")
+        logger.debug(e)
         return None
     return payload

@@ -55,7 +56,8 @@ def jwt_refresh_authorizer(scheme: str, token: str):
         logger.debug("! JWT-refresh Expired signature")
         return None
     except BaseException as e:
-        logger.error("! JWT-refresh Base Exception", exc_info=e)
+        logger.warning("! JWT-refresh Base Exception")
+        logger.debug(e)
         return None
     return payload
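The authorizers hunks above show main collapsing a warning plus a debug line into a single logging call that attaches the exception through exc_info. A minimal sketch of that logging pattern, with a raised ValueError standing in for a failing JWT decode:

import logging

logging.basicConfig(level=logging.DEBUG)
logger = logging.getLogger(__name__)

try:
    raise ValueError("bad token")          # stand-in for a failing jwt decode
except BaseException as e:
    # One call records both the message and the full traceback of `e`.
    logger.warning("! JWT Base Exception", exc_info=e)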
@@ -61,11 +61,11 @@ def __get_autocomplete_table(value, project_id):
         try:
             cur.execute(query)
         except Exception as err:
-            logger.exception("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
-            logger.exception(query.decode('UTF-8'))
-            logger.exception("--------- VALUE -----------")
-            logger.exception(value)
-            logger.exception("--------------------")
+            print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
+            print(query.decode('UTF-8'))
+            print("--------- VALUE -----------")
+            print(value)
+            print("--------------------")
             raise err
         results = cur.fetchall()
         for r in results:

@@ -85,8 +85,7 @@ def __generic_query(typename, value_length=None):
                     ORDER BY value"""

     if value_length is None or value_length > 2:
-        return f"""SELECT DISTINCT ON(value,type) value, type
-                   ((SELECT DISTINCT value, type
+        return f"""(SELECT DISTINCT value, type
                     FROM {TABLE}
                     WHERE
                       project_id = %(project_id)s

@@ -102,7 +101,7 @@ def __generic_query(typename, value_length=None):
                       AND type='{typename.upper()}'
                       AND value ILIKE %(value)s
                     ORDER BY value
-                    LIMIT 5)) AS raw;"""
+                    LIMIT 5);"""
     return f"""SELECT DISTINCT value, type
                FROM {TABLE}
                WHERE

@@ -125,7 +124,7 @@ def __generic_autocomplete(event: Event):
     return f


-def generic_autocomplete_metas(typename):
+def __generic_autocomplete_metas(typename):
     def f(project_id, text):
         with pg_client.PostgresClient() as cur:
             params = {"project_id": project_id, "value": helper.string_to_sql_like(text),

@@ -327,7 +326,7 @@ def __search_metadata(project_id, value, key=None, source=None):
                           AND {colname} ILIKE %(svalue)s LIMIT 5)""")
     with pg_client.PostgresClient() as cur:
         cur.execute(cur.mogrify(f"""\
-                SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
+                SELECT key, value, 'METADATA' AS TYPE
                 FROM({" UNION ALL ".join(sub_from)}) AS all_metas
                 LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
                               "svalue": helper.string_to_sql_like("^" + value)}))
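In the __generic_query hunks above, main wraps the sub-selects in an outer SELECT DISTINCT ON (value, type) so a value matched by more than one pattern comes back once. A reduced sketch of that query shape follows; the table name, the UNION ALL glue between the two sub-selects, and the prefix/substring split are assumptions, since only fragments of the real query are visible in the hunk.

# Illustrative reconstruction of the DISTINCT ON wrapper; parameters follow the
# %(name)s placeholder style used throughout the hunk.
TABLE = "public.autocomplete"   # assumed table name
typename = "click"              # assumed event type

query = f"""SELECT DISTINCT ON (value, type) value, type
            FROM ((SELECT DISTINCT value, type
                   FROM {TABLE}
                   WHERE project_id = %(project_id)s
                     AND type = '{typename.upper()}'
                     AND value ILIKE %(svalue)s          -- prefix match
                   ORDER BY value
                   LIMIT 5)
                  UNION ALL
                  (SELECT DISTINCT value, type
                   FROM {TABLE}
                   WHERE project_id = %(project_id)s
                     AND type = '{typename.upper()}'
                     AND value ILIKE %(value)s           -- substring match
                   ORDER BY value
                   LIMIT 5)) AS raw;"""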
@ -1,8 +1,7 @@
|
||||||
from chalicelib.core import projects
|
|
||||||
from chalicelib.core import users
|
|
||||||
from chalicelib.core.log_tools import datadog, stackdriver, sentry
|
|
||||||
from chalicelib.core.modules import TENANT_CONDITION
|
|
||||||
from chalicelib.utils import pg_client
|
from chalicelib.utils import pg_client
|
||||||
|
from chalicelib.core import projects, log_tool_datadog, log_tool_stackdriver, log_tool_sentry
|
||||||
|
|
||||||
|
from chalicelib.core import users
|
||||||
|
|
||||||
|
|
||||||
def get_state(tenant_id):
|
def get_state(tenant_id):
|
||||||
|
|
@ -13,61 +12,47 @@ def get_state(tenant_id):
|
||||||
|
|
||||||
if len(pids) > 0:
|
if len(pids) > 0:
|
||||||
cur.execute(
|
cur.execute(
|
||||||
cur.mogrify(
|
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||||
"""SELECT EXISTS(( SELECT 1
|
|
||||||
FROM public.sessions AS s
|
FROM public.sessions AS s
|
||||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||||
{"ids": tuple(pids)},
|
{"ids": tuple(pids)})
|
||||||
)
|
|
||||||
)
|
)
|
||||||
recorded = cur.fetchone()["exists"]
|
recorded = cur.fetchone()["exists"]
|
||||||
meta = False
|
meta = False
|
||||||
if recorded:
|
if recorded:
|
||||||
query = cur.mogrify(
|
cur.execute("""SELECT EXISTS((SELECT 1
|
||||||
f"""SELECT EXISTS((SELECT 1
|
|
||||||
FROM public.projects AS p
|
FROM public.projects AS p
|
||||||
LEFT JOIN LATERAL ( SELECT 1
|
LEFT JOIN LATERAL ( SELECT 1
|
||||||
FROM public.sessions
|
FROM public.sessions
|
||||||
WHERE sessions.project_id = p.project_id
|
WHERE sessions.project_id = p.project_id
|
||||||
AND sessions.user_id IS NOT NULL
|
AND sessions.user_id IS NOT NULL
|
||||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||||
WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
|
WHERE p.deleted_at ISNULL
|
||||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||||
OR p.metadata_10 IS NOT NULL )
|
OR p.metadata_10 IS NOT NULL )
|
||||||
)) AS exists;""",
|
)) AS exists;""")
|
||||||
{"tenant_id": tenant_id},
|
|
||||||
)
|
|
||||||
cur.execute(query)
|
|
||||||
|
|
||||||
meta = cur.fetchone()["exists"]
|
meta = cur.fetchone()["exists"]
|
||||||
|
|
||||||
return [
|
return [
|
||||||
{
|
{"task": "Install OpenReplay",
|
||||||
"task": "Install OpenReplay",
|
"done": recorded,
|
||||||
"done": recorded,
|
"URL": "https://docs.openreplay.com/getting-started/quick-start"},
|
||||||
"URL": "https://docs.openreplay.com/getting-started/quick-start",
|
{"task": "Identify Users",
|
||||||
},
|
"done": meta,
|
||||||
{
|
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
|
||||||
"task": "Identify Users",
|
{"task": "Invite Team Members",
|
||||||
"done": meta,
|
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
|
"URL": "https://app.openreplay.com/client/manage-users"},
|
||||||
},
|
{"task": "Integrations",
|
||||||
{
|
"done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
|
||||||
"task": "Invite Team Members",
|
or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
|
||||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||||
"URL": "https://app.openreplay.com/client/manage-users",
|
"URL": "https://docs.openreplay.com/integrations"}
|
||||||
},
|
|
||||||
{
|
|
||||||
"task": "Integrations",
|
|
||||||
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
|
|
||||||
or len(sentry.get_all(tenant_id=tenant_id)) > 0
|
|
||||||
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
|
||||||
"URL": "https://docs.openreplay.com/integrations",
|
|
||||||
},
|
|
||||||
]
|
]
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -78,66 +63,52 @@ def get_state_installing(tenant_id):
|
||||||
|
|
||||||
if len(pids) > 0:
|
if len(pids) > 0:
|
||||||
cur.execute(
|
cur.execute(
|
||||||
cur.mogrify(
|
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||||
"""SELECT EXISTS(( SELECT 1
|
|
||||||
FROM public.sessions AS s
|
FROM public.sessions AS s
|
||||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||||
{"ids": tuple(pids)},
|
{"ids": tuple(pids)})
|
||||||
)
|
|
||||||
)
|
)
|
||||||
recorded = cur.fetchone()["exists"]
|
recorded = cur.fetchone()["exists"]
|
||||||
|
|
||||||
return {
|
return {"task": "Install OpenReplay",
|
||||||
"task": "Install OpenReplay",
|
"done": recorded,
|
||||||
"done": recorded,
|
"URL": "https://docs.openreplay.com/getting-started/quick-start"}
|
||||||
"URL": "https://docs.openreplay.com/getting-started/quick-start",
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_state_identify_users(tenant_id):
|
def get_state_identify_users(tenant_id):
|
||||||
with pg_client.PostgresClient() as cur:
|
with pg_client.PostgresClient() as cur:
|
||||||
query = cur.mogrify(
|
cur.execute("""SELECT EXISTS((SELECT 1
|
||||||
f"""SELECT EXISTS((SELECT 1
|
|
||||||
FROM public.projects AS p
|
FROM public.projects AS p
|
||||||
LEFT JOIN LATERAL ( SELECT 1
|
LEFT JOIN LATERAL ( SELECT 1
|
||||||
FROM public.sessions
|
FROM public.sessions
|
||||||
WHERE sessions.project_id = p.project_id
|
WHERE sessions.project_id = p.project_id
|
||||||
AND sessions.user_id IS NOT NULL
|
AND sessions.user_id IS NOT NULL
|
||||||
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
LIMIT 1) AS sessions(user_id) ON (TRUE)
|
||||||
WHERE {TENANT_CONDITION} AND p.deleted_at ISNULL
|
WHERE p.deleted_at ISNULL
|
||||||
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
AND ( sessions.user_id IS NOT NULL OR p.metadata_1 IS NOT NULL
|
||||||
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
OR p.metadata_2 IS NOT NULL OR p.metadata_3 IS NOT NULL
|
||||||
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
OR p.metadata_4 IS NOT NULL OR p.metadata_5 IS NOT NULL
|
||||||
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
OR p.metadata_6 IS NOT NULL OR p.metadata_7 IS NOT NULL
|
||||||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||||
OR p.metadata_10 IS NOT NULL )
|
OR p.metadata_10 IS NOT NULL )
|
||||||
)) AS exists;""",
|
)) AS exists;""")
|
||||||
{"tenant_id": tenant_id},
|
|
||||||
)
|
|
||||||
cur.execute(query)
|
|
||||||
|
|
||||||
meta = cur.fetchone()["exists"]
|
meta = cur.fetchone()["exists"]
|
||||||
|
|
||||||
return {
|
return {"task": "Identify Users",
|
||||||
"task": "Identify Users",
|
"done": meta,
|
||||||
"done": meta,
|
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"}
|
||||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_state_manage_users(tenant_id):
|
def get_state_manage_users(tenant_id):
|
||||||
return {
|
return {"task": "Invite Team Members",
|
||||||
"task": "Invite Team Members",
|
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
"URL": "https://app.openreplay.com/client/manage-users"}
|
||||||
"URL": "https://app.openreplay.com/client/manage-users",
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_state_integrations(tenant_id):
|
def get_state_integrations(tenant_id):
|
||||||
return {
|
return {"task": "Integrations",
|
||||||
"task": "Integrations",
|
"done": len(log_tool_datadog.get_all(tenant_id=tenant_id)) > 0 \
|
||||||
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
|
or len(log_tool_sentry.get_all(tenant_id=tenant_id)) > 0 \
|
||||||
or len(sentry.get_all(tenant_id=tenant_id)) > 0
|
or len(log_tool_stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||||
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
"URL": "https://docs.openreplay.com/integrations"}
|
||||||
"URL": "https://docs.openreplay.com/integrations",
|
|
||||||
}
|
|
||||||
|
|
|
@@ -6,7 +6,7 @@ from fastapi import HTTPException, status

 import schemas
 from chalicelib.core import webhook
-from chalicelib.core.collaborations.collaboration_base import BaseCollaboration
+from chalicelib.core.collaboration_base import BaseCollaboration

 logger = logging.getLogger(__name__)
@@ -6,7 +6,7 @@ from fastapi import HTTPException, status

 import schemas
 from chalicelib.core import webhook
-from chalicelib.core.collaborations.collaboration_base import BaseCollaboration
+from chalicelib.core.collaboration_base import BaseCollaboration


 class Slack(BaseCollaboration):
@@ -1 +0,0 @@
-from . import collaboration_base as _

653 api/chalicelib/core/custom_metrics.py Normal file
@ -0,0 +1,653 @@
|
||||||
|
import json
|
||||||
|
import logging
|
||||||
|
|
||||||
|
from fastapi import HTTPException, status
|
||||||
|
|
||||||
|
import schemas
|
||||||
|
from chalicelib.core import sessions, funnels, errors, issues, heatmaps, product_analytics, \
|
||||||
|
custom_metrics_predefined
|
||||||
|
from chalicelib.utils import helper, pg_client
|
||||||
|
from chalicelib.utils.TimeUTC import TimeUTC
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
# TODO: refactor this to split
|
||||||
|
# timeseries /
|
||||||
|
# table of errors / table of issues / table of browsers / table of devices / table of countries / table of URLs
|
||||||
|
# remove "table of" calls from this function
|
||||||
|
def __try_live(project_id, data: schemas.CardSchema):
|
||||||
|
results = []
|
||||||
|
for i, s in enumerate(data.series):
|
||||||
|
results.append(sessions.search2_series(data=s.filter, project_id=project_id, density=data.density,
|
||||||
|
view_type=data.view_type, metric_type=data.metric_type,
|
||||||
|
metric_of=data.metric_of, metric_value=data.metric_value))
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_series(project_id, data: schemas.CardSchema):
|
||||||
|
results = []
|
||||||
|
for i, s in enumerate(data.series):
|
||||||
|
results.append(sessions.search2_table(data=s.filter, project_id=project_id, density=data.density,
|
||||||
|
metric_of=data.metric_of, metric_value=data.metric_value,
|
||||||
|
metric_format=data.metric_format))
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def __get_funnel_chart(project: schemas.ProjectContext, data: schemas.CardFunnel, user_id: int = None):
|
||||||
|
if len(data.series) == 0:
|
||||||
|
return {
|
||||||
|
"stages": [],
|
||||||
|
"totalDropDueToIssues": 0
|
||||||
|
}
|
||||||
|
|
||||||
|
# return funnels.get_top_insights_on_the_fly_widget(project_id=project_id,
|
||||||
|
# data=data.series[0].filter,
|
||||||
|
# metric_format=data.metric_format)
|
||||||
|
return funnels.get_simple_funnel(project=project,
|
||||||
|
data=data.series[0].filter,
|
||||||
|
metric_format=data.metric_format)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_errors_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
||||||
|
if len(data.series) == 0:
|
||||||
|
return {
|
||||||
|
"total": 0,
|
||||||
|
"errors": []
|
||||||
|
}
|
||||||
|
return errors.search(data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_sessions_list(project: schemas.ProjectContext, user_id, data: schemas.CardSchema):
|
||||||
|
if len(data.series) == 0:
|
||||||
|
logger.debug("empty series")
|
||||||
|
return {
|
||||||
|
"total": 0,
|
||||||
|
"sessions": []
|
||||||
|
}
|
||||||
|
return sessions.search_sessions(data=data.series[0].filter, project_id=project.project_id, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_heat_map_chart(project: schemas.ProjectContext, user_id, data: schemas.CardHeatMap,
|
||||||
|
include_mobs: bool = True):
|
||||||
|
if len(data.series) == 0:
|
||||||
|
return None
|
||||||
|
data.series[0].filter.filters += data.series[0].filter.events
|
||||||
|
data.series[0].filter.events = []
|
||||||
|
return heatmaps.search_short_session(project_id=project.project_id, user_id=user_id,
|
||||||
|
data=schemas.HeatMapSessionsSearch(
|
||||||
|
**data.series[0].filter.model_dump()),
|
||||||
|
include_mobs=include_mobs)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_path_analysis_chart(project: schemas.ProjectContext, user_id: int, data: schemas.CardPathAnalysis):
|
||||||
|
if len(data.series) == 0:
|
||||||
|
data.series.append(
|
||||||
|
schemas.CardPathAnalysisSeriesSchema(startTimestamp=data.startTimestamp, endTimestamp=data.endTimestamp))
|
||||||
|
elif not isinstance(data.series[0].filter, schemas.PathAnalysisSchema):
|
||||||
|
data.series[0].filter = schemas.PathAnalysisSchema()
|
||||||
|
|
||||||
|
return product_analytics.path_analysis(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_timeseries_chart(project: schemas.ProjectContext, data: schemas.CardTimeSeries, user_id: int = None):
|
||||||
|
series_charts = __try_live(project_id=project.project_id, data=data)
|
||||||
|
results = [{}] * len(series_charts[0])
|
||||||
|
for i in range(len(results)):
|
||||||
|
for j, series_chart in enumerate(series_charts):
|
||||||
|
results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
|
||||||
|
data.series[j].name if data.series[j].name else j + 1: series_chart[i]["count"]}
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def not_supported(**args):
|
||||||
|
raise Exception("not supported")
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_user_ids(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||||
|
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_sessions(project: schemas.ProjectContext, data: schemas.CardTable, user_id):
|
||||||
|
return __get_sessions_list(project=project, user_id=user_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_errors(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
|
||||||
|
return __get_errors_list(project=project, user_id=user_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_issues(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||||
|
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_browsers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||||
|
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_devises(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||||
|
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_countries(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||||
|
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_urls(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||||
|
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_referrers(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||||
|
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_of_requests(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int = None):
|
||||||
|
return __get_table_of_series(project_id=project.project_id, data=data)
|
||||||
|
|
||||||
|
|
||||||
|
def __get_table_chart(project: schemas.ProjectContext, data: schemas.CardTable, user_id: int):
|
||||||
|
supported = {
|
||||||
|
schemas.MetricOfTable.SESSIONS: __get_table_of_sessions,
|
||||||
|
schemas.MetricOfTable.ERRORS: __get_table_of_errors,
|
||||||
|
schemas.MetricOfTable.USER_ID: __get_table_of_user_ids,
|
||||||
|
schemas.MetricOfTable.ISSUES: __get_table_of_issues,
|
||||||
|
schemas.MetricOfTable.USER_BROWSER: __get_table_of_browsers,
|
||||||
|
schemas.MetricOfTable.USER_DEVICE: __get_table_of_devises,
|
||||||
|
schemas.MetricOfTable.USER_COUNTRY: __get_table_of_countries,
|
||||||
|
schemas.MetricOfTable.VISITED_URL: __get_table_of_urls,
|
||||||
|
schemas.MetricOfTable.REFERRER: __get_table_of_referrers,
|
||||||
|
schemas.MetricOfTable.FETCH: __get_table_of_requests
|
||||||
|
}
|
||||||
|
return supported.get(data.metric_of, not_supported)(project=project, data=data, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
|
def get_chart(project: schemas.ProjectContext, data: schemas.CardSchema, user_id: int):
|
||||||
|
if data.is_predefined:
|
||||||
|
return custom_metrics_predefined.get_metric(key=data.metric_of,
|
||||||
|
project_id=project.project_id,
|
||||||
|
data=data.model_dump())
|
||||||
|
|
||||||
|
supported = {
|
||||||
|
schemas.MetricType.TIMESERIES: __get_timeseries_chart,
|
||||||
|
schemas.MetricType.TABLE: __get_table_chart,
|
||||||
|
schemas.MetricType.HEAT_MAP: __get_heat_map_chart,
|
||||||
|
schemas.MetricType.FUNNEL: __get_funnel_chart,
|
||||||
|
schemas.MetricType.INSIGHTS: not_supported,
|
||||||
|
schemas.MetricType.PATH_ANALYSIS: __get_path_analysis_chart
|
||||||
|
}
|
||||||
|
return supported.get(data.metric_type, not_supported)(project=project, data=data, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
|
def get_sessions_by_card_id(project_id, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||||
|
# No need for this because UI is sending the full payload
|
||||||
|
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||||
|
# if card is None:
|
||||||
|
# return None
|
||||||
|
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
||||||
|
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||||
|
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
||||||
|
return None
|
||||||
|
results = []
|
||||||
|
for s in data.series:
|
||||||
|
results.append({"seriesId": s.series_id, "seriesName": s.name,
|
||||||
|
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def get_sessions(project_id, user_id, data: schemas.CardSessionsSchema):
|
||||||
|
results = []
|
||||||
|
if len(data.series) == 0:
|
||||||
|
return results
|
||||||
|
for s in data.series:
|
||||||
|
if len(data.filters) > 0:
|
||||||
|
s.filter.filters += data.filters
|
||||||
|
s.filter = schemas.SessionsSearchPayloadSchema(**s.filter.model_dump(by_alias=True))
|
||||||
|
|
||||||
|
results.append({"seriesId": None, "seriesName": s.name,
|
||||||
|
**sessions.search_sessions(data=s.filter, project_id=project_id, user_id=user_id)})
|
||||||
|
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def get_issues(project: schemas.ProjectContext, user_id: int, data: schemas.CardSchema):
|
||||||
|
if data.is_predefined:
|
||||||
|
return not_supported()
|
||||||
|
if data.metric_of == schemas.MetricOfTable.ISSUES:
|
||||||
|
return __get_table_of_issues(project=project, user_id=user_id, data=data)
|
||||||
|
supported = {
|
||||||
|
schemas.MetricType.TIMESERIES: not_supported,
|
||||||
|
schemas.MetricType.TABLE: not_supported,
|
||||||
|
schemas.MetricType.HEAT_MAP: not_supported,
|
||||||
|
schemas.MetricType.INSIGHTS: not_supported,
|
||||||
|
schemas.MetricType.PATH_ANALYSIS: not_supported,
|
||||||
|
}
|
||||||
|
return supported.get(data.metric_type, not_supported)()
|
||||||
|
|
||||||
|
|
||||||
|
def __get_path_analysis_card_info(data: schemas.CardPathAnalysis):
|
||||||
|
r = {"start_point": [s.model_dump() for s in data.start_point],
|
||||||
|
"start_type": data.start_type,
|
||||||
|
"excludes": [e.model_dump() for e in data.excludes],
|
||||||
|
"hideExcess": data.hide_excess}
|
||||||
|
return r
|
||||||
|
|
||||||
|
|
||||||
|
def create_card(project: schemas.ProjectContext, user_id, data: schemas.CardSchema, dashboard=False):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
session_data = None
|
||||||
|
if data.metric_type == schemas.MetricType.HEAT_MAP:
|
||||||
|
if data.session_id is not None:
|
||||||
|
session_data = {"sessionId": data.session_id}
|
||||||
|
else:
|
||||||
|
session_data = __get_heat_map_chart(project=project, user_id=user_id,
|
||||||
|
data=data, include_mobs=False)
|
||||||
|
if session_data is not None:
|
||||||
|
session_data = {"sessionId": session_data["sessionId"]}
|
||||||
|
|
||||||
|
_data = {"session_data": json.dumps(session_data) if session_data is not None else None}
|
||||||
|
for i, s in enumerate(data.series):
|
||||||
|
for k in s.model_dump().keys():
|
||||||
|
_data[f"{k}_{i}"] = s.__getattribute__(k)
|
||||||
|
_data[f"index_{i}"] = i
|
||||||
|
_data[f"filter_{i}"] = s.filter.json()
|
||||||
|
series_len = len(data.series)
|
||||||
|
params = {"user_id": user_id, "project_id": project.project_id, **data.model_dump(), **_data,
|
||||||
|
"default_config": json.dumps(data.default_config.model_dump()), "card_info": None}
|
||||||
|
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||||
|
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
||||||
|
|
||||||
|
query = """INSERT INTO metrics (project_id, user_id, name, is_public,
|
||||||
|
view_type, metric_type, metric_of, metric_value,
|
||||||
|
metric_format, default_config, thumbnail, data,
|
||||||
|
card_info)
|
||||||
|
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(is_public)s,
|
||||||
|
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s,
|
||||||
|
%(metric_format)s, %(default_config)s, %(thumbnail)s, %(session_data)s,
|
||||||
|
%(card_info)s)
|
||||||
|
RETURNING metric_id"""
|
||||||
|
if len(data.series) > 0:
|
||||||
|
query = f"""WITH m AS ({query})
|
||||||
|
INSERT INTO metric_series(metric_id, index, name, filter)
|
||||||
|
VALUES {",".join([f"((SELECT metric_id FROM m), %(index_{i})s, %(name_{i})s, %(filter_{i})s::jsonb)"
|
||||||
|
for i in range(series_len)])}
|
||||||
|
RETURNING metric_id;"""
|
||||||
|
|
||||||
|
query = cur.mogrify(query, params)
|
||||||
|
cur.execute(query)
|
||||||
|
r = cur.fetchone()
|
||||||
|
if dashboard:
|
||||||
|
return r["metric_id"]
|
||||||
|
return {"data": get_card(metric_id=r["metric_id"], project_id=project.project_id, user_id=user_id)}
|
||||||
|
|
||||||
|
|
||||||
|
def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
|
||||||
|
metric: dict = get_card(metric_id=metric_id, project_id=project_id,
|
||||||
|
user_id=user_id, flatten=False, include_data=True)
|
||||||
|
if metric is None:
|
||||||
|
return None
|
||||||
|
series_ids = [r["seriesId"] for r in metric["series"]]
|
||||||
|
n_series = []
|
||||||
|
d_series_ids = []
|
||||||
|
u_series = []
|
||||||
|
u_series_ids = []
|
||||||
|
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
|
||||||
|
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
|
||||||
|
"metric_type": data.metric_type, "metric_of": data.metric_of,
|
||||||
|
"metric_value": data.metric_value, "metric_format": data.metric_format,
|
||||||
|
"config": json.dumps(data.default_config.model_dump()), "thumbnail": data.thumbnail}
|
||||||
|
for i, s in enumerate(data.series):
|
||||||
|
prefix = "u_"
|
||||||
|
if s.index is None:
|
||||||
|
s.index = i
|
||||||
|
if s.series_id is None or s.series_id not in series_ids:
|
||||||
|
n_series.append({"i": i, "s": s})
|
||||||
|
prefix = "n_"
|
||||||
|
else:
|
||||||
|
u_series.append({"i": i, "s": s})
|
||||||
|
u_series_ids.append(s.series_id)
|
||||||
|
ns = s.model_dump()
|
||||||
|
for k in ns.keys():
|
||||||
|
if k == "filter":
|
||||||
|
ns[k] = json.dumps(ns[k])
|
||||||
|
params[f"{prefix}{k}_{i}"] = ns[k]
|
||||||
|
for i in series_ids:
|
||||||
|
if i not in u_series_ids:
|
||||||
|
d_series_ids.append(i)
|
||||||
|
params["d_series_ids"] = tuple(d_series_ids)
|
||||||
|
params["card_info"] = None
|
||||||
|
params["session_data"] = json.dumps(metric["data"])
|
||||||
|
if data.metric_type == schemas.MetricType.PATH_ANALYSIS:
|
||||||
|
params["card_info"] = json.dumps(__get_path_analysis_card_info(data=data))
|
||||||
|
elif data.metric_type == schemas.MetricType.HEAT_MAP:
|
||||||
|
if data.session_id is not None:
|
||||||
|
params["session_data"] = json.dumps({"sessionId": data.session_id})
|
||||||
|
elif metric.get("data") and metric["data"].get("sessionId"):
|
||||||
|
params["session_data"] = json.dumps({"sessionId": metric["data"]["sessionId"]})
|
||||||
|
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
sub_queries = []
|
||||||
|
if len(n_series) > 0:
|
||||||
|
sub_queries.append(f"""\
|
||||||
|
n AS (INSERT INTO metric_series (metric_id, index, name, filter)
|
||||||
|
VALUES {",".join([f"(%(metric_id)s, %(n_index_{s['i']})s, %(n_name_{s['i']})s, %(n_filter_{s['i']})s::jsonb)"
|
||||||
|
for s in n_series])}
|
||||||
|
RETURNING 1)""")
|
||||||
|
if len(u_series) > 0:
|
||||||
|
sub_queries.append(f"""\
|
||||||
|
u AS (UPDATE metric_series
|
||||||
|
SET name=series.name,
|
||||||
|
filter=series.filter,
|
||||||
|
index=series.index
|
||||||
|
FROM (VALUES {",".join([f"(%(u_series_id_{s['i']})s,%(u_index_{s['i']})s,%(u_name_{s['i']})s,%(u_filter_{s['i']})s::jsonb)"
|
||||||
|
for s in u_series])}) AS series(series_id, index, name, filter)
|
||||||
|
WHERE metric_series.metric_id =%(metric_id)s AND metric_series.series_id=series.series_id
|
||||||
|
RETURNING 1)""")
|
||||||
|
if len(d_series_ids) > 0:
|
||||||
|
sub_queries.append("""\
|
||||||
|
d AS (DELETE FROM metric_series WHERE metric_id =%(metric_id)s AND series_id IN %(d_series_ids)s
|
||||||
|
RETURNING 1)""")
|
||||||
|
query = cur.mogrify(f"""\
|
||||||
|
{"WITH " if len(sub_queries) > 0 else ""}{",".join(sub_queries)}
|
||||||
|
UPDATE metrics
|
||||||
|
SET name = %(name)s, is_public= %(is_public)s,
|
||||||
|
view_type= %(view_type)s, metric_type= %(metric_type)s,
|
||||||
|
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
|
||||||
|
metric_format= %(metric_format)s,
|
||||||
|
edited_at = timezone('utc'::text, now()),
|
||||||
|
default_config = %(config)s,
|
||||||
|
thumbnail = %(thumbnail)s,
|
||||||
|
card_info = %(card_info)s,
|
||||||
|
data = %(session_data)s
|
||||||
|
WHERE metric_id = %(metric_id)s
|
||||||
|
AND project_id = %(project_id)s
|
||||||
|
AND (user_id = %(user_id)s OR is_public)
|
||||||
|
RETURNING metric_id;""", params)
|
||||||
|
cur.execute(query)
|
||||||
|
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
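update_card reconciles the incoming series against what is already stored: unknown or id-less series are inserted (the n CTE), known ids are updated (the u CTE), and stored ids missing from the payload are deleted (the d CTE), all in one statement. A small standalone sketch of that partitioning step, using hypothetical plain-dict series for illustration:

def partition_series(existing_ids, incoming):
    # incoming: list of dicts with an optional "series_id" key
    new, updated, kept_ids = [], [], set()
    for s in incoming:
        sid = s.get("series_id")
        if sid is None or sid not in existing_ids:
            new.append(s)
        else:
            updated.append(s)
            kept_ids.add(sid)
    deleted_ids = [i for i in existing_ids if i not in kept_ids]
    return new, updated, deleted_ids

# Series 2 is updated, series 3 is deleted, and the id-less one is created.
print(partition_series([2, 3], [{"series_id": 2, "name": "a"}, {"name": "b"}]))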
|
||||||
|
|
||||||
|
|
||||||
|
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
|
||||||
|
constraints = ["metrics.project_id = %(project_id)s",
|
||||||
|
"metrics.deleted_at ISNULL"]
|
||||||
|
params = {"project_id": project_id, "user_id": user_id,
|
||||||
|
"offset": (data.page - 1) * data.limit,
|
||||||
|
"limit": data.limit, }
|
||||||
|
if data.mine_only:
|
||||||
|
constraints.append("user_id = %(user_id)s")
|
||||||
|
else:
|
||||||
|
constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
|
||||||
|
if data.shared_only:
|
||||||
|
constraints.append("is_public")
|
||||||
|
|
||||||
|
if data.query is not None and len(data.query) > 0:
|
||||||
|
constraints.append("(name ILIKE %(query)s OR owner.owner_email ILIKE %(query)s)")
|
||||||
|
params["query"] = helper.values_for_operator(value=data.query,
|
||||||
|
op=schemas.SearchEventOperator.CONTAINS)
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
sub_join = ""
|
||||||
|
if include_series:
|
||||||
|
sub_join = """LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
||||||
|
FROM metric_series
|
||||||
|
WHERE metric_series.metric_id = metrics.metric_id
|
||||||
|
AND metric_series.deleted_at ISNULL
|
||||||
|
) AS metric_series ON (TRUE)"""
|
||||||
|
query = cur.mogrify(
|
||||||
|
f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, edited_at,
|
||||||
|
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
|
||||||
|
dashboards, owner_email, owner_name, default_config AS config, thumbnail
|
||||||
|
FROM metrics
|
||||||
|
{sub_join}
|
||||||
|
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
||||||
|
FROM (SELECT DISTINCT dashboard_id, name, is_public
|
||||||
|
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||||
|
WHERE deleted_at ISNULL
|
||||||
|
AND dashboard_widgets.metric_id = metrics.metric_id
|
||||||
|
AND project_id = %(project_id)s
|
||||||
|
AND ((dashboards.user_id = %(user_id)s OR is_public))) AS connected_dashboards
|
||||||
|
) AS connected_dashboards ON (TRUE)
|
||||||
|
LEFT JOIN LATERAL (SELECT email AS owner_email, name AS owner_name
|
||||||
|
FROM users
|
||||||
|
WHERE deleted_at ISNULL
|
||||||
|
AND users.user_id = metrics.user_id
|
||||||
|
) AS owner ON (TRUE)
|
||||||
|
WHERE {" AND ".join(constraints)}
|
||||||
|
ORDER BY created_at {data.order.value}
|
||||||
|
LIMIT %(limit)s OFFSET %(offset)s;""", params)
|
||||||
|
logger.debug("---------")
|
||||||
|
logger.debug(query)
|
||||||
|
logger.debug("---------")
|
||||||
|
cur.execute(query)
|
||||||
|
rows = cur.fetchall()
|
||||||
|
if include_series:
|
||||||
|
for r in rows:
|
||||||
|
for s in r["series"]:
|
||||||
|
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
||||||
|
else:
|
||||||
|
for r in rows:
|
||||||
|
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
|
||||||
|
r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
|
||||||
|
rows = helper.list_to_camel_case(rows)
|
||||||
|
return rows
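The query above is assembled from a plain list of SQL fragments joined with AND plus a parameter dict, so each optional filter (mine_only, shared_only, the ILIKE name/email search) is just one more append. A stripped-down sketch of the pattern; the %...% wrapping stands in for helper.values_for_operator with the CONTAINS operator, which is an assumption about that helper:

def build_where(project_id, user_id, query_text=None, mine_only=False):
    constraints = ["metrics.project_id = %(project_id)s",
                   "metrics.deleted_at ISNULL"]
    params = {"project_id": project_id, "user_id": user_id}
    if mine_only:
        constraints.append("user_id = %(user_id)s")
    else:
        constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
    if query_text:
        constraints.append("(name ILIKE %(query)s OR owner.owner_email ILIKE %(query)s)")
        params["query"] = f"%{query_text}%"  # assumed equivalent of values_for_operator(CONTAINS)
    return "WHERE " + " AND ".join(constraints), params

print(build_where(1, 7, query_text="funnel")[0])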
|
||||||
|
|
||||||
|
|
||||||
|
def get_all(project_id, user_id):
|
||||||
|
default_search = schemas.SearchCardsSchema()
|
||||||
|
rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
|
||||||
|
result = rows
|
||||||
|
while len(rows) == default_search.limit:
|
||||||
|
default_search.page += 1
|
||||||
|
rows = search_all(project_id=project_id, user_id=user_id, data=default_search)
|
||||||
|
result += rows
|
||||||
|
|
||||||
|
return result
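get_all drains search_all page by page and stops as soon as a partial page comes back; a page of exactly limit rows always triggers one more request. The same loop in isolation, with a stubbed page fetcher standing in for search_all:

def fetch_all(fetch_page, limit=9):
    # fetch_page(page) -> list of rows; stand-in for search_all(...)
    page, result = 1, []
    rows = fetch_page(page)
    result += rows
    while len(rows) == limit:  # a full page may hide more rows behind it
        page += 1
        rows = fetch_page(page)
        result += rows
    return result

data = list(range(20))
print(len(fetch_all(lambda p: data[(p - 1) * 9:p * 9])))  # 20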
|
||||||
|
|
||||||
|
|
||||||
|
def delete_card(project_id, metric_id, user_id):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
cur.execute(
|
||||||
|
cur.mogrify("""\
|
||||||
|
UPDATE public.metrics
|
||||||
|
SET deleted_at = timezone('utc'::text, now()), edited_at = timezone('utc'::text, now())
|
||||||
|
WHERE project_id = %(project_id)s
|
||||||
|
AND metric_id = %(metric_id)s
|
||||||
|
AND (user_id = %(user_id)s OR is_public)
|
||||||
|
RETURNING data;""",
|
||||||
|
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id})
|
||||||
|
)
|
||||||
|
|
||||||
|
return {"state": "success"}
|
||||||
|
|
||||||
|
|
||||||
|
def __get_path_analysis_attributes(row):
|
||||||
|
card_info = row.pop("cardInfo")
|
||||||
|
row["excludes"] = card_info.get("excludes", [])
|
||||||
|
row["startPoint"] = card_info.get("startPoint", [])
|
||||||
|
row["startType"] = card_info.get("startType", "start")
|
||||||
|
row["hideExcess"] = card_info.get("hideExcess", False)
|
||||||
|
return row
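Path-analysis cards keep their extra attributes in the card_info JSON column: they are packed on write by __get_path_analysis_card_info and, once get_card has camelCased the row, popped back out as flat keys by the function above. A tiny illustration with plain dicts (the start point literal is hypothetical):

row = {"metricId": 1,
       "cardInfo": {"startPoint": [{"type": "location", "value": ["/home"]}],
                    "startType": "start", "excludes": [], "hideExcess": False}}

# Same flattening as __get_path_analysis_attributes.
info = row.pop("cardInfo")
row["startPoint"] = info.get("startPoint", [])
row["startType"] = info.get("startType", "start")
row["excludes"] = info.get("excludes", [])
row["hideExcess"] = info.get("hideExcess", False)
print(row)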
|
||||||
|
|
||||||
|
|
||||||
|
def get_card(metric_id, project_id, user_id, flatten: bool = True, include_data: bool = False):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, deleted_at, edited_at, metric_type,
|
||||||
|
view_type, metric_of, metric_value, metric_format, is_pinned, default_config,
|
||||||
|
default_config AS config,series, dashboards, owner_email, card_info
|
||||||
|
{',data' if include_data else ''}
|
||||||
|
FROM metrics
|
||||||
|
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
|
||||||
|
FROM metric_series
|
||||||
|
WHERE metric_series.metric_id = metrics.metric_id
|
||||||
|
AND metric_series.deleted_at ISNULL
|
||||||
|
) AS metric_series ON (TRUE)
|
||||||
|
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
||||||
|
FROM (SELECT dashboard_id, name, is_public
|
||||||
|
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||||
|
WHERE deleted_at ISNULL
|
||||||
|
AND project_id = %(project_id)s
|
||||||
|
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
||||||
|
AND metric_id = %(metric_id)s) AS connected_dashboards
|
||||||
|
) AS connected_dashboards ON (TRUE)
|
||||||
|
LEFT JOIN LATERAL (SELECT email AS owner_email
|
||||||
|
FROM users
|
||||||
|
WHERE deleted_at ISNULL
|
||||||
|
AND users.user_id = metrics.user_id
|
||||||
|
) AS owner ON (TRUE)
|
||||||
|
WHERE metrics.project_id = %(project_id)s
|
||||||
|
AND metrics.deleted_at ISNULL
|
||||||
|
AND (metrics.user_id = %(user_id)s OR metrics.is_public)
|
||||||
|
AND metrics.metric_id = %(metric_id)s
|
||||||
|
ORDER BY created_at;""",
|
||||||
|
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
|
||||||
|
)
|
||||||
|
cur.execute(query)
|
||||||
|
row = cur.fetchone()
|
||||||
|
if row is None:
|
||||||
|
return None
|
||||||
|
row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
|
||||||
|
row["edited_at"] = TimeUTC.datetime_to_timestamp(row["edited_at"])
|
||||||
|
if flatten:
|
||||||
|
for s in row["series"]:
|
||||||
|
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
|
||||||
|
row = helper.dict_to_camel_case(row)
|
||||||
|
if row["metricType"] == schemas.MetricType.PATH_ANALYSIS:
|
||||||
|
row = __get_path_analysis_attributes(row=row)
|
||||||
|
return row
|
||||||
|
|
||||||
|
|
||||||
|
def get_series_for_alert(project_id, user_id):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
cur.execute(
|
||||||
|
cur.mogrify(
|
||||||
|
"""SELECT series_id AS value,
|
||||||
|
metrics.name || '.' || (COALESCE(metric_series.name, 'series ' || index)) || '.count' AS name,
|
||||||
|
'count' AS unit,
|
||||||
|
FALSE AS predefined,
|
||||||
|
metric_id,
|
||||||
|
series_id
|
||||||
|
FROM metric_series
|
||||||
|
INNER JOIN metrics USING (metric_id)
|
||||||
|
WHERE metrics.deleted_at ISNULL
|
||||||
|
AND metrics.project_id = %(project_id)s
|
||||||
|
AND metrics.metric_type = 'timeseries'
|
||||||
|
AND (user_id = %(user_id)s OR is_public)
|
||||||
|
ORDER BY name;""",
|
||||||
|
{"project_id": project_id, "user_id": user_id}
|
||||||
|
)
|
||||||
|
)
|
||||||
|
rows = cur.fetchall()
|
||||||
|
return helper.list_to_camel_case(rows)
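The rows come back from PostgreSQL with snake_case column names and are converted for the API by helper.list_to_camel_case. The helper itself is not shown in this diff; a minimal equivalent, under the assumption that it simply camelCases every key, would be:

def to_camel(key):
    head, *rest = key.split("_")
    return head + "".join(part.capitalize() for part in rest)

rows = [{"series_id": 4, "metric_id": 2, "name": "errors.series 1.count"}]
print([{to_camel(k): v for k, v in r.items()} for r in rows])
# [{'seriesId': 4, 'metricId': 2, 'name': 'errors.series 1.count'}]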
|
||||||
|
|
||||||
|
|
||||||
|
def change_state(project_id, metric_id, user_id, status):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
cur.execute(
|
||||||
|
cur.mogrify("""\
|
||||||
|
UPDATE public.metrics
|
||||||
|
SET active = %(status)s
|
||||||
|
WHERE metric_id = %(metric_id)s
|
||||||
|
AND (user_id = %(user_id)s OR is_public);""",
|
||||||
|
{"metric_id": metric_id, "status": status, "user_id": user_id})
|
||||||
|
)
|
||||||
|
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
|
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
|
||||||
|
data: schemas.CardSessionsSchema
|
||||||
|
# , range_value=None, start_date=None, end_date=None
|
||||||
|
):
|
||||||
|
# No need for this because the UI sends the full payload
|
||||||
|
# card: dict = get_card(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
|
||||||
|
# if card is None:
|
||||||
|
# return None
|
||||||
|
# metric: schemas.CardSchema = schemas.CardSchema(**card)
|
||||||
|
# metric: schemas.CardSchema = __merge_metric_with_data(metric=metric, data=data)
|
||||||
|
# if metric is None:
|
||||||
|
# return None
|
||||||
|
if not card_exists(metric_id=metric_id, project_id=project_id, user_id=user_id):
|
||||||
|
return None
|
||||||
|
for s in data.series:
|
||||||
|
s.filter.startTimestamp = data.startTimestamp
|
||||||
|
s.filter.endTimestamp = data.endTimestamp
|
||||||
|
s.filter.limit = data.limit
|
||||||
|
s.filter.page = data.page
|
||||||
|
issues_list = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter).get("issues", {})
|
||||||
|
issues_list = issues_list.get("significant", []) + issues_list.get("insignificant", [])
|
||||||
|
issue = None
|
||||||
|
for i in issues_list:
|
||||||
|
if i.get("issueId", "") == issue_id:
|
||||||
|
issue = i
|
||||||
|
break
|
||||||
|
if issue is None:
|
||||||
|
issue = issues.get(project_id=project_id, issue_id=issue_id)
|
||||||
|
if issue is not None:
|
||||||
|
issue = {**issue,
|
||||||
|
"affectedSessions": 0,
|
||||||
|
"affectedUsers": 0,
|
||||||
|
"conversionImpact": 0,
|
||||||
|
"lostConversions": 0,
|
||||||
|
"unaffectedSessions": 0}
|
||||||
|
return {"seriesId": s.series_id, "seriesName": s.name,
|
||||||
|
"sessions": sessions.search_sessions(user_id=user_id, project_id=project_id,
|
||||||
|
issue=issue, data=s.filter)
|
||||||
|
if issue is not None else {"total": 0, "sessions": []},
|
||||||
|
"issue": issue}
|
||||||
|
|
||||||
|
|
||||||
|
def make_chart_from_card(project: schemas.ProjectContext, user_id, metric_id, data: schemas.CardSessionsSchema):
|
||||||
|
raw_metric: dict = get_card(metric_id=metric_id, project_id=project.project_id, user_id=user_id, include_data=True)
|
||||||
|
|
||||||
|
if raw_metric is None:
|
||||||
|
raise HTTPException(status_code=status.HTTP_404_NOT_FOUND, detail="card not found")
|
||||||
|
raw_metric["startTimestamp"] = data.startTimestamp
|
||||||
|
raw_metric["endTimestamp"] = data.endTimestamp
|
||||||
|
raw_metric["limit"] = data.limit
|
||||||
|
raw_metric["density"] = data.density
|
||||||
|
metric: schemas.CardSchema = schemas.CardSchema(**raw_metric)
|
||||||
|
|
||||||
|
if metric.is_predefined:
|
||||||
|
return custom_metrics_predefined.get_metric(key=metric.metric_of,
|
||||||
|
project_id=project.project_id,
|
||||||
|
data=data.model_dump())
|
||||||
|
elif metric.metric_type == schemas.MetricType.HEAT_MAP:
|
||||||
|
if raw_metric["data"] and raw_metric["data"].get("sessionId"):
|
||||||
|
return heatmaps.get_selected_session(project_id=project.project_id,
|
||||||
|
session_id=raw_metric["data"]["sessionId"])
|
||||||
|
else:
|
||||||
|
return heatmaps.search_short_session(project_id=project.project_id,
|
||||||
|
data=schemas.HeatMapSessionsSearch(**metric.model_dump()),
|
||||||
|
user_id=user_id)
|
||||||
|
|
||||||
|
return get_chart(project=project, data=metric, user_id=user_id)
|
||||||
|
|
||||||
|
|
||||||
|
def card_exists(metric_id, project_id, user_id) -> bool:
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
f"""SELECT 1
|
||||||
|
FROM metrics
|
||||||
|
LEFT JOIN LATERAL (SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public,name),'[]'::jsonb) AS dashboards
|
||||||
|
FROM (SELECT dashboard_id, name, is_public
|
||||||
|
FROM dashboards INNER JOIN dashboard_widgets USING (dashboard_id)
|
||||||
|
WHERE deleted_at ISNULL
|
||||||
|
AND project_id = %(project_id)s
|
||||||
|
AND ((dashboards.user_id = %(user_id)s OR is_public))
|
||||||
|
AND metric_id = %(metric_id)s) AS connected_dashboards
|
||||||
|
) AS connected_dashboards ON (TRUE)
|
||||||
|
LEFT JOIN LATERAL (SELECT email AS owner_email
|
||||||
|
FROM users
|
||||||
|
WHERE deleted_at ISNULL
|
||||||
|
AND users.user_id = metrics.user_id
|
||||||
|
) AS owner ON (TRUE)
|
||||||
|
WHERE metrics.project_id = %(project_id)s
|
||||||
|
AND metrics.deleted_at ISNULL
|
||||||
|
AND (metrics.user_id = %(user_id)s OR metrics.is_public)
|
||||||
|
AND metrics.metric_id = %(metric_id)s
|
||||||
|
ORDER BY created_at;""",
|
||||||
|
{"metric_id": metric_id, "project_id": project_id, "user_id": user_id}
|
||||||
|
)
|
||||||
|
cur.execute(query)
|
||||||
|
row = cur.fetchone()
|
||||||
|
return row is not None
|
||||||
api/chalicelib/core/custom_metrics_predefined.py (new file, 25 lines)
@@ -0,0 +1,25 @@
|
||||||
|
import logging
|
||||||
|
from typing import Union
|
||||||
|
|
||||||
|
import schemas
|
||||||
|
from chalicelib.core import metrics
|
||||||
|
|
||||||
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
||||||
|
def get_metric(key: Union[schemas.MetricOfWebVitals, schemas.MetricOfErrors], project_id: int, data: dict):
|
||||||
|
supported = {
|
||||||
|
schemas.MetricOfWebVitals.COUNT_SESSIONS: metrics.get_processed_sessions,
|
||||||
|
schemas.MetricOfWebVitals.AVG_VISITED_PAGES: metrics.get_user_activity_avg_visited_pages,
|
||||||
|
schemas.MetricOfWebVitals.COUNT_REQUESTS: metrics.get_top_metrics_count_requests,
|
||||||
|
schemas.MetricOfErrors.IMPACTED_SESSIONS_BY_JS_ERRORS: metrics.get_impacted_sessions_by_js_errors,
|
||||||
|
schemas.MetricOfErrors.DOMAINS_ERRORS_4XX: metrics.get_domains_errors_4xx,
|
||||||
|
schemas.MetricOfErrors.DOMAINS_ERRORS_5XX: metrics.get_domains_errors_5xx,
|
||||||
|
schemas.MetricOfErrors.ERRORS_PER_DOMAINS: metrics.get_errors_per_domains,
|
||||||
|
schemas.MetricOfErrors.ERRORS_PER_TYPE: metrics.get_errors_per_type,
|
||||||
|
schemas.MetricOfErrors.RESOURCES_BY_PARTY: metrics.get_resources_by_party,
|
||||||
|
schemas.MetricOfWebVitals.COUNT_USERS: metrics.get_unique_users,
|
||||||
|
schemas.MetricOfWebVitals.SPEED_LOCATION: metrics.get_speed_index_location,
|
||||||
|
}
|
||||||
|
|
||||||
|
return supported.get(key, lambda *args, **kwargs: None)(project_id=project_id, **data)
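get_metric is a plain dispatch table: the requested key is looked up in the dict and the matching metrics function is called with the project id and the request payload, while an unknown key falls back to a lambda that swallows the arguments and returns None. The same pattern in miniature, with illustrative names:

def _count_sessions(project_id, **kwargs):
    return {"metric": "count_sessions", "project_id": project_id}

SUPPORTED = {"count_sessions": _count_sessions}

def dispatch_metric(key, project_id, data):
    # Unknown keys fall through to a no-op that accepts any kwargs and returns None.
    return SUPPORTED.get(key, lambda *args, **kwargs: None)(project_id=project_id, **data)

print(dispatch_metric("count_sessions", 1, {}))  # {'metric': 'count_sessions', 'project_id': 1}
print(dispatch_metric("unknown", 1, {}))         # None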
|
||||||
|
|
@@ -1,7 +1,7 @@
 import json

 import schemas
-from chalicelib.core.metrics import custom_metrics
+from chalicelib.core import custom_metrics
 from chalicelib.utils import helper
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC
||||||
api/chalicelib/core/errors.py (new file, 602 lines)
@@ -0,0 +1,602 @@
|
||||||
|
import json
|
||||||
|
|
||||||
|
import schemas
|
||||||
|
from chalicelib.core import sourcemaps, sessions
|
||||||
|
from chalicelib.utils import errors_helper
|
||||||
|
from chalicelib.utils import pg_client, helper
|
||||||
|
from chalicelib.utils.TimeUTC import TimeUTC
|
||||||
|
from chalicelib.utils.metrics_helper import __get_step_size
|
||||||
|
|
||||||
|
|
||||||
|
def get(error_id, family=False):
|
||||||
|
if family:
|
||||||
|
return get_batch([error_id])
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
# trying: return only 1 error, without event details
|
||||||
|
query = cur.mogrify(
|
||||||
|
# "SELECT * FROM events.errors AS e INNER JOIN public.errors AS re USING(error_id) WHERE error_id = %(error_id)s;",
|
||||||
|
"SELECT * FROM public.errors WHERE error_id = %(error_id)s LIMIT 1;",
|
||||||
|
{"error_id": error_id})
|
||||||
|
cur.execute(query=query)
|
||||||
|
result = cur.fetchone()
|
||||||
|
if result is not None:
|
||||||
|
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
|
||||||
|
return helper.dict_to_camel_case(result)
|
||||||
|
|
||||||
|
|
||||||
|
def get_batch(error_ids):
|
||||||
|
if len(error_ids) == 0:
|
||||||
|
return []
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
"""
|
||||||
|
WITH RECURSIVE error_family AS (
|
||||||
|
SELECT *
|
||||||
|
FROM public.errors
|
||||||
|
WHERE error_id IN %(error_ids)s
|
||||||
|
UNION
|
||||||
|
SELECT child_errors.*
|
||||||
|
FROM public.errors AS child_errors
|
||||||
|
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
|
||||||
|
)
|
||||||
|
SELECT *
|
||||||
|
FROM error_family;""",
|
||||||
|
{"error_ids": tuple(error_ids)})
|
||||||
|
cur.execute(query=query)
|
||||||
|
errors = cur.fetchall()
|
||||||
|
for e in errors:
|
||||||
|
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
|
||||||
|
return helper.list_to_camel_case(errors)
|
||||||
|
|
||||||
|
|
||||||
|
def __flatten_sort_key_count_version(data, merge_nested=False):
|
||||||
|
if data is None:
|
||||||
|
return []
|
||||||
|
return sorted(
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"name": f'{o["name"]}@{v["version"]}',
|
||||||
|
"count": v["count"]
|
||||||
|
} for o in data for v in o["partition"]
|
||||||
|
],
|
||||||
|
key=lambda o: o["count"], reverse=True) if merge_nested else \
|
||||||
|
[
|
||||||
|
{
|
||||||
|
"name": o["name"],
|
||||||
|
"count": o["count"],
|
||||||
|
} for o in data
|
||||||
|
]
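With merge_nested=False the function keeps the top-level name/count pairs; with merge_nested=True it explodes each version partition into name@version entries sorted by count. Exercising the function above on a small hypothetical payload:

data = [{"name": "Chrome", "count": 10,
         "partition": [{"version": "120", "count": 7}, {"version": "119", "count": 3}]}]

print(__flatten_sort_key_count_version(data))
# [{'name': 'Chrome', 'count': 10}]
print(__flatten_sort_key_count_version(data, merge_nested=True))
# [{'name': 'Chrome@120', 'count': 7}, {'name': 'Chrome@119', 'count': 3}]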
|
||||||
|
|
||||||
|
|
||||||
|
def __process_tags(row):
|
||||||
|
return [
|
||||||
|
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
|
||||||
|
{"name": "browser.ver",
|
||||||
|
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
|
||||||
|
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
|
||||||
|
{"name": "OS.ver",
|
||||||
|
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
|
||||||
|
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
|
||||||
|
{"name": "device",
|
||||||
|
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
|
||||||
|
{"name": "country", "partitions": row.pop("country_partition")}
|
||||||
|
]
|
||||||
|
|
||||||
|
|
||||||
|
def get_details(project_id, error_id, user_id, **data):
|
||||||
|
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
|
||||||
|
pg_sub_query24.append("error_id = %(error_id)s")
|
||||||
|
pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
|
||||||
|
startTime_arg_name="startDate30",
|
||||||
|
endTime_arg_name="endDate30", project_key="sessions.project_id")
|
||||||
|
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
||||||
|
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
||||||
|
pg_sub_query30_session.append("error_id = %(error_id)s")
|
||||||
|
pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
|
||||||
|
endTime_arg_name="endDate30", project_key="errors.project_id")
|
||||||
|
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
||||||
|
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
||||||
|
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
||||||
|
pg_sub_query30_err.append("error_id = %(error_id)s")
|
||||||
|
pg_sub_query30_err.append("source ='js_exception'")
|
||||||
|
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
|
||||||
|
pg_sub_query30.append("error_id = %(error_id)s")
|
||||||
|
pg_basic_query = __get_basic_constraints(time_constraint=False)
|
||||||
|
pg_basic_query.append("error_id = %(error_id)s")
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
data["startDate24"] = TimeUTC.now(-1)
|
||||||
|
data["endDate24"] = TimeUTC.now()
|
||||||
|
data["startDate30"] = TimeUTC.now(-30)
|
||||||
|
data["endDate30"] = TimeUTC.now()
|
||||||
|
density24 = int(data.get("density24", 24))
|
||||||
|
step_size24 = __get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
|
||||||
|
density30 = int(data.get("density30", 30))
|
||||||
|
step_size30 = __get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
|
||||||
|
params = {
|
||||||
|
"startDate24": data['startDate24'],
|
||||||
|
"endDate24": data['endDate24'],
|
||||||
|
"startDate30": data['startDate30'],
|
||||||
|
"endDate30": data['endDate30'],
|
||||||
|
"project_id": project_id,
|
||||||
|
"userId": user_id,
|
||||||
|
"step_size24": step_size24,
|
||||||
|
"step_size30": step_size30,
|
||||||
|
"error_id": error_id}
|
||||||
|
|
||||||
|
main_pg_query = f"""\
|
||||||
|
SELECT error_id,
|
||||||
|
name,
|
||||||
|
message,
|
||||||
|
users,
|
||||||
|
sessions,
|
||||||
|
last_occurrence,
|
||||||
|
first_occurrence,
|
||||||
|
last_session_id,
|
||||||
|
browsers_partition,
|
||||||
|
os_partition,
|
||||||
|
device_partition,
|
||||||
|
country_partition,
|
||||||
|
chart24,
|
||||||
|
chart30,
|
||||||
|
custom_tags
|
||||||
|
FROM (SELECT error_id,
|
||||||
|
name,
|
||||||
|
message,
|
||||||
|
COUNT(DISTINCT user_id) AS users,
|
||||||
|
COUNT(DISTINCT session_id) AS sessions
|
||||||
|
FROM public.errors
|
||||||
|
INNER JOIN events.errors AS s_errors USING (error_id)
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_err)}
|
||||||
|
GROUP BY error_id, name, message) AS details
|
||||||
|
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
|
||||||
|
MIN(timestamp) AS first_occurrence
|
||||||
|
FROM events.errors
|
||||||
|
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT session_id AS last_session_id,
|
||||||
|
coalesce(custom_tags, '[]')::jsonb AS custom_tags
|
||||||
|
FROM events.errors
|
||||||
|
LEFT JOIN LATERAL (
|
||||||
|
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
|
||||||
|
FROM errors_tags
|
||||||
|
WHERE errors_tags.error_id = %(error_id)s
|
||||||
|
AND errors_tags.session_id = errors.session_id
|
||||||
|
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
|
||||||
|
WHERE error_id = %(error_id)s
|
||||||
|
ORDER BY errors.timestamp DESC
|
||||||
|
LIMIT 1) AS last_session_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
|
||||||
|
FROM (SELECT *
|
||||||
|
FROM (SELECT user_browser AS name,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
GROUP BY user_browser
|
||||||
|
ORDER BY count DESC) AS count_per_browser_query
|
||||||
|
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
|
||||||
|
FROM (SELECT user_browser_version AS version,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
AND sessions.user_browser = count_per_browser_query.name
|
||||||
|
GROUP BY user_browser_version
|
||||||
|
ORDER BY count DESC) AS version_details
|
||||||
|
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
||||||
|
FROM (SELECT *
|
||||||
|
FROM (SELECT user_os AS name,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
GROUP BY user_os
|
||||||
|
ORDER BY count DESC) AS count_per_os_details
|
||||||
|
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
||||||
|
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
AND sessions.user_os = count_per_os_details.name
|
||||||
|
GROUP BY user_os_version
|
||||||
|
ORDER BY count DESC) AS count_per_version_details
|
||||||
|
GROUP BY count_per_os_details.name ) AS os_version_details
|
||||||
|
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
||||||
|
FROM (SELECT *
|
||||||
|
FROM (SELECT user_device_type AS name,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
GROUP BY user_device_type
|
||||||
|
ORDER BY count DESC) AS count_per_device_details
|
||||||
|
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
|
||||||
|
FROM (SELECT CASE
|
||||||
|
WHEN user_device = '' OR user_device ISNULL
|
||||||
|
THEN 'unknown'
|
||||||
|
ELSE user_device END AS version,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
AND sessions.user_device_type = count_per_device_details.name
|
||||||
|
GROUP BY user_device
|
||||||
|
ORDER BY count DESC) AS count_per_device_v_details
|
||||||
|
GROUP BY count_per_device_details.name ) AS device_version_details
|
||||||
|
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
||||||
|
FROM (SELECT user_country AS name,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||||
|
GROUP BY user_country
|
||||||
|
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
||||||
|
FROM (SELECT generated_timestamp AS timestamp,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query24)}
|
||||||
|
) AS chart_details ON (TRUE)
|
||||||
|
GROUP BY generated_timestamp
|
||||||
|
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
|
||||||
|
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
|
||||||
|
FROM (SELECT generated_timestamp AS timestamp,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||||
|
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
|
||||||
|
ON (TRUE)
|
||||||
|
GROUP BY timestamp
|
||||||
|
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
|
||||||
|
"""
|
||||||
|
|
||||||
|
# print("--------------------")
|
||||||
|
# print(cur.mogrify(main_pg_query, params))
|
||||||
|
# print("--------------------")
|
||||||
|
cur.execute(cur.mogrify(main_pg_query, params))
|
||||||
|
row = cur.fetchone()
|
||||||
|
if row is None:
|
||||||
|
return {"errors": ["error not found"]}
|
||||||
|
row["tags"] = __process_tags(row)
|
||||||
|
|
||||||
|
query = cur.mogrify(
|
||||||
|
f"""SELECT error_id, status, session_id, start_ts,
|
||||||
|
parent_error_id,session_id, user_anonymous_id,
|
||||||
|
user_id, user_uuid, user_browser, user_browser_version,
|
||||||
|
user_os, user_os_version, user_device, payload,
|
||||||
|
FALSE AS favorite,
|
||||||
|
True AS viewed
|
||||||
|
FROM public.errors AS pe
|
||||||
|
INNER JOIN events.errors AS ee USING (error_id)
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE pe.project_id = %(project_id)s
|
||||||
|
AND error_id = %(error_id)s
|
||||||
|
ORDER BY start_ts DESC
|
||||||
|
LIMIT 1;""",
|
||||||
|
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
|
||||||
|
cur.execute(query=query)
|
||||||
|
status = cur.fetchone()
|
||||||
|
|
||||||
|
if status is not None:
|
||||||
|
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
|
||||||
|
row["status"] = status.pop("status")
|
||||||
|
row["parent_error_id"] = status.pop("parent_error_id")
|
||||||
|
row["favorite"] = status.pop("favorite")
|
||||||
|
row["viewed"] = status.pop("viewed")
|
||||||
|
row["last_hydrated_session"] = status
|
||||||
|
else:
|
||||||
|
row["stack"] = []
|
||||||
|
row["last_hydrated_session"] = None
|
||||||
|
row["status"] = "untracked"
|
||||||
|
row["parent_error_id"] = None
|
||||||
|
row["favorite"] = False
|
||||||
|
row["viewed"] = False
|
||||||
|
return {"data": helper.dict_to_camel_case(row)}
|
||||||
|
|
||||||
|
|
||||||
|
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
||||||
|
endTime_arg_name="endDate", chart=False, step_size_name="step_size",
|
||||||
|
project_key="project_id"):
|
||||||
|
if project_key is None:
|
||||||
|
ch_sub_query = []
|
||||||
|
else:
|
||||||
|
ch_sub_query = [f"{project_key} =%(project_id)s"]
|
||||||
|
if time_constraint:
|
||||||
|
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
|
||||||
|
f"timestamp < %({endTime_arg_name})s"]
|
||||||
|
if chart:
|
||||||
|
ch_sub_query += [f"timestamp >= generated_timestamp",
|
||||||
|
f"timestamp < generated_timestamp + %({step_size_name})s"]
|
||||||
|
if platform == schemas.PlatformType.MOBILE:
|
||||||
|
ch_sub_query.append("user_device_type = 'mobile'")
|
||||||
|
elif platform == schemas.PlatformType.DESKTOP:
|
||||||
|
ch_sub_query.append("user_device_type = 'desktop'")
|
||||||
|
return ch_sub_query
|
||||||
|
|
||||||
|
|
||||||
|
def __get_sort_key(key):
|
||||||
|
return {
|
||||||
|
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
||||||
|
schemas.ErrorSort.USERS_COUNT: "users",
|
||||||
|
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
|
||||||
|
}.get(key, 'max_datetime')
|
||||||
|
|
||||||
|
|
||||||
|
def search(data: schemas.SearchErrorsSchema, project_id, user_id):
|
||||||
|
empty_response = {
|
||||||
|
'total': 0,
|
||||||
|
'errors': []
|
||||||
|
}
|
||||||
|
|
||||||
|
platform = None
|
||||||
|
for f in data.filters:
|
||||||
|
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||||
|
platform = f.value[0]
|
||||||
|
pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
|
||||||
|
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
||||||
|
"pe.project_id=%(project_id)s"]
|
||||||
|
# To ignore Script error
|
||||||
|
pg_sub_query.append("pe.message!='Script error.'")
|
||||||
|
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
|
||||||
|
if platform:
|
||||||
|
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
|
||||||
|
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
||||||
|
statuses = []
|
||||||
|
error_ids = None
|
||||||
|
if data.startTimestamp is None:
|
||||||
|
data.startTimestamp = TimeUTC.now(-30)
|
||||||
|
if data.endTimestamp is None:
|
||||||
|
data.endTimestamp = TimeUTC.now(1)
|
||||||
|
if len(data.events) > 0 or len(data.filters) > 0:
|
||||||
|
print("-- searching for sessions before errors")
|
||||||
|
statuses = sessions.search_sessions(data=data, project_id=project_id, user_id=user_id, errors_only=True,
|
||||||
|
error_status=data.status)
|
||||||
|
if len(statuses) == 0:
|
||||||
|
return empty_response
|
||||||
|
error_ids = [e["errorId"] for e in statuses]
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
step_size = __get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
|
||||||
|
sort = __get_sort_key('datetime')
|
||||||
|
if data.sort is not None:
|
||||||
|
sort = __get_sort_key(data.sort)
|
||||||
|
order = schemas.SortOrderType.DESC
|
||||||
|
if data.order is not None:
|
||||||
|
order = data.order
|
||||||
|
extra_join = ""
|
||||||
|
|
||||||
|
params = {
|
||||||
|
"startDate": data.startTimestamp,
|
||||||
|
"endDate": data.endTimestamp,
|
||||||
|
"project_id": project_id,
|
||||||
|
"userId": user_id,
|
||||||
|
"step_size": step_size}
|
||||||
|
if data.status != schemas.ErrorStatus.ALL:
|
||||||
|
pg_sub_query.append("status = %(error_status)s")
|
||||||
|
params["error_status"] = data.status
|
||||||
|
if data.limit is not None and data.page is not None:
|
||||||
|
params["errors_offset"] = (data.page - 1) * data.limit
|
||||||
|
params["errors_limit"] = data.limit
|
||||||
|
else:
|
||||||
|
params["errors_offset"] = 0
|
||||||
|
params["errors_limit"] = 200
|
||||||
|
|
||||||
|
if error_ids is not None:
|
||||||
|
params["error_ids"] = tuple(error_ids)
|
||||||
|
pg_sub_query.append("error_id IN %(error_ids)s")
|
||||||
|
# if data.bookmarked:
|
||||||
|
# pg_sub_query.append("ufe.user_id = %(userId)s")
|
||||||
|
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
|
||||||
|
if data.query is not None and len(data.query) > 0:
|
||||||
|
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
|
||||||
|
params["error_query"] = helper.values_for_operator(value=data.query,
|
||||||
|
op=schemas.SearchEventOperator.CONTAINS)
|
||||||
|
|
||||||
|
main_pg_query = f"""SELECT full_count,
|
||||||
|
error_id,
|
||||||
|
name,
|
||||||
|
message,
|
||||||
|
users,
|
||||||
|
sessions,
|
||||||
|
last_occurrence,
|
||||||
|
first_occurrence,
|
||||||
|
chart
|
||||||
|
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
|
||||||
|
FROM (SELECT error_id,
|
||||||
|
name,
|
||||||
|
message,
|
||||||
|
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
|
||||||
|
COUNT(DISTINCT session_id) AS sessions,
|
||||||
|
MAX(timestamp) AS max_datetime,
|
||||||
|
MIN(timestamp) AS min_datetime
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN public.errors AS pe USING (error_id)
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
{extra_join}
|
||||||
|
WHERE {" AND ".join(pg_sub_query)}
|
||||||
|
GROUP BY error_id, name, message
|
||||||
|
ORDER BY {sort} {order}) AS details
|
||||||
|
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
|
||||||
|
) AS details
|
||||||
|
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
|
||||||
|
MIN(timestamp) AS first_occurrence
|
||||||
|
FROM events.errors
|
||||||
|
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
|
||||||
|
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
|
||||||
|
FROM (SELECT generated_timestamp AS timestamp,
|
||||||
|
COUNT(session_id) AS count
|
||||||
|
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
||||||
|
FROM events.errors
|
||||||
|
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
) AS sessions ON (TRUE)
|
||||||
|
GROUP BY timestamp
|
||||||
|
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
||||||
|
|
||||||
|
# print("--------------------")
|
||||||
|
# print(cur.mogrify(main_pg_query, params))
|
||||||
|
# print("--------------------")
|
||||||
|
|
||||||
|
cur.execute(cur.mogrify(main_pg_query, params))
|
||||||
|
rows = cur.fetchall()
|
||||||
|
total = 0 if len(rows) == 0 else rows[0]["full_count"]
|
||||||
|
|
||||||
|
if total == 0:
|
||||||
|
rows = []
|
||||||
|
else:
|
||||||
|
if len(statuses) == 0:
|
||||||
|
query = cur.mogrify(
|
||||||
|
"""SELECT error_id,
|
||||||
|
COALESCE((SELECT TRUE
|
||||||
|
FROM public.user_viewed_errors AS ve
|
||||||
|
WHERE errors.error_id = ve.error_id
|
||||||
|
AND ve.user_id = %(user_id)s LIMIT 1), FALSE) AS viewed
|
||||||
|
FROM public.errors
|
||||||
|
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
|
||||||
|
{"project_id": project_id, "error_ids": tuple([r["error_id"] for r in rows]),
|
||||||
|
"user_id": user_id})
|
||||||
|
cur.execute(query=query)
|
||||||
|
statuses = helper.list_to_camel_case(cur.fetchall())
|
||||||
|
statuses = {
|
||||||
|
s["errorId"]: s for s in statuses
|
||||||
|
}
|
||||||
|
|
||||||
|
for r in rows:
|
||||||
|
r.pop("full_count")
|
||||||
|
if r["error_id"] in statuses:
|
||||||
|
r["viewed"] = statuses[r["error_id"]]["viewed"]
|
||||||
|
else:
|
||||||
|
r["viewed"] = False
|
||||||
|
|
||||||
|
return {
|
||||||
|
'total': total,
|
||||||
|
'errors': helper.list_to_camel_case(rows)
|
||||||
|
}
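The query fetches the paginated rows and the overall total in a single pass by attaching COUNT(details) OVER () to every row of the inner selection; the code then reads the total from the first row and strips full_count before returning. A reduced sketch of that read-out step:

def split_total(rows):
    # Every row carries the same window-function value in full_count.
    total = 0 if len(rows) == 0 else rows[0]["full_count"]
    for r in rows:
        r.pop("full_count", None)
    return total, rows

print(split_total([{"full_count": 42, "error_id": "e1"},
                   {"full_count": 42, "error_id": "e2"}]))
# (42, [{'error_id': 'e1'}, {'error_id': 'e2'}])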
|
||||||
|
|
||||||
|
|
||||||
|
def __save_stacktrace(error_id, data):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
"""UPDATE public.errors
|
||||||
|
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
|
||||||
|
WHERE error_id = %(error_id)s;""",
|
||||||
|
{"error_id": error_id, "data": json.dumps(data)})
|
||||||
|
cur.execute(query=query)
|
||||||
|
|
||||||
|
|
||||||
|
def get_trace(project_id, error_id):
|
||||||
|
error = get(error_id=error_id, family=False)
|
||||||
|
if error is None:
|
||||||
|
return {"errors": ["error not found"]}
|
||||||
|
if error.get("source", "") != "js_exception":
|
||||||
|
return {"errors": ["this source of errors doesn't have a sourcemap"]}
|
||||||
|
if error.get("payload") is None:
|
||||||
|
return {"errors": ["null payload"]}
|
||||||
|
if error.get("stacktrace") is not None:
|
||||||
|
return {"sourcemapUploaded": True,
|
||||||
|
"trace": error.get("stacktrace"),
|
||||||
|
"preparsed": True}
|
||||||
|
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
|
||||||
|
if all_exists:
|
||||||
|
__save_stacktrace(error_id=error_id, data=trace)
|
||||||
|
return {"sourcemapUploaded": all_exists,
|
||||||
|
"trace": trace,
|
||||||
|
"preparsed": False}
|
||||||
|
|
||||||
|
|
||||||
|
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
||||||
|
extra_constraints = ["s.project_id = %(project_id)s",
|
||||||
|
"s.start_ts >= %(startDate)s",
|
||||||
|
"s.start_ts <= %(endDate)s",
|
||||||
|
"e.error_id = %(error_id)s"]
|
||||||
|
if start_date is None:
|
||||||
|
start_date = TimeUTC.now(-7)
|
||||||
|
if end_date is None:
|
||||||
|
end_date = TimeUTC.now()
|
||||||
|
|
||||||
|
params = {
|
||||||
|
"startDate": start_date,
|
||||||
|
"endDate": end_date,
|
||||||
|
"project_id": project_id,
|
||||||
|
"userId": user_id,
|
||||||
|
"error_id": error_id}
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
f"""SELECT s.project_id,
|
||||||
|
s.session_id::text AS session_id,
|
||||||
|
s.user_uuid,
|
||||||
|
s.user_id,
|
||||||
|
s.user_agent,
|
||||||
|
s.user_os,
|
||||||
|
s.user_browser,
|
||||||
|
s.user_device,
|
||||||
|
s.user_country,
|
||||||
|
s.start_ts,
|
||||||
|
s.duration,
|
||||||
|
s.events_count,
|
||||||
|
s.pages_count,
|
||||||
|
s.errors_count,
|
||||||
|
s.issue_types,
|
||||||
|
COALESCE((SELECT TRUE
|
||||||
|
FROM public.user_favorite_sessions AS fs
|
||||||
|
WHERE s.session_id = fs.session_id
|
||||||
|
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
|
||||||
|
COALESCE((SELECT TRUE
|
||||||
|
FROM public.user_viewed_sessions AS fs
|
||||||
|
WHERE s.session_id = fs.session_id
|
||||||
|
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
|
||||||
|
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
|
||||||
|
WHERE {" AND ".join(extra_constraints)}
|
||||||
|
ORDER BY s.start_ts DESC;""",
|
||||||
|
params)
|
||||||
|
cur.execute(query=query)
|
||||||
|
sessions_list = []
|
||||||
|
total = cur.rowcount
|
||||||
|
row = cur.fetchone()
|
||||||
|
while row is not None and len(sessions_list) < 100:
|
||||||
|
sessions_list.append(row)
|
||||||
|
row = cur.fetchone()
|
||||||
|
|
||||||
|
return {
|
||||||
|
'total': total,
|
||||||
|
'sessions': helper.list_to_camel_case(sessions_list)
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
ACTION_STATE = {
|
||||||
|
"unsolve": 'unresolved',
|
||||||
|
"solve": 'resolved',
|
||||||
|
"ignore": 'ignored'
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
|
def change_state(project_id, user_id, error_id, action):
|
||||||
|
errors = get(error_id, family=True)
|
||||||
|
print(len(errors))
|
||||||
|
status = ACTION_STATE.get(action)
|
||||||
|
if errors is None or len(errors) == 0:
|
||||||
|
return {"errors": ["error not found"]}
|
||||||
|
if errors[0]["status"] == status:
|
||||||
|
return {"errors": [f"error is already {status}"]}
|
||||||
|
|
||||||
|
if errors[0]["status"] == ACTION_STATE["solve"] and status == ACTION_STATE["ignore"]:
|
||||||
|
return {"errors": [f"state transition not permitted {errors[0]['status']} -> {status}"]}
|
||||||
|
|
||||||
|
params = {
|
||||||
|
"userId": user_id,
|
||||||
|
"error_ids": tuple([e["errorId"] for e in errors]),
|
||||||
|
"status": status}
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
"""UPDATE public.errors
|
||||||
|
SET status = %(status)s
|
||||||
|
WHERE error_id IN %(error_ids)s
|
||||||
|
RETURNING status""",
|
||||||
|
params)
|
||||||
|
cur.execute(query=query)
|
||||||
|
row = cur.fetchone()
|
||||||
|
if row is not None:
|
||||||
|
for e in errors:
|
||||||
|
e["status"] = row["status"]
|
||||||
|
return {"data": errors}
|
||||||
|
|
@@ -1,13 +0,0 @@ (deleted file)
-import logging
-
-from decouple import config
-
-logger = logging.getLogger(__name__)
-
-from . import errors_pg as errors_legacy
-
-if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
-    logger.info(">>> Using experimental error search")
-    from . import errors_ch as errors
-else:
-    from . import errors_pg as errors
|
|
||||||
|
|
@@ -1,409 +0,0 @@ (deleted file)
|
||||||
import schemas
|
|
||||||
from chalicelib.core import metadata
|
|
||||||
from chalicelib.core.errors import errors_legacy
|
|
||||||
from chalicelib.core.errors.modules import errors_helper
|
|
||||||
from chalicelib.core.errors.modules import sessions
|
|
||||||
from chalicelib.utils import ch_client, exp_ch_helper
|
|
||||||
from chalicelib.utils import helper, metrics_helper
|
|
||||||
from chalicelib.utils.TimeUTC import TimeUTC
|
|
||||||
|
|
||||||
|
|
||||||
def _multiple_values(values, value_key="value"):
|
|
||||||
query_values = {}
|
|
||||||
if values is not None and isinstance(values, list):
|
|
||||||
for i in range(len(values)):
|
|
||||||
k = f"{value_key}_{i}"
|
|
||||||
query_values[k] = values[i]
|
|
||||||
return query_values
|
|
||||||
|
|
||||||
|
|
||||||
def __get_sql_operator(op: schemas.SearchEventOperator):
|
|
||||||
return {
|
|
||||||
schemas.SearchEventOperator.IS: "=",
|
|
||||||
schemas.SearchEventOperator.IS_ANY: "IN",
|
|
||||||
schemas.SearchEventOperator.ON: "=",
|
|
||||||
schemas.SearchEventOperator.ON_ANY: "IN",
|
|
||||||
schemas.SearchEventOperator.IS_NOT: "!=",
|
|
||||||
schemas.SearchEventOperator.NOT_ON: "!=",
|
|
||||||
schemas.SearchEventOperator.CONTAINS: "ILIKE",
|
|
||||||
schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
|
|
||||||
schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
|
|
||||||
schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
|
|
||||||
}.get(op, "=")
|
|
||||||
|
|
||||||
|
|
||||||
def _isAny_opreator(op: schemas.SearchEventOperator):
|
|
||||||
return op in [schemas.SearchEventOperator.ON_ANY, schemas.SearchEventOperator.IS_ANY]
|
|
||||||
|
|
||||||
|
|
||||||
def _isUndefined_operator(op: schemas.SearchEventOperator):
|
|
||||||
return op in [schemas.SearchEventOperator.IS_UNDEFINED]
|
|
||||||
|
|
||||||
|
|
||||||
def __is_negation_operator(op: schemas.SearchEventOperator):
|
|
||||||
return op in [schemas.SearchEventOperator.IS_NOT,
|
|
||||||
schemas.SearchEventOperator.NOT_ON,
|
|
||||||
schemas.SearchEventOperator.NOT_CONTAINS]
|
|
||||||
|
|
||||||
|
|
||||||
def _multiple_conditions(condition, values, value_key="value", is_not=False):
|
|
||||||
query = []
|
|
||||||
for i in range(len(values)):
|
|
||||||
k = f"{value_key}_{i}"
|
|
||||||
query.append(condition.replace(value_key, k))
|
|
||||||
return "(" + (" AND " if is_not else " OR ").join(query) + ")"
|
|
||||||
|
|
||||||
|
|
||||||
def get(error_id, family=False):
|
|
||||||
return errors_legacy.get(error_id=error_id, family=family)
|
|
||||||
|
|
||||||
|
|
||||||
def get_batch(error_ids):
|
|
||||||
return errors_legacy.get_batch(error_ids=error_ids)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
|
||||||
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
|
|
||||||
table_name=None):
|
|
||||||
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
|
|
||||||
if table_name is not None:
|
|
||||||
table_name = table_name + "."
|
|
||||||
else:
|
|
||||||
table_name = ""
|
|
||||||
if type_condition:
|
|
||||||
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
|
|
||||||
if time_constraint:
|
|
||||||
ch_sub_query += [f"{table_name}created_at >= toDateTime(%({startTime_arg_name})s/1000)",
|
|
||||||
f"{table_name}created_at < toDateTime(%({endTime_arg_name})s/1000)"]
|
|
||||||
# if platform == schemas.PlatformType.MOBILE:
|
|
||||||
# ch_sub_query.append("user_device_type = 'mobile'")
|
|
||||||
# elif platform == schemas.PlatformType.DESKTOP:
|
|
||||||
# ch_sub_query.append("user_device_type = 'desktop'")
|
|
||||||
return ch_sub_query
|
|
||||||
|
|
||||||
|
|
||||||
def __get_sort_key(key):
|
|
||||||
return {
|
|
||||||
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
|
||||||
schemas.ErrorSort.USERS_COUNT: "users",
|
|
||||||
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
|
|
||||||
}.get(key, 'max_datetime')
|
|
||||||
|
|
||||||
|
|
||||||
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
|
|
||||||
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(data.startTimestamp)
|
|
||||||
MAIN_SESSIONS_TABLE = exp_ch_helper.get_main_sessions_table(data.startTimestamp)
|
|
||||||
|
|
||||||
platform = None
|
|
||||||
for f in data.filters:
|
|
||||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
|
||||||
platform = f.value[0]
|
|
||||||
ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
|
|
||||||
# ignore platform for errors table
|
|
||||||
ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
|
|
||||||
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")
|
|
||||||
|
|
||||||
# To ignore Script error
|
|
||||||
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'message') != 'Script error.'")
|
|
||||||
error_ids = None
|
|
||||||
|
|
||||||
if data.startTimestamp is None:
|
|
||||||
data.startTimestamp = TimeUTC.now(-7)
|
|
||||||
if data.endTimestamp is None:
|
|
||||||
data.endTimestamp = TimeUTC.now(1)
|
|
||||||
|
|
||||||
subquery_part = ""
|
|
||||||
params = {}
|
|
||||||
if len(data.events) > 0:
|
|
||||||
errors_condition_count = 0
|
|
||||||
for i, e in enumerate(data.events):
|
|
||||||
if e.type == schemas.EventType.ERROR:
|
|
||||||
errors_condition_count += 1
|
|
||||||
is_any = _isAny_opreator(e.operator)
|
|
||||||
op = __get_sql_operator(e.operator)
|
|
||||||
e_k = f"e_value{i}"
|
|
||||||
params = {**params, **_multiple_values(e.value, value_key=e_k)}
|
|
||||||
if not is_any and len(e.value) > 0 and e.value[1] not in [None, "*", ""]:
|
|
||||||
ch_sub_query.append(
|
|
||||||
_multiple_conditions(f"(message {op} %({e_k})s OR name {op} %({e_k})s)",
|
|
||||||
e.value, value_key=e_k))
|
|
||||||
if len(data.events) > errors_condition_count:
|
|
||||||
subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
|
|
||||||
errors_only=True,
|
|
||||||
project_id=project.project_id,
|
|
||||||
user_id=user_id,
|
|
||||||
issue=None,
|
|
||||||
favorite_only=False)
|
|
||||||
subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
|
|
||||||
params = {**params, **subquery_part_args}
|
|
||||||
if len(data.filters) > 0:
|
|
||||||
meta_keys = None
|
|
||||||
# include a sub-query of sessions inside the events query in order to reduce the amount of selected data
|
|
||||||
for i, f in enumerate(data.filters):
|
|
||||||
if not isinstance(f.value, list):
|
|
||||||
f.value = [f.value]
|
|
||||||
filter_type = f.type
|
|
||||||
f.value = helper.values_for_operator(value=f.value, op=f.operator)
|
|
||||||
f_k = f"f_value{i}"
|
|
||||||
params = {**params, f_k: f.value, **_multiple_values(f.value, value_key=f_k)}
|
|
||||||
op = __get_sql_operator(f.operator) \
|
|
||||||
if filter_type not in [schemas.FilterType.EVENTS_COUNT] else f.operator
|
|
||||||
is_any = _isAny_opreator(f.operator)
|
|
||||||
is_undefined = _isUndefined_operator(f.operator)
|
|
||||||
if not is_any and not is_undefined and len(f.value) == 0:
|
|
||||||
continue
|
|
||||||
is_not = False
|
|
||||||
if __is_negation_operator(f.operator):
|
|
||||||
is_not = True
|
|
||||||
if filter_type == schemas.FilterType.USER_BROWSER:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.user_browser)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f's.user_browser {op} %({f_k})s', f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.user_os)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f's.user_os {op} %({f_k})s', f.value, is_not=is_not, value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.user_device)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f's.user_device {op} %({f_k})s', f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.user_country)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f's.user_country {op} %({f_k})s', f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type in [schemas.FilterType.UTM_SOURCE]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.utm_source)')
|
|
||||||
elif is_undefined:
|
|
||||||
ch_sessions_sub_query.append('isNull(s.utm_source)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f's.utm_source {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type in [schemas.FilterType.UTM_MEDIUM]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.utm_medium)')
|
|
||||||
elif is_undefined:
|
|
||||||
ch_sessions_sub_query.append('isNull(s.utm_medium)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f's.utm_medium {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
elif filter_type in [schemas.FilterType.UTM_CAMPAIGN]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.utm_campaign)')
|
|
||||||
elif is_undefined:
|
|
||||||
ch_sessions_sub_query.append('isNull(s.utm_campaign)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f's.utm_campaign {op} toString(%({f_k})s)', f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type == schemas.FilterType.DURATION:
|
|
||||||
if len(f.value) > 0 and f.value[0] is not None:
|
|
||||||
ch_sessions_sub_query.append("s.duration >= %(minDuration)s")
|
|
||||||
params["minDuration"] = f.value[0]
|
|
||||||
if len(f.value) > 1 and f.value[1] is not None and int(f.value[1]) > 0:
|
|
||||||
ch_sessions_sub_query.append("s.duration <= %(maxDuration)s")
|
|
||||||
params["maxDuration"] = f.value[1]
|
|
||||||
|
|
||||||
elif filter_type == schemas.FilterType.REFERRER:
|
|
||||||
# extra_from += f"INNER JOIN {events.EventType.LOCATION.table} AS p USING(session_id)"
|
|
||||||
if is_any:
|
|
||||||
referrer_constraint = 'isNotNull(s.base_referrer)'
|
|
||||||
else:
|
|
||||||
referrer_constraint = _multiple_conditions(f"s.base_referrer {op} %({f_k})s", f.value,
|
|
||||||
is_not=is_not, value_key=f_k)
|
|
||||||
elif filter_type == schemas.FilterType.METADATA:
|
|
||||||
# get metadata list only if you need it
|
|
||||||
if meta_keys is None:
|
|
||||||
meta_keys = metadata.get(project_id=project.project_id)
|
|
||||||
meta_keys = {m["key"]: m["index"] for m in meta_keys}
|
|
||||||
if f.source in meta_keys.keys():
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append(f"isNotNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
|
|
||||||
elif is_undefined:
|
|
||||||
ch_sessions_sub_query.append(f"isNull(s.{metadata.index_to_colname(meta_keys[f.source])})")
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(
|
|
||||||
f"s.{metadata.index_to_colname(meta_keys[f.source])} {op} toString(%({f_k})s)",
|
|
||||||
f.value, is_not=is_not, value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.user_id)')
|
|
||||||
elif is_undefined:
|
|
||||||
ch_sessions_sub_query.append('isNull(s.user_id)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f"s.user_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
elif filter_type in [schemas.FilterType.USER_ANONYMOUS_ID,
|
|
||||||
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.user_anonymous_id)')
|
|
||||||
elif is_undefined:
|
|
||||||
ch_sessions_sub_query.append('isNull(s.user_anonymous_id)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f"s.user_anonymous_id {op} toString(%({f_k})s)", f.value,
|
|
||||||
is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE]:
|
|
||||||
if is_any:
|
|
||||||
ch_sessions_sub_query.append('isNotNull(s.rev_id)')
|
|
||||||
elif is_undefined:
|
|
||||||
ch_sessions_sub_query.append('isNull(s.rev_id)')
|
|
||||||
else:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f"s.rev_id {op} toString(%({f_k})s)", f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
|
|
||||||
elif filter_type == schemas.FilterType.PLATFORM:
|
|
||||||
# op = __get_sql_operator(f.operator)
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f"s.user_device_type {op} %({f_k})s", f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
# elif filter_type == schemas.FilterType.issue:
|
|
||||||
# if is_any:
|
|
||||||
# ch_sessions_sub_query.append("notEmpty(s.issue_types)")
|
|
||||||
# else:
|
|
||||||
# ch_sessions_sub_query.append(f"hasAny(s.issue_types,%({f_k})s)")
|
|
||||||
# # _multiple_conditions(f"%({f_k})s {op} ANY (s.issue_types)", f.value, is_not=is_not,
|
|
||||||
# # value_key=f_k))
|
|
||||||
#
|
|
||||||
# if is_not:
|
|
||||||
# extra_constraints[-1] = f"not({extra_constraints[-1]})"
|
|
||||||
# ss_constraints[-1] = f"not({ss_constraints[-1]})"
|
|
||||||
elif filter_type == schemas.FilterType.EVENTS_COUNT:
|
|
||||||
ch_sessions_sub_query.append(
|
|
||||||
_multiple_conditions(f"s.events_count {op} %({f_k})s", f.value, is_not=is_not,
|
|
||||||
value_key=f_k))
|
|
||||||
|
|
||||||
with ch_client.ClickHouseClient() as ch:
|
|
||||||
step_size = metrics_helper.get_step_size(data.startTimestamp, data.endTimestamp, data.density)
|
|
||||||
sort = __get_sort_key('datetime')
|
|
||||||
if data.sort is not None:
|
|
||||||
sort = __get_sort_key(data.sort)
|
|
||||||
order = "DESC"
|
|
||||||
if data.order is not None:
|
|
||||||
order = data.order
|
|
||||||
params = {
|
|
||||||
**params,
|
|
||||||
"startDate": data.startTimestamp,
|
|
||||||
"endDate": data.endTimestamp,
|
|
||||||
"project_id": project.project_id,
|
|
||||||
"userId": user_id,
|
|
||||||
"step_size": step_size}
|
|
||||||
if data.limit is not None and data.page is not None:
|
|
||||||
params["errors_offset"] = (data.page - 1) * data.limit
|
|
||||||
params["errors_limit"] = data.limit
|
|
||||||
else:
|
|
||||||
params["errors_offset"] = 0
|
|
||||||
params["errors_limit"] = 200
|
|
||||||
# if data.bookmarked:
|
|
||||||
# cur.execute(cur.mogrify(f"""SELECT error_id
|
|
||||||
# FROM public.user_favorite_errors
|
|
||||||
# WHERE user_id = %(userId)s
|
|
||||||
# {"" if error_ids is None else "AND error_id IN %(error_ids)s"}""",
|
|
||||||
# {"userId": user_id, "error_ids": tuple(error_ids or [])}))
|
|
||||||
# error_ids = cur.fetchall()
|
|
||||||
# if len(error_ids) == 0:
|
|
||||||
# return empty_response
|
|
||||||
# error_ids = [e["error_id"] for e in error_ids]
|
|
||||||
|
|
||||||
if error_ids is not None:
|
|
||||||
params["error_ids"] = tuple(error_ids)
|
|
||||||
ch_sub_query.append("error_id IN %(error_ids)s")
|
|
||||||
|
|
||||||
main_ch_query = f"""\
|
|
||||||
SELECT details.error_id as error_id,
|
|
||||||
name, message, users, total,
|
|
||||||
sessions, last_occurrence, first_occurrence, chart
|
|
||||||
FROM (SELECT error_id,
|
|
||||||
JSONExtractString(toString(`$properties`), 'name') AS name,
|
|
||||||
JSONExtractString(toString(`$properties`), 'message') AS message,
|
|
||||||
COUNT(DISTINCT user_id) AS users,
|
|
||||||
COUNT(DISTINCT events.session_id) AS sessions,
|
|
||||||
MAX(created_at) AS max_datetime,
|
|
||||||
MIN(created_at) AS min_datetime,
|
|
||||||
COUNT(DISTINCT error_id)
|
|
||||||
OVER() AS total
|
|
||||||
FROM {MAIN_EVENTS_TABLE} AS events
|
|
||||||
INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
|
|
||||||
FROM {MAIN_SESSIONS_TABLE} AS s
|
|
||||||
{subquery_part}
|
|
||||||
WHERE {" AND ".join(ch_sessions_sub_query)}) AS sessions
|
|
||||||
ON (events.session_id = sessions.session_id)
|
|
||||||
WHERE {" AND ".join(ch_sub_query)}
|
|
||||||
GROUP BY error_id, name, message
|
|
||||||
ORDER BY {sort} {order}
|
|
||||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
|
|
||||||
INNER JOIN (SELECT error_id,
|
|
||||||
toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
|
|
||||||
toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
|
|
||||||
FROM {MAIN_EVENTS_TABLE}
|
|
||||||
WHERE project_id=%(project_id)s
|
|
||||||
AND `$event_name`='ERROR'
|
|
||||||
GROUP BY error_id) AS time_details
|
|
||||||
ON details.error_id=time_details.error_id
|
|
||||||
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
|
|
||||||
FROM (SELECT error_id,
|
|
||||||
gs.generate_series AS timestamp,
|
|
||||||
COUNT(DISTINCT session_id) AS count
|
|
||||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
|
|
||||||
LEFT JOIN {MAIN_EVENTS_TABLE} ON(TRUE)
|
|
||||||
WHERE {" AND ".join(ch_sub_query)}
|
|
||||||
AND created_at >= toDateTime(timestamp / 1000)
|
|
||||||
AND created_at < toDateTime((timestamp + %(step_size)s) / 1000)
|
|
||||||
GROUP BY error_id, timestamp
|
|
||||||
ORDER BY timestamp) AS sub_table
|
|
||||||
GROUP BY error_id) AS chart_details ON details.error_id=chart_details.error_id;"""
|
|
||||||
|
|
||||||
# print("------------")
|
|
||||||
# print(ch.format(main_ch_query, params))
|
|
||||||
# print("------------")
|
|
||||||
query = ch.format(query=main_ch_query, parameters=params)
|
|
||||||
|
|
||||||
rows = ch.execute(query=query)
|
|
||||||
total = rows[0]["total"] if len(rows) > 0 else 0
|
|
||||||
|
|
||||||
for r in rows:
|
|
||||||
r["chart"] = list(r["chart"])
|
|
||||||
for i in range(len(r["chart"])):
|
|
||||||
r["chart"][i] = {"timestamp": r["chart"][i][0], "count": r["chart"][i][1]}
|
|
||||||
|
|
||||||
return {
|
|
||||||
'total': total,
|
|
||||||
'errors': helper.list_to_camel_case(rows)
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_trace(project_id, error_id):
|
|
||||||
return errors_legacy.get_trace(project_id=project_id, error_id=error_id)
|
|
||||||
|
|
||||||
|
|
||||||
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
|
||||||
return errors_legacy.get_sessions(start_date=start_date,
|
|
||||||
end_date=end_date,
|
|
||||||
project_id=project_id,
|
|
||||||
user_id=user_id,
|
|
||||||
error_id=error_id)
|
|
||||||
|
|
@ -1,248 +0,0 @@
|
||||||
from chalicelib.core.errors.modules import errors_helper
|
|
||||||
|
|
||||||
from chalicelib.utils import pg_client, helper
|
|
||||||
from chalicelib.utils.TimeUTC import TimeUTC
|
|
||||||
from chalicelib.utils.metrics_helper import get_step_size
|
|
||||||
|
|
||||||
|
|
||||||
def __flatten_sort_key_count_version(data, merge_nested=False):
|
|
||||||
if data is None:
|
|
||||||
return []
|
|
||||||
return sorted(
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"name": f'{o["name"]}@{v["version"]}',
|
|
||||||
"count": v["count"]
|
|
||||||
} for o in data for v in o["partition"]
|
|
||||||
],
|
|
||||||
key=lambda o: o["count"], reverse=True) if merge_nested else \
|
|
||||||
[
|
|
||||||
{
|
|
||||||
"name": o["name"],
|
|
||||||
"count": o["count"],
|
|
||||||
} for o in data
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def __process_tags(row):
|
|
||||||
return [
|
|
||||||
{"name": "browser", "partitions": __flatten_sort_key_count_version(data=row.get("browsers_partition"))},
|
|
||||||
{"name": "browser.ver",
|
|
||||||
"partitions": __flatten_sort_key_count_version(data=row.pop("browsers_partition"), merge_nested=True)},
|
|
||||||
{"name": "OS", "partitions": __flatten_sort_key_count_version(data=row.get("os_partition"))},
|
|
||||||
{"name": "OS.ver",
|
|
||||||
"partitions": __flatten_sort_key_count_version(data=row.pop("os_partition"), merge_nested=True)},
|
|
||||||
{"name": "device.family", "partitions": __flatten_sort_key_count_version(data=row.get("device_partition"))},
|
|
||||||
{"name": "device",
|
|
||||||
"partitions": __flatten_sort_key_count_version(data=row.pop("device_partition"), merge_nested=True)},
|
|
||||||
{"name": "country", "partitions": row.pop("country_partition")}
|
|
||||||
]
|
|
||||||
|
|
||||||
|
|
||||||
def get_details(project_id, error_id, user_id, **data):
|
|
||||||
pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
|
|
||||||
step_size_name="step_size24")
|
|
||||||
pg_sub_query24.append("error_id = %(error_id)s")
|
|
||||||
pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
|
|
||||||
startTime_arg_name="startDate30",
|
|
||||||
endTime_arg_name="endDate30",
|
|
||||||
project_key="sessions.project_id")
|
|
||||||
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
|
||||||
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
|
||||||
pg_sub_query30_session.append("error_id = %(error_id)s")
|
|
||||||
pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
|
|
||||||
startTime_arg_name="startDate30",
|
|
||||||
endTime_arg_name="endDate30",
|
|
||||||
project_key="errors.project_id")
|
|
||||||
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
|
||||||
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
|
||||||
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
|
||||||
pg_sub_query30_err.append("error_id = %(error_id)s")
|
|
||||||
pg_sub_query30_err.append("source ='js_exception'")
|
|
||||||
pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
|
|
||||||
step_size_name="step_size30")
|
|
||||||
pg_sub_query30.append("error_id = %(error_id)s")
|
|
||||||
pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
|
|
||||||
pg_basic_query.append("error_id = %(error_id)s")
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
data["startDate24"] = TimeUTC.now(-1)
|
|
||||||
data["endDate24"] = TimeUTC.now()
|
|
||||||
data["startDate30"] = TimeUTC.now(-30)
|
|
||||||
data["endDate30"] = TimeUTC.now()
|
|
||||||
density24 = int(data.get("density24", 24))
|
|
||||||
step_size24 = get_step_size(data["startDate24"], data["endDate24"], density24, factor=1)
|
|
||||||
density30 = int(data.get("density30", 30))
|
|
||||||
step_size30 = get_step_size(data["startDate30"], data["endDate30"], density30, factor=1)
|
|
||||||
params = {
|
|
||||||
"startDate24": data['startDate24'],
|
|
||||||
"endDate24": data['endDate24'],
|
|
||||||
"startDate30": data['startDate30'],
|
|
||||||
"endDate30": data['endDate30'],
|
|
||||||
"project_id": project_id,
|
|
||||||
"userId": user_id,
|
|
||||||
"step_size24": step_size24,
|
|
||||||
"step_size30": step_size30,
|
|
||||||
"error_id": error_id}
|
|
||||||
|
|
||||||
main_pg_query = f"""\
|
|
||||||
SELECT error_id,
|
|
||||||
name,
|
|
||||||
message,
|
|
||||||
users,
|
|
||||||
sessions,
|
|
||||||
last_occurrence,
|
|
||||||
first_occurrence,
|
|
||||||
last_session_id,
|
|
||||||
browsers_partition,
|
|
||||||
os_partition,
|
|
||||||
device_partition,
|
|
||||||
country_partition,
|
|
||||||
chart24,
|
|
||||||
chart30
|
|
||||||
FROM (SELECT error_id,
|
|
||||||
name,
|
|
||||||
message,
|
|
||||||
COUNT(DISTINCT user_id) AS users,
|
|
||||||
COUNT(DISTINCT session_id) AS sessions
|
|
||||||
FROM public.errors
|
|
||||||
INNER JOIN events.errors AS s_errors USING (error_id)
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_err)}
|
|
||||||
GROUP BY error_id, name, message) AS details
|
|
||||||
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
|
|
||||||
MIN(timestamp) AS first_occurrence
|
|
||||||
FROM events.errors
|
|
||||||
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT session_id AS last_session_id
|
|
||||||
FROM events.errors
|
|
||||||
WHERE error_id = %(error_id)s
|
|
||||||
ORDER BY errors.timestamp DESC
|
|
||||||
LIMIT 1) AS last_session_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
|
|
||||||
FROM (SELECT *
|
|
||||||
FROM (SELECT user_browser AS name,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
GROUP BY user_browser
|
|
||||||
ORDER BY count DESC) AS count_per_browser_query
|
|
||||||
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
|
|
||||||
FROM (SELECT user_browser_version AS version,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
AND sessions.user_browser = count_per_browser_query.name
|
|
||||||
GROUP BY user_browser_version
|
|
||||||
ORDER BY count DESC) AS version_details
|
|
||||||
) AS browser_version_details ON (TRUE)) AS browser_details) AS browser_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(os_details) AS os_partition
|
|
||||||
FROM (SELECT *
|
|
||||||
FROM (SELECT user_os AS name,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
GROUP BY user_os
|
|
||||||
ORDER BY count DESC) AS count_per_os_details
|
|
||||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
|
||||||
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
AND sessions.user_os = count_per_os_details.name
|
|
||||||
GROUP BY user_os_version
|
|
||||||
ORDER BY count DESC) AS count_per_version_details
|
|
||||||
GROUP BY count_per_os_details.name ) AS os_version_details
|
|
||||||
ON (TRUE)) AS os_details) AS os_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(device_details) AS device_partition
|
|
||||||
FROM (SELECT *
|
|
||||||
FROM (SELECT user_device_type AS name,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
GROUP BY user_device_type
|
|
||||||
ORDER BY count DESC) AS count_per_device_details
|
|
||||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
|
|
||||||
FROM (SELECT CASE
|
|
||||||
WHEN user_device = '' OR user_device ISNULL
|
|
||||||
THEN 'unknown'
|
|
||||||
ELSE user_device END AS version,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
AND sessions.user_device_type = count_per_device_details.name
|
|
||||||
GROUP BY user_device
|
|
||||||
ORDER BY count DESC) AS count_per_device_v_details
|
|
||||||
GROUP BY count_per_device_details.name ) AS device_version_details
|
|
||||||
ON (TRUE)) AS device_details) AS device_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(count_per_country_details) AS country_partition
|
|
||||||
FROM (SELECT user_country AS name,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
|
||||||
GROUP BY user_country
|
|
||||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
|
||||||
FROM (SELECT generated_timestamp AS timestamp,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query24)}
|
|
||||||
) AS chart_details ON (TRUE)
|
|
||||||
GROUP BY generated_timestamp
|
|
||||||
ORDER BY generated_timestamp) AS chart_details) AS chart_details24 ON (TRUE)
|
|
||||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart30
|
|
||||||
FROM (SELECT generated_timestamp AS timestamp,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
|
||||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE {" AND ".join(pg_sub_query30)}) AS chart_details
|
|
||||||
ON (TRUE)
|
|
||||||
GROUP BY timestamp
|
|
||||||
ORDER BY timestamp) AS chart_details) AS chart_details30 ON (TRUE);
|
|
||||||
"""
|
|
||||||
|
|
||||||
# print("--------------------")
|
|
||||||
# print(cur.mogrify(main_pg_query, params))
|
|
||||||
# print("--------------------")
|
|
||||||
cur.execute(cur.mogrify(main_pg_query, params))
|
|
||||||
row = cur.fetchone()
|
|
||||||
if row is None:
|
|
||||||
return {"errors": ["error not found"]}
|
|
||||||
row["tags"] = __process_tags(row)
|
|
||||||
|
|
||||||
query = cur.mogrify(
|
|
||||||
f"""SELECT error_id, status, session_id, start_ts,
|
|
||||||
parent_error_id,session_id, user_anonymous_id,
|
|
||||||
user_id, user_uuid, user_browser, user_browser_version,
|
|
||||||
user_os, user_os_version, user_device, payload,
|
|
||||||
FALSE AS favorite,
|
|
||||||
True AS viewed
|
|
||||||
FROM public.errors AS pe
|
|
||||||
INNER JOIN events.errors AS ee USING (error_id)
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
WHERE pe.project_id = %(project_id)s
|
|
||||||
AND error_id = %(error_id)s
|
|
||||||
ORDER BY start_ts DESC
|
|
||||||
LIMIT 1;""",
|
|
||||||
{"project_id": project_id, "error_id": error_id, "user_id": user_id})
|
|
||||||
cur.execute(query=query)
|
|
||||||
status = cur.fetchone()
|
|
||||||
|
|
||||||
if status is not None:
|
|
||||||
row["stack"] = errors_helper.format_first_stack_frame(status).pop("stack")
|
|
||||||
row["status"] = status.pop("status")
|
|
||||||
row["parent_error_id"] = status.pop("parent_error_id")
|
|
||||||
row["favorite"] = status.pop("favorite")
|
|
||||||
row["viewed"] = status.pop("viewed")
|
|
||||||
row["last_hydrated_session"] = status
|
|
||||||
else:
|
|
||||||
row["stack"] = []
|
|
||||||
row["last_hydrated_session"] = None
|
|
||||||
row["status"] = "untracked"
|
|
||||||
row["parent_error_id"] = None
|
|
||||||
row["favorite"] = False
|
|
||||||
row["viewed"] = False
|
|
||||||
return {"data": helper.dict_to_camel_case(row)}
|
|
||||||
|
|
@ -1,294 +0,0 @@
|
||||||
import json
|
|
||||||
from typing import List
|
|
||||||
|
|
||||||
import schemas
|
|
||||||
from chalicelib.core.errors.modules import errors_helper
|
|
||||||
from chalicelib.core.sessions import sessions_search
|
|
||||||
from chalicelib.core.sourcemaps import sourcemaps
|
|
||||||
from chalicelib.utils import pg_client, helper
|
|
||||||
from chalicelib.utils.TimeUTC import TimeUTC
|
|
||||||
from chalicelib.utils.metrics_helper import get_step_size
|
|
||||||
|
|
||||||
|
|
||||||
def get(error_id, family=False) -> dict | List[dict]:
|
|
||||||
if family:
|
|
||||||
return get_batch([error_id])
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
query = cur.mogrify(
|
|
||||||
"""SELECT *
|
|
||||||
FROM public.errors
|
|
||||||
WHERE error_id = %(error_id)s
|
|
||||||
LIMIT 1;""",
|
|
||||||
{"error_id": error_id})
|
|
||||||
cur.execute(query=query)
|
|
||||||
result = cur.fetchone()
|
|
||||||
if result is not None:
|
|
||||||
result["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(result["stacktrace_parsed_at"])
|
|
||||||
return helper.dict_to_camel_case(result)
|
|
||||||
|
|
||||||
|
|
||||||
def get_batch(error_ids):
|
|
||||||
if len(error_ids) == 0:
|
|
||||||
return []
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
query = cur.mogrify(
|
|
||||||
"""
|
|
||||||
WITH RECURSIVE error_family AS (
|
|
||||||
SELECT *
|
|
||||||
FROM public.errors
|
|
||||||
WHERE error_id IN %(error_ids)s
|
|
||||||
UNION
|
|
||||||
SELECT child_errors.*
|
|
||||||
FROM public.errors AS child_errors
|
|
||||||
INNER JOIN error_family ON error_family.error_id = child_errors.parent_error_id OR error_family.parent_error_id = child_errors.error_id
|
|
||||||
)
|
|
||||||
SELECT *
|
|
||||||
FROM error_family;""",
|
|
||||||
{"error_ids": tuple(error_ids)})
|
|
||||||
cur.execute(query=query)
|
|
||||||
errors = cur.fetchall()
|
|
||||||
for e in errors:
|
|
||||||
e["stacktrace_parsed_at"] = TimeUTC.datetime_to_timestamp(e["stacktrace_parsed_at"])
|
|
||||||
return helper.list_to_camel_case(errors)
|
|
||||||
|
|
||||||
|
|
||||||
def __get_sort_key(key):
|
|
||||||
return {
|
|
||||||
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
|
||||||
schemas.ErrorSort.USERS_COUNT: "users",
|
|
||||||
schemas.ErrorSort.SESSIONS_COUNT: "sessions"
|
|
||||||
}.get(key, 'max_datetime')
|
|
||||||
|
|
||||||
|
|
||||||
def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, user_id):
|
|
||||||
empty_response = {
|
|
||||||
'total': 0,
|
|
||||||
'errors': []
|
|
||||||
}
|
|
||||||
|
|
||||||
platform = None
|
|
||||||
for f in data.filters:
|
|
||||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
|
||||||
platform = f.value[0]
|
|
||||||
pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
|
|
||||||
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
|
||||||
"pe.project_id=%(project_id)s"]
|
|
||||||
# To ignore Script error
|
|
||||||
pg_sub_query.append("pe.message!='Script error.'")
|
|
||||||
pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
|
|
||||||
project_key=None)
|
|
||||||
if platform:
|
|
||||||
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
|
|
||||||
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
|
||||||
statuses = []
|
|
||||||
error_ids = None
|
|
||||||
if data.startTimestamp is None:
|
|
||||||
data.startTimestamp = TimeUTC.now(-30)
|
|
||||||
if data.endTimestamp is None:
|
|
||||||
data.endTimestamp = TimeUTC.now(1)
|
|
||||||
if len(data.events) > 0 or len(data.filters) > 0:
|
|
||||||
print("-- searching for sessions before errors")
|
|
||||||
statuses = sessions_search.search_sessions(data=data, project=project, user_id=user_id, errors_only=True,
|
|
||||||
error_status=data.status)
|
|
||||||
if len(statuses) == 0:
|
|
||||||
return empty_response
|
|
||||||
error_ids = [e["errorId"] for e in statuses]
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
step_size = get_step_size(data.startTimestamp, data.endTimestamp, data.density, factor=1)
|
|
||||||
sort = __get_sort_key('datetime')
|
|
||||||
if data.sort is not None:
|
|
||||||
sort = __get_sort_key(data.sort)
|
|
||||||
order = schemas.SortOrderType.DESC
|
|
||||||
if data.order is not None:
|
|
||||||
order = data.order
|
|
||||||
extra_join = ""
|
|
||||||
|
|
||||||
params = {
|
|
||||||
"startDate": data.startTimestamp,
|
|
||||||
"endDate": data.endTimestamp,
|
|
||||||
"project_id": project.project_id,
|
|
||||||
"userId": user_id,
|
|
||||||
"step_size": step_size}
|
|
||||||
if data.status != schemas.ErrorStatus.ALL:
|
|
||||||
pg_sub_query.append("status = %(error_status)s")
|
|
||||||
params["error_status"] = data.status
|
|
||||||
if data.limit is not None and data.page is not None:
|
|
||||||
params["errors_offset"] = (data.page - 1) * data.limit
|
|
||||||
params["errors_limit"] = data.limit
|
|
||||||
else:
|
|
||||||
params["errors_offset"] = 0
|
|
||||||
params["errors_limit"] = 200
|
|
||||||
|
|
||||||
if error_ids is not None:
|
|
||||||
params["error_ids"] = tuple(error_ids)
|
|
||||||
pg_sub_query.append("error_id IN %(error_ids)s")
|
|
||||||
# if data.bookmarked:
|
|
||||||
# pg_sub_query.append("ufe.user_id = %(userId)s")
|
|
||||||
# extra_join += " INNER JOIN public.user_favorite_errors AS ufe USING (error_id)"
|
|
||||||
if data.query is not None and len(data.query) > 0:
|
|
||||||
pg_sub_query.append("(pe.name ILIKE %(error_query)s OR pe.message ILIKE %(error_query)s)")
|
|
||||||
params["error_query"] = helper.values_for_operator(value=data.query,
|
|
||||||
op=schemas.SearchEventOperator.CONTAINS)
|
|
||||||
|
|
||||||
main_pg_query = f"""SELECT full_count,
|
|
||||||
error_id,
|
|
||||||
name,
|
|
||||||
message,
|
|
||||||
users,
|
|
||||||
sessions,
|
|
||||||
last_occurrence,
|
|
||||||
first_occurrence,
|
|
||||||
chart
|
|
||||||
FROM (SELECT COUNT(details) OVER () AS full_count, details.*
|
|
||||||
FROM (SELECT error_id,
|
|
||||||
name,
|
|
||||||
message,
|
|
||||||
COUNT(DISTINCT COALESCE(user_id,user_uuid::text)) AS users,
|
|
||||||
COUNT(DISTINCT session_id) AS sessions,
|
|
||||||
MAX(timestamp) AS max_datetime,
|
|
||||||
MIN(timestamp) AS min_datetime
|
|
||||||
FROM events.errors
|
|
||||||
INNER JOIN public.errors AS pe USING (error_id)
|
|
||||||
INNER JOIN public.sessions USING (session_id)
|
|
||||||
{extra_join}
|
|
||||||
WHERE {" AND ".join(pg_sub_query)}
|
|
||||||
GROUP BY error_id, name, message
|
|
||||||
ORDER BY {sort} {order}) AS details
|
|
||||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s
|
|
||||||
) AS details
|
|
||||||
INNER JOIN LATERAL (SELECT MAX(timestamp) AS last_occurrence,
|
|
||||||
MIN(timestamp) AS first_occurrence
|
|
||||||
FROM events.errors
|
|
||||||
WHERE errors.error_id = details.error_id) AS time_details ON (TRUE)
|
|
||||||
INNER JOIN LATERAL (SELECT jsonb_agg(chart_details) AS chart
|
|
||||||
FROM (SELECT generated_timestamp AS timestamp,
|
|
||||||
COUNT(session_id) AS count
|
|
||||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
|
|
||||||
LEFT JOIN LATERAL (SELECT DISTINCT session_id
|
|
||||||
FROM events.errors
|
|
||||||
{"INNER JOIN public.sessions USING(session_id)" if platform else ""}
|
|
||||||
WHERE {" AND ".join(pg_sub_query_chart)}
|
|
||||||
) AS sessions ON (TRUE)
|
|
||||||
GROUP BY timestamp
|
|
||||||
ORDER BY timestamp) AS chart_details) AS chart_details ON (TRUE);"""
|
|
||||||
|
|
||||||
# print("--------------------")
|
|
||||||
# print(cur.mogrify(main_pg_query, params))
|
|
||||||
# print("--------------------")
|
|
||||||
|
|
||||||
cur.execute(cur.mogrify(main_pg_query, params))
|
|
||||||
rows = cur.fetchall()
|
|
||||||
total = 0 if len(rows) == 0 else rows[0]["full_count"]
|
|
||||||
|
|
||||||
if total == 0:
|
|
||||||
rows = []
|
|
||||||
else:
|
|
||||||
if len(statuses) == 0:
|
|
||||||
query = cur.mogrify(
|
|
||||||
"""SELECT error_id
|
|
||||||
FROM public.errors
|
|
||||||
WHERE project_id = %(project_id)s AND error_id IN %(error_ids)s;""",
|
|
||||||
{"project_id": project.project_id, "error_ids": tuple([r["error_id"] for r in rows]),
|
|
||||||
"user_id": user_id})
|
|
||||||
cur.execute(query=query)
|
|
||||||
statuses = helper.list_to_camel_case(cur.fetchall())
|
|
||||||
statuses = {
|
|
||||||
s["errorId"]: s for s in statuses
|
|
||||||
}
|
|
||||||
|
|
||||||
for r in rows:
|
|
||||||
r.pop("full_count")
|
|
||||||
|
|
||||||
return {
|
|
||||||
'total': total,
|
|
||||||
'errors': helper.list_to_camel_case(rows)
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def __save_stacktrace(error_id, data):
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
query = cur.mogrify(
|
|
||||||
"""UPDATE public.errors
|
|
||||||
SET stacktrace=%(data)s::jsonb, stacktrace_parsed_at=timezone('utc'::text, now())
|
|
||||||
WHERE error_id = %(error_id)s;""",
|
|
||||||
{"error_id": error_id, "data": json.dumps(data)})
|
|
||||||
cur.execute(query=query)
|
|
||||||
|
|
||||||
|
|
||||||
def get_trace(project_id, error_id):
|
|
||||||
error = get(error_id=error_id, family=False)
|
|
||||||
if error is None:
|
|
||||||
return {"errors": ["error not found"]}
|
|
||||||
if error.get("source", "") != "js_exception":
|
|
||||||
return {"errors": ["this source of errors doesn't have a sourcemap"]}
|
|
||||||
if error.get("payload") is None:
|
|
||||||
return {"errors": ["null payload"]}
|
|
||||||
if error.get("stacktrace") is not None:
|
|
||||||
return {"sourcemapUploaded": True,
|
|
||||||
"trace": error.get("stacktrace"),
|
|
||||||
"preparsed": True}
|
|
||||||
trace, all_exists = sourcemaps.get_traces_group(project_id=project_id, payload=error["payload"])
|
|
||||||
if all_exists:
|
|
||||||
__save_stacktrace(error_id=error_id, data=trace)
|
|
||||||
return {"sourcemapUploaded": all_exists,
|
|
||||||
"trace": trace,
|
|
||||||
"preparsed": False}
|
|
||||||
|
|
||||||
|
|
||||||
def get_sessions(start_date, end_date, project_id, user_id, error_id):
|
|
||||||
extra_constraints = ["s.project_id = %(project_id)s",
|
|
||||||
"s.start_ts >= %(startDate)s",
|
|
||||||
"s.start_ts <= %(endDate)s",
|
|
||||||
"e.error_id = %(error_id)s"]
|
|
||||||
if start_date is None:
|
|
||||||
start_date = TimeUTC.now(-7)
|
|
||||||
if end_date is None:
|
|
||||||
end_date = TimeUTC.now()
|
|
||||||
|
|
||||||
params = {
|
|
||||||
"startDate": start_date,
|
|
||||||
"endDate": end_date,
|
|
||||||
"project_id": project_id,
|
|
||||||
"userId": user_id,
|
|
||||||
"error_id": error_id}
|
|
||||||
with pg_client.PostgresClient() as cur:
|
|
||||||
query = cur.mogrify(
|
|
||||||
f"""SELECT s.project_id,
|
|
||||||
s.session_id::text AS session_id,
|
|
||||||
s.user_uuid,
|
|
||||||
s.user_id,
|
|
||||||
s.user_agent,
|
|
||||||
s.user_os,
|
|
||||||
s.user_browser,
|
|
||||||
s.user_device,
|
|
||||||
s.user_country,
|
|
||||||
s.start_ts,
|
|
||||||
s.duration,
|
|
||||||
s.events_count,
|
|
||||||
s.pages_count,
|
|
||||||
s.errors_count,
|
|
||||||
s.issue_types,
|
|
||||||
COALESCE((SELECT TRUE
|
|
||||||
FROM public.user_favorite_sessions AS fs
|
|
||||||
WHERE s.session_id = fs.session_id
|
|
||||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS favorite,
|
|
||||||
COALESCE((SELECT TRUE
|
|
||||||
FROM public.user_viewed_sessions AS fs
|
|
||||||
WHERE s.session_id = fs.session_id
|
|
||||||
AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
|
|
||||||
FROM public.sessions AS s INNER JOIN events.errors AS e USING (session_id)
|
|
||||||
WHERE {" AND ".join(extra_constraints)}
|
|
||||||
ORDER BY s.start_ts DESC;""",
|
|
||||||
params)
|
|
||||||
cur.execute(query=query)
|
|
||||||
sessions_list = []
|
|
||||||
total = cur.rowcount
|
|
||||||
row = cur.fetchone()
|
|
||||||
while row is not None and len(sessions_list) < 100:
|
|
||||||
sessions_list.append(row)
|
|
||||||
row = cur.fetchone()
|
|
||||||
|
|
||||||
return {
|
|
||||||
'total': total,
|
|
||||||
'sessions': helper.list_to_camel_case(sessions_list)
|
|
||||||
}
|
|
||||||
|
|
@ -1,11 +0,0 @@
|
||||||
import logging
|
|
||||||
|
|
||||||
from decouple import config
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
from . import helper as errors_helper
|
|
||||||
|
|
||||||
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
|
|
||||||
import chalicelib.core.sessions.sessions_ch as sessions
|
|
||||||
else:
|
|
||||||
import chalicelib.core.sessions.sessions_pg as sessions
|
|
||||||
|
|
@ -1,58 +0,0 @@
|
||||||
from typing import Optional
|
|
||||||
|
|
||||||
import schemas
|
|
||||||
from chalicelib.core.sourcemaps import sourcemaps
|
|
||||||
|
|
||||||
|
|
||||||
def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
|
|
||||||
startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
|
|
||||||
chart: bool = False, step_size_name: str = "step_size",
|
|
||||||
project_key: Optional[str] = "project_id"):
|
|
||||||
if project_key is None:
|
|
||||||
ch_sub_query = []
|
|
||||||
else:
|
|
||||||
ch_sub_query = [f"{project_key} =%(project_id)s"]
|
|
||||||
if time_constraint:
|
|
||||||
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
|
|
||||||
f"timestamp < %({endTime_arg_name})s"]
|
|
||||||
if chart:
|
|
||||||
ch_sub_query += [f"timestamp >= generated_timestamp",
|
|
||||||
f"timestamp < generated_timestamp + %({step_size_name})s"]
|
|
||||||
if platform == schemas.PlatformType.MOBILE:
|
|
||||||
ch_sub_query.append("user_device_type = 'mobile'")
|
|
||||||
elif platform == schemas.PlatformType.DESKTOP:
|
|
||||||
ch_sub_query.append("user_device_type = 'desktop'")
|
|
||||||
return ch_sub_query
|
|
||||||
|
|
||||||
|
|
||||||
def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
|
||||||
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
|
|
||||||
table_name=None):
|
|
||||||
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
|
|
||||||
if table_name is not None:
|
|
||||||
table_name = table_name + "."
|
|
||||||
else:
|
|
||||||
table_name = ""
|
|
||||||
if type_condition:
|
|
||||||
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
|
|
||||||
if time_constraint:
|
|
||||||
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
|
|
||||||
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
|
|
||||||
if platform == schemas.PlatformType.MOBILE:
|
|
||||||
ch_sub_query.append("user_device_type = 'mobile'")
|
|
||||||
elif platform == schemas.PlatformType.DESKTOP:
|
|
||||||
ch_sub_query.append("user_device_type = 'desktop'")
|
|
||||||
return ch_sub_query
|
|
||||||
|
|
||||||
|
|
||||||
def format_first_stack_frame(error):
|
|
||||||
error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
|
|
||||||
for s in error["stack"]:
|
|
||||||
for c in s.get("context", []):
|
|
||||||
for sci, sc in enumerate(c):
|
|
||||||
if isinstance(sc, str) and len(sc) > 1000:
|
|
||||||
c[sci] = sc[:1000]
|
|
||||||
# convert bytes to string:
|
|
||||||
if isinstance(s["filename"], bytes):
|
|
||||||
s["filename"] = s["filename"].decode("utf-8")
|
|
||||||
return error
|
|
||||||
48
api/chalicelib/core/errors_favorite.py
Normal file
48
api/chalicelib/core/errors_favorite.py
Normal file
|
|
@ -0,0 +1,48 @@
|
||||||
|
from chalicelib.utils import pg_client
|
||||||
|
|
||||||
|
|
||||||
|
def add_favorite_error(project_id, user_id, error_id):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
cur.execute(
|
||||||
|
cur.mogrify(f"""INSERT INTO public.user_favorite_errors(user_id, error_id)
|
||||||
|
VALUES (%(userId)s,%(error_id)s);""",
|
||||||
|
{"userId": user_id, "error_id": error_id})
|
||||||
|
)
|
||||||
|
return {"errorId": error_id, "favorite": True}
|
||||||
|
|
||||||
|
|
||||||
|
def remove_favorite_error(project_id, user_id, error_id):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
cur.execute(
|
||||||
|
cur.mogrify(f"""DELETE FROM public.user_favorite_errors
|
||||||
|
WHERE
|
||||||
|
user_id = %(userId)s
|
||||||
|
AND error_id = %(error_id)s;""",
|
||||||
|
{"userId": user_id, "error_id": error_id})
|
||||||
|
)
|
||||||
|
return {"errorId": error_id, "favorite": False}
|
||||||
|
|
||||||
|
|
||||||
|
def favorite_error(project_id, user_id, error_id):
|
||||||
|
exists, favorite = error_exists_and_favorite(user_id=user_id, error_id=error_id)
|
||||||
|
if not exists:
|
||||||
|
return {"errors": ["cannot bookmark non-rehydrated errors"]}
|
||||||
|
if favorite:
|
||||||
|
return remove_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
|
||||||
|
return add_favorite_error(project_id=project_id, user_id=user_id, error_id=error_id)
|
||||||
|
|
||||||
|
|
||||||
|
def error_exists_and_favorite(user_id, error_id):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
cur.execute(
|
||||||
|
cur.mogrify(
|
||||||
|
"""SELECT errors.error_id AS exists, ufe.error_id AS favorite
|
||||||
|
FROM public.errors
|
||||||
|
LEFT JOIN (SELECT error_id FROM public.user_favorite_errors WHERE user_id = %(userId)s) AS ufe USING (error_id)
|
||||||
|
WHERE error_id = %(error_id)s;""",
|
||||||
|
{"userId": user_id, "error_id": error_id})
|
||||||
|
)
|
||||||
|
r = cur.fetchone()
|
||||||
|
if r is None:
|
||||||
|
return False, False
|
||||||
|
return True, r.get("favorite") is not None
|
||||||
37
api/chalicelib/core/errors_viewed.py
Normal file
37
api/chalicelib/core/errors_viewed.py
Normal file
|
|
@ -0,0 +1,37 @@
|
||||||
|
from chalicelib.utils import pg_client
|
||||||
|
|
||||||
|
|
||||||
|
def add_viewed_error(project_id, user_id, error_id):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
cur.execute(
|
||||||
|
cur.mogrify("""INSERT INTO public.user_viewed_errors(user_id, error_id)
|
||||||
|
VALUES (%(userId)s,%(error_id)s);""",
|
||||||
|
{"userId": user_id, "error_id": error_id})
|
||||||
|
)
|
||||||
|
|
||||||
|
|
||||||
|
def viewed_error_exists(user_id, error_id):
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
query = cur.mogrify(
|
||||||
|
"""SELECT
|
||||||
|
errors.error_id AS hydrated,
|
||||||
|
COALESCE((SELECT TRUE
|
||||||
|
FROM public.user_viewed_errors AS ve
|
||||||
|
WHERE ve.error_id = %(error_id)s
|
||||||
|
AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
|
||||||
|
FROM public.errors
|
||||||
|
WHERE error_id = %(error_id)s""",
|
||||||
|
{"userId": user_id, "error_id": error_id})
|
||||||
|
cur.execute(
|
||||||
|
query=query
|
||||||
|
)
|
||||||
|
r = cur.fetchone()
|
||||||
|
if r:
|
||||||
|
return r.get("viewed")
|
||||||
|
return True
|
||||||
|
|
||||||
|
|
||||||
|
def viewed_error(project_id, user_id, error_id):
|
||||||
|
if viewed_error_exists(user_id=user_id, error_id=error_id):
|
||||||
|
return None
|
||||||
|
return add_viewed_error(project_id=project_id, user_id=user_id, error_id=error_id)
|
||||||
|
|
@ -1,10 +1,9 @@
|
||||||
from functools import cache
|
|
||||||
from typing import Optional
|
from typing import Optional
|
||||||
|
|
||||||
import schemas
|
import schemas
|
||||||
|
from chalicelib.core import autocomplete
|
||||||
from chalicelib.core import issues
|
from chalicelib.core import issues
|
||||||
from chalicelib.core.autocomplete import autocomplete
|
from chalicelib.core import sessions_metas
|
||||||
from chalicelib.core.sessions import sessions_metas
|
|
||||||
from chalicelib.utils import pg_client, helper
|
from chalicelib.utils import pg_client, helper
|
||||||
from chalicelib.utils.TimeUTC import TimeUTC
|
from chalicelib.utils.TimeUTC import TimeUTC
|
||||||
from chalicelib.utils.event_filter_definition import SupportedFilter, Event
|
from chalicelib.utils.event_filter_definition import SupportedFilter, Event
|
||||||
|
|
@ -138,57 +137,52 @@ class EventType:
|
||||||
column=None) # column=None because errors are searched by name or message
|
column=None) # column=None because errors are searched by name or message
|
||||||
|
|
||||||
|
|
||||||
@cache
|
SUPPORTED_TYPES = {
|
||||||
def supported_types():
|
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
|
||||||
return {
|
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
|
||||||
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
|
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
|
||||||
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
|
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
|
||||||
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
|
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
|
||||||
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
|
query=autocomplete.__generic_query(
|
||||||
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
|
typename=EventType.LOCATION.ui_type)),
|
||||||
|
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
|
||||||
|
query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
|
||||||
|
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
|
||||||
|
query=autocomplete.__generic_query(
|
||||||
|
typename=EventType.REQUEST.ui_type)),
|
||||||
|
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
|
||||||
|
query=autocomplete.__generic_query(
|
||||||
|
typename=EventType.GRAPHQL.ui_type)),
|
||||||
|
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
|
||||||
|
query=autocomplete.__generic_query(
|
||||||
|
typename=EventType.STATEACTION.ui_type)),
|
||||||
|
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
|
||||||
|
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
|
||||||
|
query=None),
|
||||||
|
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
|
||||||
|
query=None),
|
||||||
|
# MOBILE
|
||||||
|
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
|
||||||
query=autocomplete.__generic_query(
|
query=autocomplete.__generic_query(
|
||||||
typename=EventType.LOCATION.ui_type)),
|
typename=EventType.CLICK_MOBILE.ui_type)),
|
||||||
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
|
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
|
||||||
query=autocomplete.__generic_query(
|
query=autocomplete.__generic_query(
|
||||||
typename=EventType.CUSTOM.ui_type)),
|
typename=EventType.SWIPE_MOBILE.ui_type)),
|
||||||
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
|
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
|
||||||
|
query=autocomplete.__generic_query(
|
||||||
|
typename=EventType.INPUT_MOBILE.ui_type)),
|
||||||
|
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
|
||||||
query=autocomplete.__generic_query(
|
query=autocomplete.__generic_query(
|
||||||
typename=EventType.REQUEST.ui_type)),
|
typename=EventType.VIEW_MOBILE.ui_type)),
|
||||||
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
|
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
|
||||||
query=autocomplete.__generic_query(
|
query=autocomplete.__generic_query(
|
||||||
typename=EventType.GRAPHQL.ui_type)),
|
typename=EventType.CUSTOM_MOBILE.ui_type)),
|
||||||
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
|
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
|
||||||
query=autocomplete.__generic_query(
|
query=autocomplete.__generic_query(
|
||||||
typename=EventType.STATEACTION.ui_type)),
|
typename=EventType.REQUEST_MOBILE.ui_type)),
|
||||||
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
|
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
|
||||||
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
|
|
||||||
query=None),
|
|
||||||
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
|
|
||||||
query=None),
|
query=None),
|
||||||
# MOBILE
|
}
|
||||||
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
|
|
||||||
query=autocomplete.__generic_query(
|
|
||||||
typename=EventType.CLICK_MOBILE.ui_type)),
|
|
||||||
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
|
|
||||||
query=autocomplete.__generic_query(
|
|
||||||
typename=EventType.SWIPE_MOBILE.ui_type)),
|
|
||||||
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
|
|
||||||
query=autocomplete.__generic_query(
|
|
||||||
typename=EventType.INPUT_MOBILE.ui_type)),
|
|
||||||
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
|
|
||||||
query=autocomplete.__generic_query(
|
|
||||||
typename=EventType.VIEW_MOBILE.ui_type)),
|
|
||||||
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(
|
|
||||||
get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
|
|
||||||
query=autocomplete.__generic_query(
|
|
||||||
typename=EventType.CUSTOM_MOBILE.ui_type)),
|
|
||||||
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(
|
|
||||||
get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
|
|
||||||
query=autocomplete.__generic_query(
|
|
||||||
typename=EventType.REQUEST_MOBILE.ui_type)),
|
|
||||||
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
|
|
||||||
query=None),
|
|
||||||
}
|
|
||||||
|
|
||||||
|
|
||||||
def get_errors_by_session_id(session_id, project_id):
|
def get_errors_by_session_id(session_id, project_id):
|
||||||
|
|
@ -208,17 +202,20 @@ def search(text, event_type, project_id, source, key):
|
||||||
if not event_type:
|
if not event_type:
|
||||||
return {"data": autocomplete.__get_autocomplete_table(text, project_id)}
|
return {"data": autocomplete.__get_autocomplete_table(text, project_id)}
|
||||||
|
|
||||||
if event_type in supported_types().keys():
|
if event_type in SUPPORTED_TYPES.keys():
|
||||||
rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
|
rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
|
||||||
elif event_type + "_MOBILE" in supported_types().keys():
|
# for IOS events autocomplete
|
||||||
rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
|
# if event_type + "_IOS" in SUPPORTED_TYPES.keys():
|
||||||
elif event_type in sessions_metas.supported_types().keys():
|
# rows += SUPPORTED_TYPES[event_type + "_IOS"].get(project_id=project_id, value=text, key=key,source=source)
|
||||||
|
elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
|
||||||
|
rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
|
||||||
|
elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
|
||||||
return sessions_metas.search(text, event_type, project_id)
|
return sessions_metas.search(text, event_type, project_id)
|
||||||
elif event_type.endswith("_IOS") \
|
elif event_type.endswith("_IOS") \
|
||||||
and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
|
and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
|
||||||
return sessions_metas.search(text, event_type, project_id)
|
return sessions_metas.search(text, event_type, project_id)
|
||||||
elif event_type.endswith("_MOBILE") \
|
elif event_type.endswith("_MOBILE") \
|
||||||
and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
|
and event_type[:-len("_MOBILE")] in sessions_metas.SUPPORTED_TYPES.keys():
|
||||||
return sessions_metas.search(text, event_type, project_id)
|
return sessions_metas.search(text, event_type, project_id)
|
||||||
else:
|
else:
|
||||||
return {"errors": ["unsupported event"]}
|
return {"errors": ["unsupported event"]}
|
||||||
|
|
|
||||||
|
|
@ -1,7 +1,7 @@
|
||||||
from typing import List
|
from typing import List
|
||||||
|
|
||||||
import schemas
|
import schemas
|
||||||
from chalicelib.core.metrics.modules import significance
|
from chalicelib.core import significance
|
||||||
from chalicelib.utils import helper
|
from chalicelib.utils import helper
|
||||||
from chalicelib.utils import sql_helper as sh
|
from chalicelib.utils import sql_helper as sh
|
||||||
|
|
||||||
|
|
@ -27,6 +27,7 @@ HEALTH_ENDPOINTS = {
|
||||||
"http": app_connection_string("http-openreplay", 8888, "metrics"),
|
"http": app_connection_string("http-openreplay", 8888, "metrics"),
|
||||||
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
|
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
|
||||||
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
|
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
|
||||||
|
"peers": app_connection_string("peers-openreplay", 8888, "health"),
|
||||||
"sink": app_connection_string("sink-openreplay", 8888, "metrics"),
|
"sink": app_connection_string("sink-openreplay", 8888, "metrics"),
|
||||||
"sourcemapreader": app_connection_string(
|
"sourcemapreader": app_connection_string(
|
||||||
"sourcemapreader-openreplay", 8888, "health"
|
"sourcemapreader-openreplay", 8888, "health"
|
||||||
|
|
@ -38,7 +39,9 @@ HEALTH_ENDPOINTS = {
|
||||||
def __check_database_pg(*_):
|
def __check_database_pg(*_):
|
||||||
fail_response = {
|
fail_response = {
|
||||||
"health": False,
|
"health": False,
|
||||||
"details": {"errors": ["Postgres health-check failed"]},
|
"details": {
|
||||||
|
"errors": ["Postgres health-check failed"]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
with pg_client.PostgresClient() as cur:
|
with pg_client.PostgresClient() as cur:
|
||||||
try:
|
try:
|
||||||
|
|
@ -60,26 +63,29 @@ def __check_database_pg(*_):
|
||||||
"details": {
|
"details": {
|
||||||
# "version": server_version["server_version"],
|
# "version": server_version["server_version"],
|
||||||
# "schema": schema_version["version"]
|
# "schema": schema_version["version"]
|
||||||
},
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def __always_healthy(*_):
|
def __always_healthy(*_):
|
||||||
return {"health": True, "details": {}}
|
return {
|
||||||
|
"health": True,
|
||||||
|
"details": {}
|
||||||
|
}
|
||||||
|
|
||||||
|
|
||||||
def __check_be_service(service_name):
|
def __check_be_service(service_name):
|
||||||
def fn(*_):
|
def fn(*_):
|
||||||
fail_response = {
|
fail_response = {
|
||||||
"health": False,
|
"health": False,
|
||||||
"details": {"errors": ["server health-check failed"]},
|
"details": {
|
||||||
|
"errors": ["server health-check failed"]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
try:
|
try:
|
||||||
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
|
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
|
||||||
if results.status_code != 200:
|
if results.status_code != 200:
|
||||||
logger.error(
|
logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
|
||||||
f"!! issue with the {service_name}-health code:{results.status_code}"
|
|
||||||
)
|
|
||||||
logger.error(results.text)
|
logger.error(results.text)
|
||||||
# fail_response["details"]["errors"].append(results.text)
|
# fail_response["details"]["errors"].append(results.text)
|
||||||
return fail_response
|
return fail_response
|
||||||
|
|
@ -97,7 +103,10 @@ def __check_be_service(service_name):
|
||||||
logger.error("couldn't get response")
|
logger.error("couldn't get response")
|
||||||
# fail_response["details"]["errors"].append(str(e))
|
# fail_response["details"]["errors"].append(str(e))
|
||||||
return fail_response
|
return fail_response
|
||||||
return {"health": True, "details": {}}
|
return {
|
||||||
|
"health": True,
|
||||||
|
"details": {}
|
||||||
|
}
|
||||||
|
|
||||||
return fn
|
return fn
|
||||||
|
|
||||||
|
|
@ -105,7 +114,7 @@ def __check_be_service(service_name):
|
||||||
def __check_redis(*_):
|
def __check_redis(*_):
|
||||||
fail_response = {
|
fail_response = {
|
||||||
"health": False,
|
"health": False,
|
||||||
"details": {"errors": ["server health-check failed"]},
|
"details": {"errors": ["server health-check failed"]}
|
||||||
}
|
}
|
||||||
if config("REDIS_STRING", default=None) is None:
|
if config("REDIS_STRING", default=None) is None:
|
||||||
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
|
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
|
||||||
|
|
@ -124,14 +133,16 @@ def __check_redis(*_):
|
||||||
"health": True,
|
"health": True,
|
||||||
"details": {
|
"details": {
|
||||||
# "version": r.execute_command('INFO')['redis_version']
|
# "version": r.execute_command('INFO')['redis_version']
|
||||||
},
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
||||||
def __check_SSL(*_):
|
def __check_SSL(*_):
|
||||||
fail_response = {
|
fail_response = {
|
||||||
"health": False,
|
"health": False,
|
||||||
"details": {"errors": ["SSL Certificate health-check failed"]},
|
"details": {
|
||||||
|
"errors": ["SSL Certificate health-check failed"]
|
||||||
|
}
|
||||||
}
|
}
|
||||||
try:
|
try:
|
||||||
requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
|
requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
|
||||||
|
|
@@ -139,28 +150,36 @@ def __check_SSL(*_):
         logger.error("!! health failed: SSL Certificate")
         logger.exception(e)
         return fail_response
-    return {"health": True, "details": {}}
+    return {
+        "health": True,
+        "details": {}
+    }


 def __get_sessions_stats(*_):
     with pg_client.PostgresClient() as cur:
         constraints = ["projects.deleted_at IS NULL"]
-        query = cur.mogrify(
-            f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
+        query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
                    COALESCE(SUM(events_count),0) AS e_c
             FROM public.projects_stats
                 INNER JOIN public.projects USING(project_id)
-            WHERE {" AND ".join(constraints)};"""
-        )
+            WHERE {" AND ".join(constraints)};""")
         cur.execute(query)
         row = cur.fetchone()
-    return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}
+    return {
+        "numberOfSessionsCaptured": row["s_c"],
+        "numberOfEventCaptured": row["e_c"]
+    }


 def get_health(tenant_id=None):
     health_map = {
-        "databases": {"postgres": __check_database_pg},
-        "ingestionPipeline": {"redis": __check_redis},
+        "databases": {
+            "postgres": __check_database_pg
+        },
+        "ingestionPipeline": {
+            "redis": __check_redis
+        },
         "backendServices": {
             "alerts": __check_be_service("alerts"),
             "assets": __check_be_service("assets"),
@@ -173,12 +192,13 @@ def get_health(tenant_id=None):
             "http": __check_be_service("http"),
             "ingress-nginx": __always_healthy,
             "integrations": __check_be_service("integrations"),
+            "peers": __check_be_service("peers"),
             "sink": __check_be_service("sink"),
             "sourcemapreader": __check_be_service("sourcemapreader"),
-            "storage": __check_be_service("storage"),
+            "storage": __check_be_service("storage")
         },
         "details": __get_sessions_stats,
-        "ssl": __check_SSL,
+        "ssl": __check_SSL
     }
     return __process_health(health_map=health_map)

@@ -190,16 +210,10 @@ def __process_health(health_map):
             response.pop(parent_key)
         elif isinstance(health_map[parent_key], dict):
             for element_key in health_map[parent_key]:
-                if config(
-                    f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
-                    cast=bool,
-                    default=False,
-                ):
+                if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
                     response[parent_key].pop(element_key)
                 else:
-                    response[parent_key][element_key] = health_map[parent_key][
-                        element_key
-                    ]()
+                    response[parent_key][element_key] = health_map[parent_key][element_key]()
         else:
             response[parent_key] = health_map[parent_key]()
     return response
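`__process_health` walks the nested map, calls every leaf callable, and lets a `SKIP_H_<GROUP>_<NAME>` flag drop individual checks; the two branches only differ in line wrapping. A small, self-contained illustration of that behaviour, using toy checks and plain `os.environ` instead of decouple so it runs anywhere:

import os


def process_health(health_map):
    # Mirror of the traversal shown above: dict values are groups of callables,
    # plain values are callables themselves.
    response = dict(health_map)
    for parent_key in health_map:
        if isinstance(health_map[parent_key], dict):
            response[parent_key] = {}
            for element_key, check in health_map[parent_key].items():
                if os.environ.get(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}") == "true":
                    continue  # same effect as response[parent_key].pop(element_key)
                response[parent_key][element_key] = check()
        else:
            response[parent_key] = health_map[parent_key]()
    return response


if __name__ == "__main__":
    toy_map = {
        "databases": {"postgres": lambda: {"health": True, "details": {}}},
        "ssl": lambda: {"health": False, "details": {"errors": ["expired"]}},
    }
    os.environ["SKIP_H_DATABASES_POSTGRES"] = "true"
    # The skipped check is simply absent from the databases group.
    print(process_health(toy_map))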
@@ -207,8 +221,7 @@ def __process_health(health_map):

 def cron():
     with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(
-            """SELECT projects.project_id,
+        query = cur.mogrify("""SELECT projects.project_id,
                projects.created_at,
                projects.sessions_last_check_at,
                projects.first_recorded_session_at,

@@ -216,8 +229,7 @@ def cron():
                FROM public.projects
                    LEFT JOIN public.projects_stats USING (project_id)
                WHERE projects.deleted_at IS NULL
-               ORDER BY project_id;"""
-        )
+               ORDER BY project_id;""")
         cur.execute(query)
         rows = cur.fetchall()
         for r in rows:
@@ -238,24 +250,20 @@ def cron():
                 count_start_from = r["last_update_at"]

             count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
-            params = {
-                "project_id": r["project_id"],
-                "start_ts": count_start_from,
-                "end_ts": TimeUTC.now(),
-                "sessions_count": 0,
-                "events_count": 0,
-            }
+            params = {"project_id": r["project_id"],
+                      "start_ts": count_start_from,
+                      "end_ts": TimeUTC.now(),
+                      "sessions_count": 0,
+                      "events_count": 0}

-            query = cur.mogrify(
-                """SELECT COUNT(1) AS sessions_count,
+            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
                           COALESCE(SUM(events_count),0) AS events_count
                    FROM public.sessions
                    WHERE project_id=%(project_id)s
                      AND start_ts>=%(start_ts)s
                      AND start_ts<=%(end_ts)s
                      AND duration IS NOT NULL;""",
-                params,
-            )
+                                params)
             cur.execute(query)
             row = cur.fetchone()
             if row is not None:

@@ -263,68 +271,56 @@ def cron():
                 params["events_count"] = row["events_count"]

             if insert:
-                query = cur.mogrify(
-                    """INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
+                query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
                        VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
-                    params,
-                )
+                                    params)
             else:
-                query = cur.mogrify(
-                    """UPDATE public.projects_stats
+                query = cur.mogrify("""UPDATE public.projects_stats
                        SET sessions_count=sessions_count+%(sessions_count)s,
                            events_count=events_count+%(events_count)s,
                            last_update_at=(now() AT TIME ZONE 'utc'::text)
                        WHERE project_id=%(project_id)s;""",
-                    params,
-                )
+                                    params)
             cur.execute(query)


 # this cron is used to correct the sessions&events count every week
 def weekly_cron():
     with pg_client.PostgresClient(long_query=True) as cur:
-        query = cur.mogrify(
-            """SELECT project_id,
+        query = cur.mogrify("""SELECT project_id,
                       projects_stats.last_update_at
                FROM public.projects
                    LEFT JOIN public.projects_stats USING (project_id)
                WHERE projects.deleted_at IS NULL
-               ORDER BY project_id;"""
-        )
+               ORDER BY project_id;""")
         cur.execute(query)
         rows = cur.fetchall()
         for r in rows:
             if r["last_update_at"] is None:
                 continue

-            params = {
-                "project_id": r["project_id"],
-                "end_ts": TimeUTC.now(),
-                "sessions_count": 0,
-                "events_count": 0,
-            }
+            params = {"project_id": r["project_id"],
+                      "end_ts": TimeUTC.now(),
+                      "sessions_count": 0,
+                      "events_count": 0}

-            query = cur.mogrify(
-                """SELECT COUNT(1) AS sessions_count,
+            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
                           COALESCE(SUM(events_count),0) AS events_count
                    FROM public.sessions
                    WHERE project_id=%(project_id)s
                      AND start_ts<=%(end_ts)s
                      AND duration IS NOT NULL;""",
-                params,
-            )
+                                params)
             cur.execute(query)
             row = cur.fetchone()
             if row is not None:
                 params["sessions_count"] = row["sessions_count"]
                 params["events_count"] = row["events_count"]

-                query = cur.mogrify(
-                    """UPDATE public.projects_stats
+                query = cur.mogrify("""UPDATE public.projects_stats
                        SET sessions_count=%(sessions_count)s,
                            events_count=%(events_count)s,
                            last_update_at=(now() AT TIME ZONE 'utc'::text)
                        WHERE project_id=%(project_id)s;""",
-                    params,
-                )
+                                    params)
             cur.execute(query)
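Both crons above build their SQL with `cursor.mogrify` and `%(name)s` placeholders plus a `params` dict, so values are escaped by the driver instead of being formatted into the string; the branches only disagree on how those calls are wrapped. A tiny standalone helper, not part of the codebase and purely illustrative, that checks a query's named placeholders against its params dict:

import re


def check_named_params(query: str, params: dict) -> None:
    """Sanity-check that every %(name)s placeholder has a matching key in params."""
    placeholders = set(re.findall(r"%\((\w+)\)s", query))
    missing = placeholders - params.keys()
    extra = params.keys() - placeholders
    if missing:
        raise ValueError(f"missing params: {sorted(missing)}")
    if extra:
        # Extra keys are ignored by psycopg2, but usually indicate a typo.
        print(f"unused params: {sorted(extra)}")


query = """SELECT COUNT(1) AS sessions_count
           FROM public.sessions
           WHERE project_id=%(project_id)s
             AND start_ts>=%(start_ts)s
             AND start_ts<=%(end_ts)s
             AND duration IS NOT NULL;"""
check_named_params(query, {"project_id": 42,
                           "start_ts": 1700000000000,
                           "end_ts": 1700086400000})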
@@ -1,8 +1,7 @@
 import logging

 import schemas
-from chalicelib.core import sessions
-from chalicelib.core.sessions import sessions_mobs
+from chalicelib.core import sessions_mobs, sessions
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils import sql_helper as sh

@@ -1,12 +1,12 @@
 import schemas
-from chalicelib.core.issue_tracking import base
-from chalicelib.core.issue_tracking.github_issue import GithubIntegrationIssue
+from chalicelib.core import integration_base
+from chalicelib.core.integration_github_issue import GithubIntegrationIssue
 from chalicelib.utils import pg_client, helper

 PROVIDER = schemas.IntegrationType.GITHUB


-class GitHubIntegration(base.BaseIntegration):
+class GitHubIntegration(integration_base.BaseIntegration):

     def __init__(self, tenant_id, user_id):
         self.__tenant_id = tenant_id
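This file and the ones that follow differ between the branches mainly in import paths: `main` keeps the issue-tracking code under a `chalicelib.core.issue_tracking` package, while this branch still imports flat `chalicelib.core.integration_*` modules. When such a move is made incrementally, a thin re-export module can keep the old path importable during migration; the sketch below is a generic illustration of that idea and is not present in either branch.

# chalicelib/core/integration_base.py  -- hypothetical compatibility shim
# Re-export the relocated implementation so callers that still use the old,
# flat module path keep working while imports are migrated file by file.
from chalicelib.core.issue_tracking.base import BaseIntegration  # noqa: F401

Once every caller imports from the new package, the shim can be deleted in a follow-up change without touching call sites again.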
@@ -1,12 +1,12 @@
-from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue
+from chalicelib.core.integration_base_issue import BaseIntegrationIssue
 from chalicelib.utils import github_client_v3
 from chalicelib.utils.github_client_v3 import github_formatters as formatter


 class GithubIntegrationIssue(BaseIntegrationIssue):
-    def __init__(self, token):
-        self.__client = github_client_v3.githubV3Request(token)
-        super(GithubIntegrationIssue, self).__init__("GITHUB", token)
+    def __init__(self, integration_token):
+        self.__client = github_client_v3.githubV3Request(integration_token)
+        super(GithubIntegrationIssue, self).__init__("GITHUB", integration_token)

     def get_current_user(self):
         return formatter.user(self.__client.get("/user"))

@@ -28,9 +28,9 @@ class GithubIntegrationIssue(BaseIntegrationIssue):

         return meta

-    def create_new_assignment(self, project_id, title, description, assignee,
+    def create_new_assignment(self, integration_project_id, title, description, assignee,
                               issue_type):
-        repoId = project_id
+        repoId = integration_project_id
         assignees = [assignee]
         labels = [str(issue_type)]

@@ -59,11 +59,11 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
     def get_by_ids(self, saved_issues):
         results = []
         for i in saved_issues:
-            results.append(self.get(project_id=i["integrationProjectId"], assignment_id=i["id"]))
+            results.append(self.get(integration_project_id=i["integrationProjectId"], assignment_id=i["id"]))
         return {"issues": results}

-    def get(self, project_id, assignment_id):
-        repoId = project_id
+    def get(self, integration_project_id, assignment_id):
+        repoId = integration_project_id
         issueNumber = assignment_id
         issue = self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}")
         issue = formatter.issue(issue)

@@ -72,17 +72,17 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
                           self.__client.get(f"/repositories/{repoId}/issues/{issueNumber}/comments")]
         return issue

-    def comment(self, project_id, assignment_id, comment):
-        repoId = project_id
+    def comment(self, integration_project_id, assignment_id, comment):
+        repoId = integration_project_id
         issueNumber = assignment_id
         commentCreated = self.__client.post(f"/repositories/{repoId}/issues/{issueNumber}/comments",
                                             body={"body": comment})
         return formatter.comment(commentCreated)

-    def get_metas(self, project_id):
+    def get_metas(self, integration_project_id):
         current_user = self.get_current_user()
         try:
-            users = self.__client.get(f"/repositories/{project_id}/collaborators")
+            users = self.__client.get(f"/repositories/{integration_project_id}/collaborators")
         except Exception as e:
             users = []
         users = [formatter.user(u) for u in users]

@@ -92,7 +92,7 @@ class GithubIntegrationIssue(BaseIntegrationIssue):
         return {"provider": self.provider.lower(),
                 'users': users,
                 'issueTypes': [formatter.label(l) for l in
-                               self.__client.get(f"/repositories/{project_id}/labels")]
+                               self.__client.get(f"/repositories/{integration_project_id}/labels")]
                 }

     def get_projects(self):
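`GithubIntegrationIssue` delegates HTTP to `github_client_v3.githubV3Request`, whose internals are not part of this diff. The endpoints it hits (`/user`, `/repositories/{id}/issues/{n}`, `/repositories/{id}/issues/{n}/comments`, `/repositories/{id}/collaborators`, `/repositories/{id}/labels`) are plain GitHub REST v3 paths, so a minimal stand-in client could look like the sketch below; the token handling and base URL are assumptions, not the project's actual wrapper.

import requests


class MinimalGithubV3Client:
    """Just enough of a GitHub REST v3 client to serve the calls made above."""

    def __init__(self, token, base_url="https://api.github.com"):
        self.base_url = base_url
        self.headers = {"Authorization": f"token {token}",
                        "Accept": "application/vnd.github.v3+json"}

    def get(self, path):
        r = requests.get(self.base_url + path, headers=self.headers, timeout=10)
        r.raise_for_status()
        return r.json()

    def post(self, path, body=None):
        r = requests.post(self.base_url + path, headers=self.headers, json=body, timeout=10)
        r.raise_for_status()
        return r.json()


# Example mirroring get()/comment() above (left commented out so the module imports
# cleanly without a token):
# client = MinimalGithubV3Client(token="<personal-access-token>")
# issue = client.get(f"/repositories/{repo_id}/issues/{issue_number}")
# client.post(f"/repositories/{repo_id}/issues/{issue_number}/comments", body={"body": "note"})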
@@ -1,6 +1,6 @@
 import schemas
-from chalicelib.core.issue_tracking import base
-from chalicelib.core.issue_tracking.jira_cloud_issue import JIRACloudIntegrationIssue
+from chalicelib.core import integration_base
+from chalicelib.core.integration_jira_cloud_issue import JIRACloudIntegrationIssue
 from chalicelib.utils import pg_client, helper

 PROVIDER = schemas.IntegrationType.JIRA

@@ -10,7 +10,7 @@ def obfuscate_string(string):
     return "*" * (len(string) - 4) + string[-4:]


-class JIRAIntegration(base.BaseIntegration):
+class JIRAIntegration(integration_base.BaseIntegration):
     def __init__(self, tenant_id, user_id):
         self.__tenant_id = tenant_id
         # TODO: enable super-constructor when OAuth is done

@@ -50,8 +50,8 @@ class JIRAIntegration(base.BaseIntegration):
             cur.execute(
                 cur.mogrify(
                     """SELECT username, token, url
                        FROM public.jira_cloud
-                       WHERE user_id = %(user_id)s;""",
+                       WHERE user_id=%(user_id)s;""",
                     {"user_id": self._user_id})
             )
             data = helper.dict_to_camel_case(cur.fetchone())

@@ -95,9 +95,10 @@ class JIRAIntegration(base.BaseIntegration):
     def add(self, username, token, url, obfuscate=False):
         with pg_client.PostgresClient() as cur:
             cur.execute(
-                cur.mogrify(""" \
-                    INSERT INTO public.jira_cloud(username, token, user_id, url)
-                    VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
+                cur.mogrify("""\
+                    INSERT INTO public.jira_cloud(username, token, user_id,url)
+                    VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
+                    RETURNING username, token, url;""",
                             {"user_id": self._user_id, "username": username,
                              "token": token, "url": url})
             )

@@ -111,10 +112,9 @@ class JIRAIntegration(base.BaseIntegration):
     def delete(self):
         with pg_client.PostgresClient() as cur:
             cur.execute(
-                cur.mogrify(""" \
-                    DELETE
-                    FROM public.jira_cloud
-                    WHERE user_id = %(user_id)s;""",
+                cur.mogrify("""\
+                    DELETE FROM public.jira_cloud
+                    WHERE user_id=%(user_id)s;""",
                             {"user_id": self._user_id})
             )
             return {"state": "success"}

@@ -125,7 +125,7 @@ class JIRAIntegration(base.BaseIntegration):
             changes={
                 "username": data.username,
                 "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
-                    else self.integration["token"],
+                    else self.integration.token,
                 "url": str(data.url)
             },
             obfuscate=True
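The `obfuscate_string` helper touched in the hunk at line 10 masks everything except the last four characters, which is how stored JIRA tokens are echoed back to the client. A quick standalone illustration of the exact expression from the file:

def obfuscate_string(string):
    # Same expression as above: keep only the last 4 characters visible.
    return "*" * (len(string) - 4) + string[-4:]


assert obfuscate_string("abcd1234secret") == "**********cret"
# Note: inputs shorter than 4 characters come back unmasked ("ab" -> "ab"),
# since len(string) - 4 is negative and the star prefix is empty.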
@@ -1,5 +1,5 @@
 from chalicelib.utils import jira_client
-from chalicelib.core.issue_tracking.base_issue import BaseIntegrationIssue
+from chalicelib.core.integration_base_issue import BaseIntegrationIssue


 class JIRACloudIntegrationIssue(BaseIntegrationIssue):

@@ -9,8 +9,8 @@ class JIRACloudIntegrationIssue(BaseIntegrationIssue):
         self._client = jira_client.JiraManager(self.url, self.username, token, None)
         super(JIRACloudIntegrationIssue, self).__init__("JIRA", token)

-    def create_new_assignment(self, project_id, title, description, assignee, issue_type):
-        self._client.set_jira_project_id(project_id)
+    def create_new_assignment(self, integration_project_id, title, description, assignee, issue_type):
+        self._client.set_jira_project_id(integration_project_id)
         data = {
             'summary': title,
             'description': description,

@@ -28,26 +28,26 @@ class JIRACloudIntegrationIssue(BaseIntegrationIssue):
             projects_map[i["integrationProjectId"]].append(i["id"])

         results = []
-        for project_id in projects_map:
-            self._client.set_jira_project_id(project_id)
+        for integration_project_id in projects_map:
+            self._client.set_jira_project_id(integration_project_id)
             jql = 'labels = OpenReplay'
-            if len(projects_map[project_id]) > 0:
-                jql += f" AND ID IN ({','.join(projects_map[project_id])})"
+            if len(projects_map[integration_project_id]) > 0:
+                jql += f" AND ID IN ({','.join(projects_map[integration_project_id])})"
             issues = self._client.get_issues(jql, offset=0)
             results += issues
         return {"issues": results}

-    def get(self, project_id, assignment_id):
-        self._client.set_jira_project_id(project_id)
+    def get(self, integration_project_id, assignment_id):
+        self._client.set_jira_project_id(integration_project_id)
         return self._client.get_issue_v3(assignment_id)

-    def comment(self, project_id, assignment_id, comment):
-        self._client.set_jira_project_id(project_id)
+    def comment(self, integration_project_id, assignment_id, comment):
+        self._client.set_jira_project_id(integration_project_id)
         return self._client.add_comment_v3(assignment_id, comment)

-    def get_metas(self, project_id):
+    def get_metas(self, integration_project_id):
         meta = {}
-        self._client.set_jira_project_id(project_id)
+        self._client.set_jira_project_id(integration_project_id)
         meta['issueTypes'] = self._client.get_issue_types()
         meta['users'] = self._client.get_assignable_users()
         return {"provider": self.provider.lower(), **meta}
@@ -1,5 +1,4 @@
 import schemas
-from chalicelib.core.modules import TENANT_CONDITION
 from chalicelib.utils import pg_client


@@ -52,10 +51,10 @@ def get_global_integrations_status(tenant_id, user_id, project_id):
                                 AND provider='elasticsearch')) AS {schemas.IntegrationType.ELASTICSEARCH.value},
                        EXISTS((SELECT 1
                                FROM public.webhooks
-                               WHERE type='slack' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.SLACK.value},
+                               WHERE type='slack' AND deleted_at ISNULL)) AS {schemas.IntegrationType.SLACK.value},
                        EXISTS((SELECT 1
                                FROM public.webhooks
-                               WHERE type='msteams' AND deleted_at ISNULL AND {TENANT_CONDITION})) AS {schemas.IntegrationType.MS_TEAMS.value},
+                               WHERE type='msteams' AND deleted_at ISNULL)) AS {schemas.IntegrationType.MS_TEAMS.value},
                        EXISTS((SELECT 1
                                FROM public.integrations
                                WHERE project_id=%(project_id)s AND provider='dynatrace')) AS {schemas.IntegrationType.DYNATRACE.value};""",
@@ -1,7 +1,7 @@
-from chalicelib.core.issue_tracking import github, jira_cloud
+from chalicelib.core import integration_github, integration_jira_cloud
 from chalicelib.utils import pg_client

-SUPPORTED_TOOLS = [github.PROVIDER, jira_cloud.PROVIDER]
+SUPPORTED_TOOLS = [integration_github.PROVIDER, integration_jira_cloud.PROVIDER]


 def get_available_integrations(user_id):

@@ -23,7 +23,7 @@ def get_available_integrations(user_id):

 def __get_default_integration(user_id):
     current_integrations = get_available_integrations(user_id)
-    return github.PROVIDER if current_integrations["github"] else jira_cloud.PROVIDER if \
+    return integration_github.PROVIDER if current_integrations["github"] else integration_jira_cloud.PROVIDER if \
         current_integrations["jira"] else None


@@ -35,11 +35,11 @@ def get_integration(tenant_id, user_id, tool=None, for_delete=False):
     tool = tool.upper()
     if tool not in SUPPORTED_TOOLS:
         return {"errors": [f"issue tracking tool not supported yet, available: {SUPPORTED_TOOLS}"]}, None
-    if tool == jira_cloud.PROVIDER:
-        integration = jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
+    if tool == integration_jira_cloud.PROVIDER:
+        integration = integration_jira_cloud.JIRAIntegration(tenant_id=tenant_id, user_id=user_id)
         if not for_delete and integration.integration is not None and not integration.integration.get("valid", True):
             return {"errors": ["JIRA: connexion issue/unauthorized"]}, integration
         return None, integration
-    elif tool == github.PROVIDER:
-        return None, github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
+    elif tool == integration_github.PROVIDER:
+        return None, integration_github.GitHubIntegration(tenant_id=tenant_id, user_id=user_id)
     return {"errors": ["lost integration"]}, None
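`get_integration` is the single entry point that maps a tool name onto the right provider class and returns an `(error, integration)` pair. A hedged usage sketch, assuming this module is imported; the tenant and user IDs are placeholders:

# Hypothetical caller, e.g. inside an API route handler.
error, integration = get_integration(tenant_id=1, user_id=7, tool="jira")
if error is not None:
    # Unsupported tool, or a JIRA connection that was flagged invalid.
    print(error)
else:
    # `integration` is a JIRAIntegration or GitHubIntegration instance;
    # integration.integration holds the stored credentials row (or None).
    print(integration.integration)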
@@ -1,10 +1,6 @@
-import logging
-
-from chalicelib.core.sessions import sessions_mobs, sessions_devtool
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
-
-logger = logging.getLogger(__name__)
+from chalicelib.core import sessions_mobs, sessions_devtool


 class Actions:

@@ -154,23 +150,23 @@ def get_scheduled_jobs():
 def execute_jobs():
     jobs = get_scheduled_jobs()
     for job in jobs:
-        logger.info(f"Executing jobId:{job['jobId']}")
+        print(f"Executing jobId:{job['jobId']}")
         try:
             if job["action"] == Actions.DELETE_USER_DATA:
                 session_ids = __get_session_ids_by_user_ids(project_id=job["projectId"],
                                                             user_ids=[job["referenceId"]])
                 if len(session_ids) > 0:
-                    logger.info(f"Deleting {len(session_ids)} sessions")
+                    print(f"Deleting {len(session_ids)} sessions")
                     __delete_sessions_by_session_ids(session_ids=session_ids)
                     __delete_session_mobs_by_session_ids(session_ids=session_ids, project_id=job["projectId"])
             else:
                 raise Exception(f"The action '{job['action']}' not supported.")

             job["status"] = JobStatus.COMPLETED
-            logger.info(f"Job completed {job['jobId']}")
+            print(f"Job completed {job['jobId']}")
         except Exception as e:
             job["status"] = JobStatus.FAILED
             job["errors"] = str(e)
-            logger.error(f"Job failed {job['jobId']}")
+            print(f"Job failed {job['jobId']}")

         update(job["jobId"], job)
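The `jobs.py` hunks contain one of the few behavioural differences in this block: `main` routes job progress through a module logger, while this branch still uses bare `print`. Wiring a module-level logger so those messages carry timestamps and severity is a stdlib one-liner per module; a minimal sketch (level and format strings are just examples):

import logging

# Typically done once at application start-up.
logging.basicConfig(level=logging.INFO,
                    format="%(asctime)s %(levelname)s %(name)s %(message)s")

logger = logging.getLogger(__name__)

job_id = 123  # placeholder value
logger.info(f"Executing jobId:{job_id}")   # replaces print(f"Executing jobId:{job_id}")
logger.error(f"Job failed {job_id}")       # the level carries severity, unlike print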
@@ -1,5 +1,6 @@
+from chalicelib.core import log_tools
 import requests
-from chalicelib.core.log_tools import log_tools
 from schemas import schemas

 IN_TY = "bugsnag"

@@ -1,5 +1,5 @@
 import boto3
-from chalicelib.core.log_tools import log_tools
+from chalicelib.core import log_tools
 from schemas import schemas

 IN_TY = "cloudwatch"

@@ -1,4 +1,4 @@
-from chalicelib.core.log_tools import log_tools
+from chalicelib.core import log_tools
 from schemas import schemas

 IN_TY = "datadog"

@@ -1,7 +1,8 @@
 import logging

-from chalicelib.core.log_tools import log_tools
 from elasticsearch import Elasticsearch

+from chalicelib.core import log_tools
 from schemas import schemas

 logger = logging.getLogger(__name__)

@@ -1,4 +1,4 @@
-from chalicelib.core.log_tools import log_tools
+from chalicelib.core import log_tools
 from schemas import schemas

 IN_TY = "newrelic"

@@ -1,4 +1,4 @@
-from chalicelib.core.log_tools import log_tools
+from chalicelib.core import log_tools
 from schemas import schemas

 IN_TY = "rollbar"

@@ -1,5 +1,5 @@
 import requests
-from chalicelib.core.log_tools import log_tools
+from chalicelib.core import log_tools
 from schemas import schemas

 IN_TY = "sentry"

@@ -1,4 +1,4 @@
-from chalicelib.core.log_tools import log_tools
+from chalicelib.core import log_tools
 from schemas import schemas

 IN_TY = "stackdriver"

@@ -1,4 +1,4 @@
-from chalicelib.core.log_tools import log_tools
+from chalicelib.core import log_tools
 from schemas import schemas

 IN_TY = "sumologic"
@@ -1,7 +1,5 @@
-import json
-
-from chalicelib.core.modules import TENANT_CONDITION
 from chalicelib.utils import pg_client, helper
+import json

 EXCEPT = ["jira_server", "jira_cloud"]


@@ -96,11 +94,11 @@ def get_all_by_tenant(tenant_id, integration):
     with pg_client.PostgresClient() as cur:
         cur.execute(
             cur.mogrify(
-                f"""SELECT integrations.*
+                """SELECT integrations.*
                    FROM public.integrations INNER JOIN public.projects USING(project_id)
-                   WHERE provider = %(provider)s AND {TENANT_CONDITION}
+                   WHERE provider = %(provider)s
                      AND projects.deleted_at ISNULL;""",
-                {"tenant_id": tenant_id, "provider": integration})
+                {"provider": integration})
         )
         r = cur.fetchall()
         return helper.list_to_camel_case(r, flatten=True)
@@ -98,23 +98,17 @@ def __edit(project_id, col_index, colname, new_name):
     if col_index not in list(old_metas.keys()):
         return {"errors": ["custom field not found"]}

-    if old_metas[col_index]["key"] != new_name:
-        with pg_client.PostgresClient() as cur:
+    with pg_client.PostgresClient() as cur:
+        if old_metas[col_index]["key"] != new_name:
             query = cur.mogrify(f"""UPDATE public.projects
                                     SET {colname} = %(value)s
                                     WHERE project_id = %(project_id)s
                                       AND deleted_at ISNULL
-                                    RETURNING {colname},
-                                        (SELECT {colname} FROM projects WHERE project_id = %(project_id)s) AS old_{colname};""",
+                                    RETURNING {colname};""",
                                 {"project_id": project_id, "value": new_name})
             cur.execute(query=query)
-            row = cur.fetchone()
-            new_name = row[colname]
-            old_name = row['old_' + colname]
+            new_name = cur.fetchone()[colname]
             old_metas[col_index]["key"] = new_name
-            projects.rename_metadata_condition(project_id=project_id,
-                                               old_metadata_key=old_name,
-                                               new_metadata_key=new_name)
     return {"data": old_metas[col_index]}


@@ -127,8 +121,8 @@ def edit(tenant_id, project_id, index: int, new_name: str):
 def delete(tenant_id, project_id, index: int):
     index = int(index)
     old_segments = get(project_id)
-    old_indexes = [k["index"] for k in old_segments]
-    if index not in old_indexes:
+    old_segments = [k["index"] for k in old_segments]
+    if index not in old_segments:
        return {"errors": ["custom field not found"]}

     with pg_client.PostgresClient() as cur:

@@ -138,8 +132,7 @@ def delete(tenant_id, project_id, index: int):
                             WHERE project_id = %(project_id)s AND deleted_at ISNULL;""",
                             {"project_id": project_id})
         cur.execute(query=query)
-        projects.delete_metadata_condition(project_id=project_id,
-                                           metadata_key=old_segments[old_indexes.index(index)]["key"])
     return {"data": get(project_id)}

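The `__edit` hunk shows the two branches diverging on how the rename is confirmed: this branch returns only the new column value (`RETURNING {colname}`), while `main` also pulls the previous value back in the same statement through a scalar subquery, so it can hand `old_name`/`new_name` to `projects.rename_metadata_condition`. The pattern in isolation, as a hedged sketch: the table and the placeholder style come from the diff above, but `metadata_1` and the literal values are assumptions used only for illustration.

# One UPDATE that returns both the new value and, via a scalar subquery evaluated
# against the pre-update snapshot, the old value of the same column.
query = """UPDATE public.projects
           SET metadata_1 = %(value)s
           WHERE project_id = %(project_id)s
             AND deleted_at ISNULL
           RETURNING metadata_1,
                     (SELECT metadata_1 FROM projects WHERE project_id = %(project_id)s) AS old_metadata_1;"""
params = {"project_id": 42, "value": "plan"}
# After cur.execute(cur.mogrify(query, params)), cur.fetchone() yields something like
# {"metadata_1": "plan", "old_metadata_1": "<previous key>"}, because the subquery
# still sees the row as it was before this statement's own write.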
624 api/chalicelib/core/metrics.py Normal file

@@ -0,0 +1,624 @@
+import logging
+
+import schemas
+from chalicelib.core import metadata
+from chalicelib.utils import helper
+from chalicelib.utils import pg_client
+from chalicelib.utils.TimeUTC import TimeUTC
+from chalicelib.utils.metrics_helper import __get_step_size
+
+logger = logging.getLogger(__name__)
+
+
+def __get_constraints(project_id, time_constraint=True, chart=False, duration=True, project=True,
+                      project_identifier="project_id",
+                      main_table="sessions", time_column="start_ts", data={}):
+    pg_sub_query = []
+    main_table = main_table + "." if main_table is not None and len(main_table) > 0 else ""
+    if project:
+        pg_sub_query.append(f"{main_table}{project_identifier} =%({project_identifier})s")
+    if duration:
+        pg_sub_query.append(f"{main_table}duration>0")
+    if time_constraint:
+        pg_sub_query.append(f"{main_table}{time_column} >= %(startTimestamp)s")
+        pg_sub_query.append(f"{main_table}{time_column} < %(endTimestamp)s")
+    if chart:
+        pg_sub_query.append(f"{main_table}{time_column} >= generated_timestamp")
+        pg_sub_query.append(f"{main_table}{time_column} < generated_timestamp + %(step_size)s")
+    return pg_sub_query + __get_meta_constraint(project_id=project_id, data=data)
+
+
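`__get_constraints` only accumulates SQL predicates as strings; every query in this new file then joins them with " AND " into its WHERE clause. A small sketch of the fragment a default call produces and how the queries below consume it (reproduced for illustration; inside the module the metadata-filter constraints from `__get_meta_constraint` are appended as well):

# Constraints for main_table="sessions", time_column="start_ts", defaults.
pg_sub_query = ["sessions.project_id =%(project_id)s",
                "sessions.duration>0",
                "sessions.start_ts >= %(startTimestamp)s",
                "sessions.start_ts < %(endTimestamp)s"]

where_clause = " AND ".join(pg_sub_query)
pg_query = f"""SELECT COUNT(sessions.session_id) AS count
               FROM public.sessions
               WHERE {where_clause};"""
print(pg_query)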
|
def __merge_charts(list1, list2, time_key="timestamp"):
|
||||||
|
if len(list1) != len(list2):
|
||||||
|
raise Exception("cannot merge unequal lists")
|
||||||
|
result = []
|
||||||
|
for i in range(len(list1)):
|
||||||
|
timestamp = min(list1[i][time_key], list2[i][time_key])
|
||||||
|
result.append({**list1[i], **list2[i], time_key: timestamp})
|
||||||
|
return result
|
||||||
|
|
||||||
|
|
||||||
|
def __get_constraint_values(data):
|
||||||
|
params = {}
|
||||||
|
for i, f in enumerate(data.get("filters", [])):
|
||||||
|
params[f"{f['key']}_{i}"] = f["value"]
|
||||||
|
return params
|
||||||
|
|
||||||
|
|
||||||
|
def __get_meta_constraint(project_id, data):
|
||||||
|
if len(data.get("filters", [])) == 0:
|
||||||
|
return []
|
||||||
|
constraints = []
|
||||||
|
meta_keys = metadata.get(project_id=project_id)
|
||||||
|
meta_keys = {m["key"]: m["index"] for m in meta_keys}
|
||||||
|
|
||||||
|
for i, f in enumerate(data.get("filters", [])):
|
||||||
|
if f["key"] in meta_keys.keys():
|
||||||
|
key = f"sessions.metadata_{meta_keys[f['key']]})"
|
||||||
|
if f["value"] in ["*", ""]:
|
||||||
|
constraints.append(f"{key} IS NOT NULL")
|
||||||
|
else:
|
||||||
|
constraints.append(f"{key} = %({f['key']}_{i})s")
|
||||||
|
else:
|
||||||
|
filter_type = f["key"].upper()
|
||||||
|
filter_type = [filter_type, "USER" + filter_type, filter_type[4:]]
|
||||||
|
if any(item in [schemas.FilterType.USER_BROWSER] \
|
||||||
|
for item in filter_type):
|
||||||
|
constraints.append(f"sessions.user_browser = %({f['key']}_{i})s")
|
||||||
|
elif any(item in [schemas.FilterType.USER_OS, schemas.FilterType.USER_OS_MOBILE] \
|
||||||
|
for item in filter_type):
|
||||||
|
constraints.append(f"sessions.user_os = %({f['key']}_{i})s")
|
||||||
|
elif any(item in [schemas.FilterType.USER_DEVICE, schemas.FilterType.USER_DEVICE_MOBILE] \
|
||||||
|
for item in filter_type):
|
||||||
|
constraints.append(f"sessions.user_device = %({f['key']}_{i})s")
|
||||||
|
elif any(item in [schemas.FilterType.USER_COUNTRY, schemas.FilterType.USER_COUNTRY_MOBILE] \
|
||||||
|
for item in filter_type):
|
||||||
|
constraints.append(f"sessions.user_country = %({f['key']}_{i})s")
|
||||||
|
elif any(item in [schemas.FilterType.USER_ID, schemas.FilterType.USER_ID_MOBILE] \
|
||||||
|
for item in filter_type):
|
||||||
|
constraints.append(f"sessions.user_id = %({f['key']}_{i})s")
|
||||||
|
elif any(item in [schemas.FilterType.USER_ANONYMOUS_ID, schemas.FilterType.USER_ANONYMOUS_ID_MOBILE] \
|
||||||
|
for item in filter_type):
|
||||||
|
constraints.append(f"sessions.user_anonymous_id = %({f['key']}_{i})s")
|
||||||
|
elif any(item in [schemas.FilterType.REV_ID, schemas.FilterType.REV_ID_MOBILE] \
|
||||||
|
for item in filter_type):
|
||||||
|
constraints.append(f"sessions.rev_id = %({f['key']}_{i})s")
|
||||||
|
return constraints
|
||||||
|
|
||||||
|
|
||||||
|
def get_processed_sessions(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||||
|
endTimestamp=TimeUTC.now(),
|
||||||
|
density=7, **args):
|
||||||
|
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||||
|
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||||
|
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
|
||||||
|
chart=True, data=args)
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
pg_query = f"""SELECT generated_timestamp AS timestamp,
|
||||||
|
COALESCE(COUNT(sessions), 0) AS value
|
||||||
|
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL ( SELECT 1
|
||||||
|
FROM public.sessions
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
) AS sessions ON (TRUE)
|
||||||
|
GROUP BY generated_timestamp
|
||||||
|
ORDER BY generated_timestamp;"""
|
||||||
|
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
||||||
|
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||||
|
cur.execute(cur.mogrify(pg_query, params))
|
||||||
|
rows = cur.fetchall()
|
||||||
|
results = {
|
||||||
|
"value": sum([r["value"] for r in rows]),
|
||||||
|
"chart": rows
|
||||||
|
}
|
||||||
|
|
||||||
|
diff = endTimestamp - startTimestamp
|
||||||
|
endTimestamp = startTimestamp
|
||||||
|
startTimestamp = endTimestamp - diff
|
||||||
|
|
||||||
|
pg_query = f"""SELECT COUNT(sessions.session_id) AS count
|
||||||
|
FROM public.sessions
|
||||||
|
WHERE {" AND ".join(pg_sub_query)};"""
|
||||||
|
params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
|
||||||
|
**__get_constraint_values(args)}
|
||||||
|
|
||||||
|
cur.execute(cur.mogrify(pg_query, params))
|
||||||
|
|
||||||
|
count = cur.fetchone()["count"]
|
||||||
|
|
||||||
|
results["progress"] = helper.__progress(old_val=count, new_val=results["value"])
|
||||||
|
results["unit"] = schemas.TemplatePredefinedUnits.COUNT
|
||||||
|
return results
|
||||||
|
|
||||||
|
|
||||||
|
def __get_neutral(rows, add_All_if_empty=True):
|
||||||
|
neutral = {l: 0 for l in [i for k in [list(v.keys()) for v in rows] for i in k]}
|
||||||
|
if add_All_if_empty and len(neutral.keys()) <= 1:
|
||||||
|
neutral = {"All": 0}
|
||||||
|
return neutral
|
||||||
|
|
||||||
|
|
||||||
|
def __merge_rows_with_neutral(rows, neutral):
|
||||||
|
for i in range(len(rows)):
|
||||||
|
rows[i] = {**neutral, **rows[i]}
|
||||||
|
return rows
|
||||||
|
|
||||||
|
|
||||||
|
def __get_domains_errors_4xx_and_5xx(status, project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||||
|
endTimestamp=TimeUTC.now(), density=6, **args):
|
||||||
|
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||||
|
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True, chart=False, data=args)
|
||||||
|
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, chart=True,
|
||||||
|
data=args, main_table="requests", time_column="timestamp", project=False,
|
||||||
|
duration=False)
|
||||||
|
pg_sub_query_subset.append("requests.status_code/100 = %(status_code)s")
|
||||||
|
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
pg_query = f"""WITH requests AS (SELECT host, timestamp
|
||||||
|
FROM events_common.requests INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||||
|
)
|
||||||
|
SELECT generated_timestamp AS timestamp,
|
||||||
|
COALESCE(JSONB_AGG(requests) FILTER ( WHERE requests IS NOT NULL ), '[]'::JSONB) AS keys
|
||||||
|
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL ( SELECT requests.host, COUNT(*) AS count
|
||||||
|
FROM requests
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
GROUP BY host
|
||||||
|
ORDER BY count DESC
|
||||||
|
LIMIT 5
|
||||||
|
) AS requests ON (TRUE)
|
||||||
|
GROUP BY generated_timestamp
|
||||||
|
ORDER BY generated_timestamp;"""
|
||||||
|
params = {"project_id": project_id,
|
||||||
|
"startTimestamp": startTimestamp,
|
||||||
|
"endTimestamp": endTimestamp,
|
||||||
|
"step_size": step_size,
|
||||||
|
"status_code": status, **__get_constraint_values(args)}
|
||||||
|
cur.execute(cur.mogrify(pg_query, params))
|
||||||
|
rows = cur.fetchall()
|
||||||
|
rows = __nested_array_to_dict_array(rows, key="host")
|
||||||
|
neutral = __get_neutral(rows)
|
||||||
|
rows = __merge_rows_with_neutral(rows, neutral)
|
||||||
|
|
||||||
|
return rows
|
||||||
|
|
||||||
|
|
||||||
|
def get_domains_errors_4xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||||
|
endTimestamp=TimeUTC.now(), density=6, **args):
|
||||||
|
return __get_domains_errors_4xx_and_5xx(status=4, project_id=project_id, startTimestamp=startTimestamp,
|
||||||
|
endTimestamp=endTimestamp, density=density, **args)
|
||||||
|
|
||||||
|
|
||||||
|
def get_domains_errors_5xx(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||||
|
endTimestamp=TimeUTC.now(), density=6, **args):
|
||||||
|
return __get_domains_errors_4xx_and_5xx(status=5, project_id=project_id, startTimestamp=startTimestamp,
|
||||||
|
endTimestamp=endTimestamp, density=density, **args)
|
||||||
|
|
||||||
|
|
||||||
|
def __nested_array_to_dict_array(rows, key="url_host", value="count"):
|
||||||
|
for r in rows:
|
||||||
|
for i in range(len(r["keys"])):
|
||||||
|
r[r["keys"][i][key]] = r["keys"][i][value]
|
||||||
|
r.pop("keys")
|
||||||
|
return rows
|
||||||
|
|
||||||
|
|
||||||
|
def get_errors_per_domains(project_id, limit, page, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||||
|
endTimestamp=TimeUTC.now(), **args):
|
||||||
|
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||||
|
pg_sub_query.append("requests.success = FALSE")
|
||||||
|
params = {"project_id": project_id,
|
||||||
|
"startTimestamp": startTimestamp,
|
||||||
|
"endTimestamp": endTimestamp,
|
||||||
|
"limit_s": (page - 1) * limit,
|
||||||
|
"limit_e": page * limit,
|
||||||
|
**__get_constraint_values(args)}
|
||||||
|
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
pg_query = f"""SELECT COALESCE(SUM(errors_count),0)::INT AS count,
|
||||||
|
COUNT(raw.domain) AS total,
|
||||||
|
jsonb_agg(raw) FILTER ( WHERE rn > %(limit_s)s
|
||||||
|
AND rn <= %(limit_e)s ) AS values
|
||||||
|
FROM (SELECT requests.host AS domain,
|
||||||
|
COUNT(requests.session_id) AS errors_count,
|
||||||
|
row_number() over (ORDER BY COUNT(requests.session_id) DESC ) AS rn
|
||||||
|
FROM events_common.requests
|
||||||
|
INNER JOIN sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query)}
|
||||||
|
GROUP BY requests.host
|
||||||
|
ORDER BY errors_count DESC) AS raw;"""
|
||||||
|
pg_query = cur.mogrify(pg_query, params)
|
||||||
|
logger.debug("-----------")
|
||||||
|
logger.debug(pg_query)
|
||||||
|
logger.debug("-----------")
|
||||||
|
cur.execute(pg_query)
|
||||||
|
row = cur.fetchone()
|
||||||
|
if row:
|
||||||
|
row["values"] = row["values"] or []
|
||||||
|
for r in row["values"]:
|
||||||
|
r.pop("rn")
|
||||||
|
|
||||||
|
return helper.dict_to_camel_case(row)
|
||||||
|
|
||||||
|
|
||||||
|
def get_errors_per_type(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTimestamp=TimeUTC.now(),
|
||||||
|
platform=None, density=7, **args):
|
||||||
|
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||||
|
|
||||||
|
pg_sub_query_subset = __get_constraints(project_id=project_id, data=args)
|
||||||
|
pg_sub_query_subset.append("requests.timestamp>=%(startTimestamp)s")
|
||||||
|
pg_sub_query_subset.append("requests.timestamp<%(endTimestamp)s")
|
||||||
|
pg_sub_query_subset.append("requests.status_code > 200")
|
||||||
|
|
||||||
|
pg_sub_query_subset_e = __get_constraints(project_id=project_id, data=args, duration=False, main_table="m_errors",
|
||||||
|
time_constraint=False)
|
||||||
|
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False,
|
||||||
|
chart=True, data=args, main_table="", time_column="timestamp",
|
||||||
|
project=False, duration=False)
|
||||||
|
pg_sub_query_subset_e.append("timestamp>=%(startTimestamp)s")
|
||||||
|
pg_sub_query_subset_e.append("timestamp<%(endTimestamp)s")
|
||||||
|
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
pg_query = f"""WITH requests AS (SELECT status_code AS status, timestamp
|
||||||
|
FROM events_common.requests
|
||||||
|
INNER JOIN public.sessions USING (session_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||||
|
),
|
||||||
|
errors_integ AS (SELECT timestamp
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN public.errors AS m_errors USING (error_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query_subset_e)}
|
||||||
|
AND source != 'js_exception'
|
||||||
|
),
|
||||||
|
errors_js AS (SELECT timestamp
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN public.errors AS m_errors USING (error_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query_subset_e)}
|
||||||
|
AND source = 'js_exception'
|
||||||
|
)
|
||||||
|
SELECT generated_timestamp AS timestamp,
|
||||||
|
COALESCE(SUM(CASE WHEN status / 100 = 4 THEN 1 ELSE 0 END), 0) AS _4xx,
|
||||||
|
COALESCE(SUM(CASE WHEN status / 100 = 5 THEN 1 ELSE 0 END), 0) AS _5xx,
|
||||||
|
COALESCE((SELECT COUNT(*)
|
||||||
|
FROM errors_js
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
), 0) AS js,
|
||||||
|
COALESCE((SELECT COUNT(*)
|
||||||
|
FROM errors_integ
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
), 0) AS integrations
|
||||||
|
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL (SELECT status
|
||||||
|
FROM requests
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
) AS errors_partition ON (TRUE)
|
||||||
|
GROUP BY timestamp
|
||||||
|
ORDER BY timestamp;"""
|
||||||
|
params = {"step_size": step_size,
|
||||||
|
"project_id": project_id,
|
||||||
|
"startTimestamp": startTimestamp,
|
||||||
|
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
|
||||||
|
cur.execute(cur.mogrify(pg_query, params))
|
||||||
|
rows = cur.fetchall()
|
||||||
|
rows = helper.list_to_camel_case(rows)
|
||||||
|
return rows
|
||||||
|
|
||||||
|
|
||||||
|
def get_impacted_sessions_by_js_errors(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
||||||
|
endTimestamp=TimeUTC.now(), density=7, **args):
|
||||||
|
step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
|
||||||
|
pg_sub_query = __get_constraints(project_id=project_id, data=args)
|
||||||
|
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
|
||||||
|
chart=True, data=args)
|
||||||
|
pg_sub_query.append("m_errors.source = 'js_exception'")
|
||||||
|
pg_sub_query.append("m_errors.project_id = %(project_id)s")
|
||||||
|
pg_sub_query.append("errors.timestamp >= %(startTimestamp)s")
|
||||||
|
pg_sub_query.append("errors.timestamp < %(endTimestamp)s")
|
||||||
|
pg_sub_query_chart.append("m_errors.source = 'js_exception'")
|
||||||
|
pg_sub_query_chart.append("m_errors.project_id = %(project_id)s")
|
||||||
|
pg_sub_query_chart.append("errors.timestamp >= generated_timestamp")
|
||||||
|
pg_sub_query_chart.append("errors.timestamp < generated_timestamp+ %(step_size)s")
|
||||||
|
|
||||||
|
pg_sub_query_subset = __get_constraints(project_id=project_id, data=args, duration=False, main_table="m_errors",
|
||||||
|
time_constraint=False)
|
||||||
|
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False,
|
||||||
|
chart=True, data=args, main_table="errors", time_column="timestamp",
|
||||||
|
project=False, duration=False)
|
||||||
|
pg_sub_query_subset.append("m_errors.source = 'js_exception'")
|
||||||
|
pg_sub_query_subset.append("errors.timestamp>=%(startTimestamp)s")
|
||||||
|
pg_sub_query_subset.append("errors.timestamp<%(endTimestamp)s")
|
||||||
|
|
||||||
|
with pg_client.PostgresClient() as cur:
|
||||||
|
pg_query = f"""WITH errors AS (SELECT DISTINCT ON (session_id,timestamp) session_id, timestamp
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN public.errors AS m_errors USING (error_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||||
|
)
|
||||||
|
SELECT *
|
||||||
|
FROM (SELECT COUNT(DISTINCT session_id) AS sessions_count
|
||||||
|
FROM errors) AS counts
|
||||||
|
LEFT JOIN
|
||||||
|
(SELECT jsonb_agg(chart) AS chart
|
||||||
|
FROM (SELECT generated_timestamp AS timestamp,
|
||||||
|
COALESCE(COUNT(session_id), 0) AS sessions_count
|
||||||
|
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL ( SELECT DISTINCT session_id
|
||||||
|
FROM errors
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
) AS sessions ON (TRUE)
|
||||||
|
GROUP BY generated_timestamp
|
||||||
|
ORDER BY generated_timestamp) AS chart) AS chart ON (TRUE);"""
|
||||||
|
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
|
||||||
|
"project_id": project_id,
|
||||||
|
"startTimestamp": startTimestamp,
|
||||||
|
"endTimestamp": endTimestamp,
|
||||||
|
**__get_constraint_values(args)}))
|
||||||
|
row_sessions = cur.fetchone()
|
||||||
|
pg_query = f"""WITH errors AS ( SELECT DISTINCT ON(errors.error_id,timestamp) errors.error_id,timestamp
|
||||||
|
FROM events.errors
|
||||||
|
INNER JOIN public.errors AS m_errors USING (error_id)
|
||||||
|
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||||
|
)
|
||||||
|
SELECT *
|
||||||
|
FROM (SELECT COUNT(DISTINCT errors.error_id) AS errors_count
|
||||||
|
FROM errors) AS counts
|
||||||
|
LEFT JOIN
|
||||||
|
(SELECT jsonb_agg(chart) AS chart
|
||||||
|
FROM (SELECT generated_timestamp AS timestamp,
|
||||||
|
COALESCE(COUNT(error_id), 0) AS errors_count
|
||||||
|
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
|
||||||
|
LEFT JOIN LATERAL ( SELECT DISTINCT errors.error_id
|
||||||
|
FROM errors
|
||||||
|
WHERE {" AND ".join(pg_sub_query_chart)}
|
||||||
|
) AS errors ON (TRUE)
|
||||||
|
GROUP BY generated_timestamp
|
||||||
|
ORDER BY generated_timestamp) AS chart) AS chart ON (TRUE);"""
|
||||||
|
cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
|
||||||
|
"project_id": project_id,
|
||||||
|
"startTimestamp": startTimestamp,
|
||||||
|
"endTimestamp": endTimestamp,
|
||||||
|
**__get_constraint_values(args)}))
|
||||||
|
row_errors = cur.fetchone()
|
||||||
|
chart = __merge_charts(row_sessions.pop("chart"), row_errors.pop("chart"))
|
||||||
|
row_sessions = helper.dict_to_camel_case(row_sessions)
|
||||||
|
row_errors = helper.dict_to_camel_case(row_errors)
|
||||||
|
return {**row_sessions, **row_errors, "chart": chart}
|
||||||
|
|
||||||
|
|
||||||
|
def get_resources_by_party(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                           endTimestamp=TimeUTC.now(), density=7, **args):
    step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
    pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
                                            chart=False, data=args)
    pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
                                           chart=True, data=args, main_table="requests", time_column="timestamp",
                                           duration=False)
    pg_sub_query_subset.append("requests.timestamp >= %(startTimestamp)s")
    pg_sub_query_subset.append("requests.timestamp < %(endTimestamp)s")
    # pg_sub_query_subset.append("resources.type IN ('fetch', 'script')")
    pg_sub_query_subset.append("requests.success = FALSE")

    with pg_client.PostgresClient() as cur:
        pg_query = f"""WITH requests AS (SELECT requests.host, timestamp
                                         FROM events_common.requests
                                                  INNER JOIN public.sessions USING (session_id)
                                         WHERE {" AND ".join(pg_sub_query_subset)})
                       SELECT generated_timestamp AS timestamp,
                              SUM(CASE WHEN first.host = sub_requests.host THEN 1 ELSE 0 END)  AS first_party,
                              SUM(CASE WHEN first.host != sub_requests.host THEN 1 ELSE 0 END) AS third_party
                       FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
                                LEFT JOIN (SELECT requests.host,
                                                  COUNT(requests.session_id) AS count
                                           FROM events_common.requests
                                                    INNER JOIN public.sessions USING (session_id)
                                           WHERE sessions.project_id = %(project_id)s
                                             AND sessions.start_ts > (EXTRACT(EPOCH FROM now() - INTERVAL '31 days') * 1000)::BIGINT
                                             AND sessions.start_ts < (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT
                                             AND requests.timestamp > (EXTRACT(EPOCH FROM now() - INTERVAL '31 days') * 1000)::BIGINT
                                             AND requests.timestamp < (EXTRACT(EPOCH FROM now()) * 1000)::BIGINT
                                             AND sessions.duration > 0
                                           GROUP BY requests.host
                                           ORDER BY count DESC
                                           LIMIT 1) AS first ON (TRUE)
                                LEFT JOIN LATERAL (SELECT requests.host
                                                   FROM requests
                                                   WHERE {" AND ".join(pg_sub_query_chart)}
                           ) AS sub_requests ON (TRUE)
                       GROUP BY generated_timestamp
                       ORDER BY generated_timestamp;"""
        cur.execute(cur.mogrify(pg_query, {"step_size": step_size,
                                           "project_id": project_id,
                                           "startTimestamp": startTimestamp,
                                           "endTimestamp": endTimestamp, **__get_constraint_values(args)}))

        rows = cur.fetchall()
    return rows


def get_user_activity_avg_visited_pages(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                                        endTimestamp=TimeUTC.now(), **args):
    with pg_client.PostgresClient() as cur:
        row = __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args)
        results = helper.dict_to_camel_case(row)
        results["chart"] = __get_user_activity_avg_visited_pages_chart(cur, project_id, startTimestamp,
                                                                       endTimestamp, **args)

        diff = endTimestamp - startTimestamp
        endTimestamp = startTimestamp
        startTimestamp = endTimestamp - diff
        row = __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args)

        previous = helper.dict_to_camel_case(row)
    results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
    results["unit"] = schemas.TemplatePredefinedUnits.COUNT
    return results


def __get_user_activity_avg_visited_pages(cur, project_id, startTimestamp, endTimestamp, **args):
    pg_sub_query = __get_constraints(project_id=project_id, data=args)
    pg_sub_query.append("sessions.pages_count>0")
    pg_query = f"""SELECT COALESCE(CEIL(AVG(sessions.pages_count)),0) AS value
                   FROM public.sessions
                   WHERE {" AND ".join(pg_sub_query)};"""
    params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
              **__get_constraint_values(args)}

    cur.execute(cur.mogrify(pg_query, params))
    row = cur.fetchone()
    return row


def __get_user_activity_avg_visited_pages_chart(cur, project_id, startTimestamp, endTimestamp, density=20, **args):
    step_size = __get_step_size(endTimestamp=endTimestamp, startTimestamp=startTimestamp, density=density, factor=1)
    params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
              "endTimestamp": endTimestamp}
    pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
                                            chart=False, data=args)
    pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
                                           chart=True, data=args, main_table="sessions", time_column="start_ts",
                                           duration=False)
    pg_sub_query_subset.append("sessions.duration IS NOT NULL")

    pg_query = f"""WITH sessions AS (SELECT sessions.pages_count, sessions.start_ts
                                     FROM public.sessions
                                     WHERE {" AND ".join(pg_sub_query_subset)})
                   SELECT generated_timestamp AS timestamp,
                          COALESCE(AVG(sessions.pages_count), 0) AS value
                   FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
                            LEFT JOIN LATERAL (SELECT sessions.pages_count
                                               FROM sessions
                                               WHERE {" AND ".join(pg_sub_query_chart)}
                       ) AS sessions ON (TRUE)
                   GROUP BY generated_timestamp
                   ORDER BY generated_timestamp;"""
    cur.execute(cur.mogrify(pg_query, {**params, **__get_constraint_values(args)}))
    rows = cur.fetchall()
    return rows


def get_top_metrics_count_requests(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                                   endTimestamp=TimeUTC.now(), value=None, density=20, **args):
    step_size = __get_step_size(endTimestamp=endTimestamp, startTimestamp=startTimestamp, density=density, factor=1)
    params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
              "endTimestamp": endTimestamp}
    pg_sub_query = __get_constraints(project_id=project_id, data=args)
    pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=False, project=False,
                                           chart=True, data=args, main_table="pages", time_column="timestamp",
                                           duration=False)

    if value is not None:
        pg_sub_query.append("pages.path = %(value)s")
        pg_sub_query_chart.append("pages.path = %(value)s")
    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT COUNT(pages.session_id) AS value
                       FROM events.pages INNER JOIN public.sessions USING (session_id)
                       WHERE {" AND ".join(pg_sub_query)};"""
        cur.execute(cur.mogrify(pg_query, {"project_id": project_id,
                                           "startTimestamp": startTimestamp,
                                           "endTimestamp": endTimestamp,
                                           "value": value, **__get_constraint_values(args)}))
        row = cur.fetchone()
        pg_query = f"""WITH pages AS (SELECT pages.timestamp
                                      FROM events.pages INNER JOIN public.sessions USING (session_id)
                                      WHERE {" AND ".join(pg_sub_query)})
                       SELECT generated_timestamp AS timestamp,
                              COUNT(pages.*) AS value
                       FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
                                LEFT JOIN LATERAL (SELECT 1
                                                   FROM pages
                                                   WHERE {" AND ".join(pg_sub_query_chart)}
                           ) AS pages ON (TRUE)
                       GROUP BY generated_timestamp
                       ORDER BY generated_timestamp;"""
        cur.execute(cur.mogrify(pg_query, {**params, **__get_constraint_values(args)}))
        rows = cur.fetchall()
        row["chart"] = rows
    row["unit"] = schemas.TemplatePredefinedUnits.COUNT
    return helper.dict_to_camel_case(row)


def get_unique_users(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                     endTimestamp=TimeUTC.now(),
                     density=7, **args):
    step_size = __get_step_size(startTimestamp, endTimestamp, density, factor=1)
    pg_sub_query = __get_constraints(project_id=project_id, data=args)
    pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
                                           chart=True, data=args)
    pg_sub_query.append("user_id IS NOT NULL")
    pg_sub_query.append("user_id != ''")
    pg_sub_query_chart.append("user_id IS NOT NULL")
    pg_sub_query_chart.append("user_id != ''")
    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT generated_timestamp AS timestamp,
                              COALESCE(COUNT(sessions), 0) AS value
                       FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
                                LEFT JOIN LATERAL (SELECT DISTINCT user_id
                                                   FROM public.sessions
                                                   WHERE {" AND ".join(pg_sub_query_chart)}
                           ) AS sessions ON (TRUE)
                       GROUP BY generated_timestamp
                       ORDER BY generated_timestamp;"""
        params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
        results = {
            "value": sum([r["value"] for r in rows]),
            "chart": rows
        }

        diff = endTimestamp - startTimestamp
        endTimestamp = startTimestamp
        startTimestamp = endTimestamp - diff

        pg_query = f"""SELECT COUNT(DISTINCT sessions.user_id) AS count
                       FROM public.sessions
                       WHERE {" AND ".join(pg_sub_query)};"""
        params = {"project_id": project_id, "startTimestamp": startTimestamp, "endTimestamp": endTimestamp,
                  **__get_constraint_values(args)}

        cur.execute(cur.mogrify(pg_query, params))

        count = cur.fetchone()["count"]

    results["progress"] = helper.__progress(old_val=count, new_val=results["value"])
    results["unit"] = schemas.TemplatePredefinedUnits.COUNT
    return results


def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
                             endTimestamp=TimeUTC.now(), **args):
    pg_sub_query = __get_constraints(project_id=project_id, data=args)
    pg_sub_query.append("pages.speed_index IS NOT NULL")
    pg_sub_query.append("pages.speed_index>0")

    with pg_client.PostgresClient() as cur:
        pg_query = f"""SELECT sessions.user_country, AVG(pages.speed_index) AS value
                       FROM events.pages INNER JOIN public.sessions USING (session_id)
                       WHERE {" AND ".join(pg_sub_query)}
                       GROUP BY sessions.user_country
                       ORDER BY value, sessions.user_country;"""
        params = {"project_id": project_id,
                  "startTimestamp": startTimestamp,
                  "endTimestamp": endTimestamp, **__get_constraint_values(args)}
        cur.execute(cur.mogrify(pg_query, params))
        rows = cur.fetchall()
        if len(rows) > 0:
            pg_query = f"""SELECT AVG(pages.speed_index) AS avg
                           FROM events.pages INNER JOIN public.sessions USING (session_id)
                           WHERE {" AND ".join(pg_sub_query)};"""
            cur.execute(cur.mogrify(pg_query, params))
            avg = cur.fetchone()["avg"]
        else:
            avg = 0
    return {"value": avg, "chart": helper.list_to_camel_case(rows), "unit": schemas.TemplatePredefinedUnits.MILLISECOND}

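The chart queries above all lean on the same bucketing idea: generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) produces one row per time bucket, and a LEFT JOIN LATERAL counts or averages the matching rows per bucket, so empty buckets still show up with a zero value. The following is a minimal standalone sketch of that idea in plain Python, without a database; the helper names (get_step_size, bucketize) and sample data are illustrative only and are not part of the module above.

# Illustrative sketch of the generate_series + LEFT JOIN LATERAL bucketing
# pattern used by the chart queries above, expressed without a database.

def get_step_size(start_ts: int, end_ts: int, density: int) -> int:
    # density buckets between start_ts and end_ts (timestamps in milliseconds)
    return max((end_ts - start_ts) // density, 1)

def bucketize(event_timestamps: list[int], start_ts: int, end_ts: int, density: int) -> list[dict]:
    step = get_step_size(start_ts, end_ts, density)
    chart = []
    for bucket_start in range(start_ts, end_ts, step):
        # COALESCE(COUNT(...), 0): empty buckets are still emitted with value 0
        count = sum(1 for ts in event_timestamps if bucket_start <= ts < bucket_start + step)
        chart.append({"timestamp": bucket_start, "value": count})
    return chart

if __name__ == "__main__":
    # three events spread over a 7000 ms window split into 7 buckets
    print(bucketize([1000, 1200, 6500], start_ts=0, end_ts=7000, density=7))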
@@ -1,10 +0,0 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental metrics")
else:
    pass

@@ -1,11 +0,0 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental heatmaps")
    from .heatmaps_ch import *
else:
    from .heatmaps import *

@@ -1,385 +0,0 @@
import logging

from decouple import config

import schemas
from chalicelib.core import events
from chalicelib.core.metrics.modules import sessions, sessions_mobs
from chalicelib.utils import sql_helper as sh

from chalicelib.utils import pg_client, helper, ch_client, exp_ch_helper

logger = logging.getLogger(__name__)


def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
    if data.url is None or data.url == "":
        return []
    args = {"startDate": data.startTimestamp, "endDate": data.endTimestamp,
            "project_id": project_id, "url": data.url}
    constraints = [
        "main_events.project_id = toUInt16(%(project_id)s)",
        "main_events.created_at >= toDateTime(%(startDate)s / 1000)",
        "main_events.created_at <= toDateTime(%(endDate)s / 1000)",
        "main_events.`$event_name` = 'CLICK'",
        "isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
    ]

    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
    else:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = f"{exp_ch_helper.get_main_events_table(data.startTimestamp)} AS main_events"
    # TODO: is this used ?
    # has_click_rage_filter = False
    # if len(data.filters) > 0:
    #     for i, f in enumerate(data.filters):
    #         if f.type == schemas.FilterType.issue and len(f.value) > 0:
    #             has_click_rage_filter = True
    #             query_from += """INNER JOIN events_common.issues USING (timestamp, session_id)
    #                              INNER JOIN issues AS mis USING (issue_id)
    #                              INNER JOIN LATERAL (
    #                                 SELECT COUNT(1) AS real_count
    #                                 FROM events.clicks AS sc
    #                                          INNER JOIN sessions as ss USING (session_id)
    #                                 WHERE ss.project_id = 2
    #                                   AND (sc.url = %(url)s OR sc.path = %(url)s)
    #                                   AND sc.timestamp >= %(startDate)s
    #                                   AND sc.timestamp <= %(endDate)s
    #                                   AND ss.start_ts >= %(startDate)s
    #                                   AND ss.start_ts <= %(endDate)s
    #                                   AND sc.selector = clicks.selector) AS r_clicks ON (TRUE)"""
    #             constraints += ["mis.project_id = %(project_id)s",
    #                             "issues.timestamp >= %(startDate)s",
    #                             "issues.timestamp <= %(endDate)s"]
    #             f_k = f"issue_value{i}"
    #             args = {**args, **sh.multi_values(f.value, value_key=f_k)}
    #             constraints.append(sh.multi_conditions(f"%({f_k})s = ANY (issue_types)",
    #                                                    f.value, value_key=f_k))
    #             constraints.append(sh.multi_conditions(f"mis.type = %({f_k})s",
    #                                                    f.value, value_key=f_k))
    # TODO: change this once click-rage is fixed
    # if data.click_rage and not has_click_rage_filter:
    #     constraints.append("""(issues_t.session_id IS NULL
    #                             OR (issues_t.datetime >= toDateTime(%(startDate)s/1000)
    #                                 AND issues_t.datetime <= toDateTime(%(endDate)s/1000)
    #                                 AND issues_t.project_id = toUInt16(%(project_id)s)
    #                                 AND issues_t.event_type = 'ISSUE'
    #                                 AND issues_t.project_id = toUInt16(%(project_id)s)
    #                                 AND mis.project_id = toUInt16(%(project_id)s)
    #                                 AND mis.type='click_rage'))""")
    #     query_from += """ LEFT JOIN experimental.events AS issues_t ON (main_events.session_id=issues_t.session_id)
    #                       LEFT JOIN experimental.issues AS mis ON (issues_t.issue_id=mis.issue_id)"""
    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT
                                        JSON_VALUE(CAST(`$properties` AS String), '$.normalized_x') AS normalized_x,
                                        JSON_VALUE(CAST(`$properties` AS String), '$.normalized_y') AS normalized_y
                                     FROM {query_from}
                                     WHERE {" AND ".join(constraints)}
                                     LIMIT 500;""",
                           parameters=args)
        logger.debug("---------")
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP 2 SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(data)
            logger.warning("--------------------")
            raise err

    return helper.list_to_camel_case(rows)


def get_x_y_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
    args = {"project_id": project_id, "session_id": session_id, "url": data.url}
    constraints = [
        "main_events.project_id = toUInt16(%(project_id)s)",
        "main_events.session_id = %(session_id)s",
        "main_events.`$event_name`='CLICK'",
        "isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
    ]
    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
    else:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"

    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT main_events.normalized_x AS normalized_x,
                                            main_events.normalized_y AS normalized_y
                                     FROM {query_from}
                                     WHERE {" AND ".join(constraints)};""",
                           parameters=args)
        logger.debug("---------")
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(data)
            logger.warning("--------------------")
            raise err

    return helper.list_to_camel_case(rows)


def get_selectors_by_url_and_session_id(project_id, session_id, data: schemas.GetHeatMapPayloadSchema):
    args = {"project_id": project_id, "session_id": session_id, "url": data.url}
    constraints = ["main_events.project_id = toUInt16(%(project_id)s)",
                   "main_events.session_id = %(session_id)s",
                   "main_events.`$event_name`='CLICK'"]

    if data.operator == schemas.SearchEventOperator.IS:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
    else:
        constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")
    args["url"] = helper.values_for_operator(data.url, data.operator)

    query_from = f"{exp_ch_helper.get_main_events_table(0)} AS main_events"

    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT CAST(`$properties`.selector AS String) AS selector,
                                            COUNT(1) AS count
                                     FROM {query_from}
                                     WHERE {" AND ".join(constraints)}
                                     GROUP BY 1
                                     ORDER BY count DESC;""",
                           parameters=args)
        logger.debug("---------")
        logger.debug(query)
        logger.debug("---------")
        try:
            rows = cur.execute(query=query)
        except Exception as err:
            logger.warning("--------- HEATMAP-session_id SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(data)
            logger.warning("--------------------")
            raise err

    return helper.list_to_camel_case(rows)


# use CH
SESSION_PROJECTION_COLS = """s.project_id,
                             s.session_id AS session_id,
                             toUnixTimestamp(s.datetime)*1000 AS start_ts,
                             s.duration AS duration"""


def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
                start_time: int,
                end_time: int) -> str | None:
    full_args = {
        "sessionId": session_id,
        "projectId": project_id,
        "start_time": start_time,
        "end_time": end_time,
    }
    sub_condition = ["session_id = %(sessionId)s", "`$event_name` = 'CLICK'", "project_id = %(projectId)s"]
    if location_condition and len(location_condition.value) > 0:
        f_k = "LOC"
        op = sh.get_sql_operator(location_condition.operator)
        full_args = {**full_args, **sh.multi_values(location_condition.value, value_key=f_k)}
        sub_condition.append(
            sh.multi_conditions(f'path {op} %({f_k})s', location_condition.value, is_not=False,
                                value_key=f_k))
    with ch_client.ClickHouseClient() as cur:
        main_query = cur.format(query=f"""WITH paths AS (
                                            SELECT DISTINCT
                                                   JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS url_path
                                            FROM product_analytics.events
                                            WHERE {" AND ".join(sub_condition)}
                                          )
                                          SELECT
                                              paths.url_path,
                                              COUNT(*) AS count
                                          FROM product_analytics.events
                                                   INNER JOIN paths
                                                       ON JSON_VALUE(CAST(product_analytics.events.$properties AS String), '$.url_path') = paths.url_path
                                          WHERE `$event_name` = 'CLICK'
                                            AND project_id = %(projectId)s
                                            AND created_at >= toDateTime(%(start_time)s / 1000)
                                            AND created_at <= toDateTime(%(end_time)s / 1000)
                                          GROUP BY paths.url_path
                                          ORDER BY count DESC
                                          LIMIT 1;""",
                                parameters=full_args)
        logger.debug("--------------------")
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            url = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP BEST URL SEARCH QUERY EXCEPTION CH-----------")
            logger.warning(main_query.decode('UTF-8'))
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(full_args)
            logger.warning("--------------------")
            raise err

    if url is None or len(url) == 0:
        return None
    return url[0]["url_path"]


def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_id,
                         include_mobs: bool = True, exclude_sessions: list[str] = [],
                         _depth: int = 3):
    no_platform = True
    location_condition = None
    no_click = True
    for f in data.filters:
        if f.type == schemas.FilterType.PLATFORM:
            no_platform = False
            break
    for f in data.events:
        if f.type == schemas.EventType.LOCATION:
            if len(f.value) == 0:
                f.operator = schemas.SearchEventOperator.IS_ANY
            location_condition = f.model_copy()
        elif f.type == schemas.EventType.CLICK:
            no_click = False
            if len(f.value) == 0:
                f.operator = schemas.SearchEventOperator.IS_ANY
        if location_condition and not no_click:
            break

    if no_platform:
        data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.PLATFORM,
                                                              value=[schemas.PlatformType.DESKTOP],
                                                              operator=schemas.SearchEventOperator.IS))
    if not location_condition:
        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
                                                             value=[],
                                                             operator=schemas.SearchEventOperator.IS_ANY))
    if no_click:
        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
                                                             value=[],
                                                             operator=schemas.SearchEventOperator.IS_ANY))

    data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
                                                          value=[0],
                                                          operator=schemas.MathOperator.GREATER))

    full_args, query_part = sessions.search_query_parts_ch(data=data, error_status=None, errors_only=False,
                                                           favorite_only=data.bookmarked, issue=None,
                                                           project_id=project_id, user_id=user_id)
    full_args["exclude_sessions"] = tuple(exclude_sessions)
    if len(exclude_sessions) > 0:
        query_part += "\n AND session_id NOT IN (%(exclude_sessions)s)"
    with ch_client.ClickHouseClient() as cur:
        data.order = schemas.SortOrderType.DESC
        data.sort = 'duration'
        main_query = cur.format(query=f"""SELECT *
                                          FROM (SELECT {SESSION_PROJECTION_COLS}
                                                {query_part}
                                                -- ORDER BY {data.sort} {data.order.value}
                                                LIMIT 20) AS raw
                                          ORDER BY rand()
                                          LIMIT 1;""",
                                parameters=full_args)
        logger.debug("--------------------")
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            session = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP SHORT SESSION SEARCH QUERY EXCEPTION CH -----------")
            logger.warning(main_query)
            logger.warning("--------- PAYLOAD -----------")
            logger.warning(data.model_dump_json())
            logger.warning("--------------------")
            raise err

    if len(session) > 0:
        session = session[0]
        if not location_condition or location_condition.operator == schemas.SearchEventOperator.IS_ANY:
            session["path"] = __get_1_url(project_id=project_id, session_id=session["session_id"],
                                          location_condition=location_condition,
                                          start_time=data.startTimestamp, end_time=data.endTimestamp)
        else:
            session["path"] = location_condition.value[0]

        if include_mobs:
            session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
            session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
            if _depth > 0 and len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
                return search_short_session(data=data, project_id=project_id, user_id=user_id,
                                            include_mobs=include_mobs,
                                            exclude_sessions=exclude_sessions + [session["session_id"]],
                                            _depth=_depth - 1)
            elif _depth == 0 and len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
                logger.info("couldn't find an existing replay after 3 iterations for heatmap")

        session['events'] = events.get_by_session_id(project_id=project_id, session_id=session["session_id"],
                                                     event_type=schemas.EventType.LOCATION)
    else:
        return None

    return helper.dict_to_camel_case(session)


def get_selected_session(project_id, session_id):
    with ch_client.ClickHouseClient() as cur:
        main_query = cur.format(query=f"""SELECT {SESSION_PROJECTION_COLS}
                                          FROM experimental.sessions AS s
                                          WHERE session_id=%(session_id)s;""",
                                parameters={"session_id": session_id})
        logger.debug("--------------------")
        logger.debug(main_query)
        logger.debug("--------------------")
        try:
            session = cur.execute(query=main_query)
        except Exception as err:
            logger.warning("--------- CLICK MAP GET SELECTED SESSION QUERY EXCEPTION -----------")
            logger.warning(main_query.decode('UTF-8'))
            raise err
    if len(session) > 0:
        session = session[0]
    else:
        session = None

    if session:
        session['domURL'] = sessions_mobs.get_urls(session_id=session["session_id"], project_id=project_id)
        session['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session["session_id"])
        if len(session['domURL']) == 0 and len(session['mobsUrl']) == 0:
            session["_issue"] = "mob file not found"
            logger.info("can't find selected mob file for heatmap")
        session['events'] = get_page_events(session_id=session["session_id"], project_id=project_id)

    return helper.dict_to_camel_case(session)


def get_page_events(session_id, project_id):
    with ch_client.ClickHouseClient() as cur:
        query = cur.format(query=f"""SELECT
                                         event_id as message_id,
                                         toUnixTimestamp(created_at)*1000 AS timestamp,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.url_host') AS host,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS path,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS value,
                                         JSON_VALUE(CAST(`$properties` AS String), '$.url_path') AS url,
                                         'LOCATION' AS type
                                     FROM product_analytics.events
                                     WHERE session_id = %(session_id)s
                                       AND `$event_name`='LOCATION'
                                       AND project_id= %(project_id)s
                                     ORDER BY created_at,message_id;""",
                           parameters={"session_id": session_id, "project_id": project_id})

        rows = cur.execute(query=query)
        rows = helper.list_to_camel_case(rows)
    return rows

@@ -1,12 +0,0 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    import chalicelib.core.sessions.sessions_ch as sessions
else:
    import chalicelib.core.sessions.sessions_pg as sessions

from chalicelib.core.sessions import sessions_mobs

@@ -1,10 +0,0 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

from .significance import *

if config("EXP_METRICS", cast=bool, default=False):
    from .significance_ch import *

@@ -1,10 +0,0 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental product-analytics")
    from .product_analytics_ch import *
else:
    from .product_analytics import *

@@ -1,14 +0,0 @@
from chalicelib.utils.ch_client import ClickHouseClient


def search_events(project_id: int, data: dict):
    with ClickHouseClient() as ch_client:
        r = ch_client.format(
            """SELECT *
               FROM taha.events
               WHERE project_id=%(project_id)s
               ORDER BY created_at;""",
            params={"project_id": project_id})
        x = ch_client.execute(r)

    return x

@@ -1,6 +0,0 @@
TENANT_CONDITION = "TRUE"
MOB_KEY = ""


def get_file_key(project_id, session_id):
    return {}

@@ -12,75 +12,42 @@ logger = logging.getLogger(__name__)
 def __transform_journey(rows, reverse_path=False):
     total_100p = 0
+    number_of_step1 = 0
     for r in rows:
         if r["event_number_in_session"] > 1:
             break
+        number_of_step1 += 1
         total_100p += r["sessions_count"]
+    # for i in range(number_of_step1):
+    #     rows[i]["value"] = 100 / number_of_step1
+
+    # for i in range(number_of_step1, len(rows)):
+    for i in range(len(rows)):
+        rows[i]["value"] = rows[i]["sessions_count"] * 100 / total_100p
+
     nodes = []
     nodes_values = []
     links = []
-    drops = []
-    max_depth = 0
     for r in rows:
-        r["value"] = r["sessions_count"] * 100 / total_100p
-        source = f"{r['event_number_in_session'] - 1}_{r['event_type']}_{r['e_value']}"
+        source = f"{r['event_number_in_session']}_{r['event_type']}_{r['e_value']}"
         if source not in nodes:
             nodes.append(source)
-            nodes_values.append({"depth": r['event_number_in_session'] - 1,
-                                 "name": r['e_value'],
-                                 "eventType": r['event_type'],
-                                 "id": len(nodes_values)})
-
-        target = f"{r['event_number_in_session']}_{r['next_type']}_{r['next_value']}"
-        if target not in nodes:
-            nodes.append(target)
-            nodes_values.append({"depth": r['event_number_in_session'],
-                                 "name": r['next_value'],
-                                 "eventType": r['next_type'],
-                                 "id": len(nodes_values)})
-
-        sr_idx = nodes.index(source)
-        tg_idx = nodes.index(target)
-
-        link = {"eventType": r['event_type'], "sessionsCount": r["sessions_count"], "value": r["value"]}
-        if not reverse_path:
-            link["source"] = sr_idx
-            link["target"] = tg_idx
-        else:
-            link["source"] = tg_idx
-            link["target"] = sr_idx
-        links.append(link)
-
-        max_depth = r['event_number_in_session']
-        if r["next_type"] == "DROP":
-            for d in drops:
-                if d["depth"] == r['event_number_in_session']:
-                    d["sessions_count"] += r["sessions_count"]
-                    break
-            else:
-                drops.append({"depth": r['event_number_in_session'], "sessions_count": r["sessions_count"]})
-
-    for i in range(len(drops)):
-
-        if drops[i]["depth"] < max_depth:
-            source = f"{drops[i]['depth']}_DROP_None"
-            target = f"{drops[i]['depth'] + 1}_DROP_None"
-            sr_idx = nodes.index(source)
-
-            if i < len(drops) - 1 and drops[i]["depth"] + 1 == drops[i + 1]["depth"]:
-                tg_idx = nodes.index(target)
-            else:
-                nodes.append(target)
-                nodes_values.append({"depth": drops[i]["depth"] + 1,
-                                     "name": None,
-                                     "eventType": "DROP",
-                                     "id": len(nodes_values)})
-                tg_idx = len(nodes) - 1
-
-            link = {"eventType": "DROP",
-                    "sessionsCount": drops[i]["sessions_count"],
-                    "value": drops[i]["sessions_count"] * 100 / total_100p}
+            nodes_values.append({"name": r['e_value'], "eventType": r['event_type'],
+                                 "avgTimeFromPrevious": 0, "sessionsCount": 0})
+        if r['next_value']:
+            target = f"{r['event_number_in_session'] + 1}_{r['next_type']}_{r['next_value']}"
+            if target not in nodes:
+                nodes.append(target)
+                nodes_values.append({"name": r['next_value'], "eventType": r['next_type'],
+                                     "avgTimeFromPrevious": 0, "sessionsCount": 0})
+            sr_idx = nodes.index(source)
+            tg_idx = nodes.index(target)
+            if r["avg_time_from_previous"] is not None:
+                nodes_values[tg_idx]["avgTimeFromPrevious"] += r["avg_time_from_previous"] * r["sessions_count"]
+            nodes_values[tg_idx]["sessionsCount"] += r["sessions_count"]
+            link = {"eventType": r['event_type'], "sessionsCount": r["sessions_count"],
+                    "value": r["value"], "avgTimeFromPrevious": r["avg_time_from_previous"]}
             if not reverse_path:
                 link["source"] = sr_idx
                 link["target"] = tg_idx
@@ -88,10 +55,13 @@ def __transform_journey(rows, reverse_path=False):
                 link["source"] = tg_idx
                 link["target"] = sr_idx
             links.append(link)
+    for n in nodes_values:
+        if n["sessionsCount"] > 0:
+            n["avgTimeFromPrevious"] = n["avgTimeFromPrevious"] / n["sessionsCount"]
+        else:
+            n["avgTimeFromPrevious"] = None
+        n.pop("sessionsCount")
 
-    if reverse_path:
-        for n in nodes_values:
-            n["depth"] = max_depth - n["depth"]
     return {"nodes": nodes_values,
             "links": sorted(links, key=lambda x: (x["source"], x["target"]), reverse=False)}
@@ -433,9 +403,7 @@ WITH sub_sessions AS (SELECT session_id {sub_sessions_extra_projection}
 {"UNION ALL".join(projection_query)};"""
     params = {"project_id": project_id, "startTimestamp": data.startTimestamp,
               "endTimestamp": data.endTimestamp, "density": data.density,
-              # This is ignored because UI will take care of it
-              # "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
-              "eventThresholdNumberInGroup": 8,
+              "eventThresholdNumberInGroup": 4 if data.hide_excess else 8,
               **extra_values}
     query = cur.mogrify(pg_query, params)
     _now = time()
@@ -1,7 +1,6 @@
 import json
-import logging
-from collections import Counter
 from typing import Optional, List
+from collections import Counter
 
 from fastapi import HTTPException, status
 
@@ -10,8 +9,6 @@ from chalicelib.core import users
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
 
-logger = logging.getLogger(__name__)
-
 
 def __exists_by_name(name: str, exclude_id: Optional[int]) -> bool:
     with pg_client.PostgresClient() as cur:
@@ -413,6 +410,7 @@ def update_project_conditions(project_id, conditions):
         create_project_conditions(project_id, to_be_created)
 
     if to_be_updated:
+        print(to_be_updated)
         update_project_condition(project_id, to_be_updated)
 
     return get_conditions(project_id)
@@ -427,45 +425,3 @@ def get_projects_ids(tenant_id):
         cur.execute(query=query)
         rows = cur.fetchall()
         return [r["project_id"] for r in rows]
-
-
-def delete_metadata_condition(project_id, metadata_key):
-    sql = """\
-            UPDATE public.projects_conditions
-            SET filters=(SELECT COALESCE(jsonb_agg(elem), '[]'::jsonb)
-                         FROM jsonb_array_elements(filters) AS elem
-                         WHERE NOT (elem ->> 'type' = 'metadata'
-                                    AND elem ->> 'source' = %(metadata_key)s))
-            WHERE project_id = %(project_id)s
-              AND jsonb_typeof(filters) = 'array'
-              AND EXISTS (SELECT 1
-                          FROM jsonb_array_elements(filters) AS elem
-                          WHERE elem ->> 'type' = 'metadata'
-                            AND elem ->> 'source' = %(metadata_key)s);"""
-
-    with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(sql, {"project_id": project_id, "metadata_key": metadata_key})
-        cur.execute(query)
-
-
-def rename_metadata_condition(project_id, old_metadata_key, new_metadata_key):
-    sql = """\
-            UPDATE public.projects_conditions
-            SET filters = (SELECT jsonb_agg(CASE
-                                               WHEN elem ->> 'type' = 'metadata' AND elem ->> 'source' = %(old_metadata_key)s
-                                                   THEN elem || ('{"source": "'||%(new_metadata_key)s||'"}')::jsonb
-                                               ELSE elem END)
-                           FROM jsonb_array_elements(filters) AS elem)
-            WHERE project_id = %(project_id)s
-              AND jsonb_typeof(filters) = 'array'
-              AND EXISTS (SELECT 1
-                          FROM jsonb_array_elements(filters) AS elem
-                          WHERE elem ->> 'type' = 'metadata'
-                            AND elem ->> 'source' = %(old_metadata_key)s);"""
-
-    with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(sql, {"project_id": project_id, "old_metadata_key": old_metadata_key,
-                                  "new_metadata_key": new_metadata_key})
-        cur.execute(query)
-
-# TODO: make project conditions use metadata-column-name instead of metadata-key
@@ -14,7 +14,7 @@ def reset(data: schemas.ForgetPasswordPayloadSchema, background_tasks: Backgroun
     if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
         return {"errors": ["Invalid captcha."]}
     if not smtp.has_smtp():
-        return {"errors": ["Email delivery failed due to invalid SMTP configuration. Please contact your admin."]}
+        return {"errors": ["no SMTP configuration found, you can ask your admin to reset your password"]}
     a_user = users.get_by_email_only(data.email)
    if a_user:
         invitation_link = users.generate_new_invitation(user_id=a_user["userId"])
@@ -2,20 +2,179 @@ import logging
 from typing import List, Union
 
 import schemas
-from chalicelib.core import events, metadata
-from . import performance_event
+from chalicelib.core import events, metadata, projects, performance_event, sessions_favorite
 from chalicelib.utils import pg_client, helper, metrics_helper
 from chalicelib.utils import sql_helper as sh
 
 logger = logging.getLogger(__name__)
 
+SESSION_PROJECTION_BASE_COLS = """s.project_id,
+                                  s.session_id::text AS session_id,
+                                  s.user_uuid,
+                                  s.user_id,
+                                  s.user_os,
+                                  s.user_browser,
+                                  s.user_device,
+                                  s.user_device_type,
+                                  s.user_country,
+                                  s.user_city,
+                                  s.user_state,
+                                  s.start_ts,
+                                  s.duration,
+                                  s.events_count,
+                                  s.pages_count,
+                                  s.errors_count,
+                                  s.user_anonymous_id,
+                                  s.platform,
+                                  s.issue_score,
+                                  s.timezone,
+                                  to_jsonb(s.issue_types) AS issue_types """
+
+SESSION_PROJECTION_COLS = SESSION_PROJECTION_BASE_COLS + """,
+                          favorite_sessions.session_id NOTNULL AS favorite,
+                          COALESCE((SELECT TRUE
+                                    FROM public.user_viewed_sessions AS fs
+                                    WHERE s.session_id = fs.session_id
+                                      AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed """
+
+
+# This function executes the query and return result
+def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
+                    error_status=schemas.ErrorStatus.ALL, count_only=False, issue=None, ids_only=False,
+                    platform="web"):
+    if data.bookmarked:
+        data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project_id, user_id)
+
+    full_args, query_part = search_query_parts(data=data, error_status=error_status, errors_only=errors_only,
+                                               favorite_only=data.bookmarked, issue=issue, project_id=project_id,
+                                               user_id=user_id, platform=platform)
+    if data.limit is not None and data.page is not None:
+        full_args["sessions_limit"] = data.limit
+        full_args["sessions_limit_s"] = (data.page - 1) * data.limit
+        full_args["sessions_limit_e"] = data.page * data.limit
+    else:
+        full_args["sessions_limit"] = 200
+        full_args["sessions_limit_s"] = 0
+        full_args["sessions_limit_e"] = 200
+
+    meta_keys = []
+    with pg_client.PostgresClient() as cur:
+        if errors_only:
+            main_query = cur.mogrify(f"""SELECT DISTINCT er.error_id,
+                                                COALESCE((SELECT TRUE
+                                                          FROM public.user_viewed_errors AS ve
+                                                          WHERE er.error_id = ve.error_id
+                                                            AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
+                                         {query_part};""", full_args)
+
+        elif count_only:
+            main_query = cur.mogrify(f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions,
+                                                COUNT(DISTINCT s.user_uuid) AS count_users
+                                         {query_part};""", full_args)
+        elif data.group_by_user:
+            g_sort = "count(full_sessions)"
+            if data.order is None:
+                data.order = schemas.SortOrderType.DESC.value
+            else:
+                data.order = data.order
+            if data.sort is not None and data.sort != 'sessionsCount':
+                sort = helper.key_to_snake_case(data.sort)
+                g_sort = f"{'MIN' if data.order == schemas.SortOrderType.DESC else 'MAX'}({sort})"
+            else:
+                sort = 'start_ts'
+
+            meta_keys = metadata.get(project_id=project_id)
+            main_query = cur.mogrify(f"""SELECT COUNT(*) AS count,
+                                                COALESCE(JSONB_AGG(users_sessions)
+                                                         FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
+                                         FROM (SELECT user_id,
+                                                      count(full_sessions) AS user_sessions_count,
+                                                      jsonb_agg(full_sessions) FILTER (WHERE rn <= 1) AS last_session,
+                                                      MIN(full_sessions.start_ts) AS first_session_ts,
+                                                      ROW_NUMBER() OVER (ORDER BY {g_sort} {data.order}) AS rn
+                                               FROM (SELECT *, ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY {sort} {data.order}) AS rn
+                                                     FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
+                                                           {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
+                                                           {query_part}
+                                                           ) AS filtred_sessions
+                                                     ) AS full_sessions
+                                               GROUP BY user_id
+                                               ) AS users_sessions;""",
+                                     full_args)
+        elif ids_only:
+            main_query = cur.mogrify(f"""SELECT DISTINCT ON(s.session_id) s.session_id
+                                         {query_part}
+                                         ORDER BY s.session_id desc
+                                         LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""",
+                                     full_args)
+        else:
+            if data.order is None:
+                data.order = schemas.SortOrderType.DESC.value
+            else:
+                data.order = data.order
+            sort = 'session_id'
+            if data.sort is not None and data.sort != "session_id":
+                # sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
+                sort = helper.key_to_snake_case(data.sort)
+
+            meta_keys = metadata.get(project_id=project_id)
+            main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
+                                                COALESCE(JSONB_AGG(full_sessions)
+                                                         FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
+                                         FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY {sort} {data.order}, issue_score DESC) AS rn
+                                               FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
+                                                     {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
+                                                     {query_part}
+                                                     ORDER BY s.session_id desc) AS filtred_sessions
+                                               ORDER BY {sort} {data.order}, issue_score DESC) AS full_sessions;""",
+                                     full_args)
+        logger.debug("--------------------")
+        logger.debug(main_query)
+        logger.debug("--------------------")
+        try:
+            cur.execute(main_query)
+            sessions = cur.fetchone()
+        except Exception as err:
+            logger.warning("--------- SESSIONS SEARCH QUERY EXCEPTION -----------")
+            logger.warning(main_query.decode('UTF-8'))
+            logger.warning("--------- PAYLOAD -----------")
+            logger.warning(data.model_dump_json())
+            logger.warning("--------------------")
+            raise err
+        if errors_only or ids_only:
+            return helper.list_to_camel_case(cur.fetchall())
+
+        if count_only:
+            return helper.dict_to_camel_case(sessions)
+
+        total = sessions["count"]
+        sessions = sessions["sessions"]
+
+    if data.group_by_user:
+        for i, s in enumerate(sessions):
+            sessions[i] = {**s.pop("last_session")[0], **s}
+            sessions[i].pop("rn")
+            sessions[i]["metadata"] = {k["key"]: sessions[i][f'metadata_{k["index"]}'] for k in meta_keys \
+                                       if sessions[i][f'metadata_{k["index"]}'] is not None}
+    else:
+        for i, s in enumerate(sessions):
+            sessions[i]["metadata"] = {k["key"]: sessions[i][f'metadata_{k["index"]}'] for k in meta_keys \
+                                       if sessions[i][f'metadata_{k["index"]}'] is not None}
+    # if not data.group_by_user and data.sort is not None and data.sort != "session_id":
+    #     sessions = sorted(sessions, key=lambda s: s[helper.key_to_snake_case(data.sort)],
+    #                        reverse=data.order.upper() == "DESC")
+    return {
+        'total': total,
+        'sessions': helper.list_to_camel_case(sessions)
+    }
+
+
 # TODO: remove "table of" search from this function
 def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, density: int,
-                   metric_type: schemas.MetricType, metric_of: schemas.MetricOfTimeseries | schemas.MetricOfTable,
-                   metric_value: List):
-    step_size = int(metrics_helper.get_step_size(endTimestamp=data.endTimestamp, startTimestamp=data.startTimestamp,
-                                                 density=density, factor=1, decimal=True))
+                   view_type: schemas.MetricTimeseriesViewType, metric_type: schemas.MetricType,
+                   metric_of: schemas.MetricOfTable, metric_value: List):
+    step_size = int(metrics_helper.__get_step_size(endTimestamp=data.endTimestamp, startTimestamp=data.startTimestamp,
+                                                   density=density, factor=1, decimal=True))
     extra_event = None
     if metric_of == schemas.MetricOfTable.VISITED_URL:
         extra_event = "events.pages"
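The added search_sessions builds its pagination window once up front: sessions_limit_s and sessions_limit_e are row-number bounds derived from page and limit, and the outer query keeps only rows whose ROW_NUMBER() falls inside that window. Below is a small sketch of just that windowing arithmetic in plain Python; the function names and the in-memory filtering are illustrative stand-ins for what the SQL FILTER clause does, not part of the diff itself.

# Illustrative sketch of the rn-window pagination used by search_sessions above.
# page/limit map to the half-open window (sessions_limit_s, sessions_limit_e],
# matching the SQL condition rn > %(sessions_limit_s)s AND rn <= %(sessions_limit_e)s.

def pagination_window(page: int | None, limit: int | None) -> dict:
    if limit is not None and page is not None:
        return {"sessions_limit": limit,
                "sessions_limit_s": (page - 1) * limit,
                "sessions_limit_e": page * limit}
    # default window used when no explicit paging is requested
    return {"sessions_limit": 200, "sessions_limit_s": 0, "sessions_limit_e": 200}

def keep_page(rows: list[dict], page: int, limit: int) -> list[dict]:
    w = pagination_window(page, limit)
    # rows are assumed to carry an "rn" value, as assigned by ROW_NUMBER() in SQL
    return [r for r in rows if w["sessions_limit_s"] < r["rn"] <= w["sessions_limit_e"]]

if __name__ == "__main__":
    rows = [{"rn": i, "session_id": i} for i in range(1, 31)]
    print([r["rn"] for r in keep_page(rows, page=2, limit=10)])  # -> 11 .. 20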
@@ -29,35 +188,39 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
     sessions = []
     with pg_client.PostgresClient() as cur:
         if metric_type == schemas.MetricType.TIMESERIES:
-            if metric_of == schemas.MetricOfTimeseries.SESSION_COUNT:
-                # main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT DISTINCT ON(s.session_id) s.session_id, s.start_ts
-                main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT s.session_id, s.start_ts
-                                                    {query_part})
-                                             SELECT generated_timestamp AS timestamp,
-                                                    COUNT(s) AS count
-                                             FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
-                                                      LEFT JOIN LATERAL (SELECT 1 AS s
-                                                                         FROM full_sessions
-                                                                         WHERE start_ts >= generated_timestamp
-                                                                           AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
-                                             GROUP BY generated_timestamp
-                                             ORDER BY generated_timestamp;""", full_args)
-            elif metric_of == schemas.MetricOfTimeseries.USER_COUNT:
-                main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT s.user_id, s.start_ts
+            if view_type == schemas.MetricTimeseriesViewType.LINE_CHART:
+                if metric_of == schemas.MetricOfTimeseries.SESSION_COUNT:
+                    # main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT DISTINCT ON(s.session_id) s.session_id, s.start_ts
+                    main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT s.session_id, s.start_ts
+                                                        {query_part})
+                                                 SELECT generated_timestamp AS timestamp,
+                                                        COUNT(s) AS count
+                                                 FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
+                                                          LEFT JOIN LATERAL ( SELECT 1 AS s
+                                                                              FROM full_sessions
+                                                                              WHERE start_ts >= generated_timestamp
+                                                                                AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
+                                                 GROUP BY generated_timestamp
+                                                 ORDER BY generated_timestamp;""", full_args)
+                elif metric_of == schemas.MetricOfTimeseries.USER_COUNT:
+                    main_query = cur.mogrify(f"""WITH full_sessions AS (SELECT s.user_id, s.start_ts
                                                  {query_part}
                                                  AND s.user_id IS NOT NULL
                                                  AND s.user_id != '')
                                  SELECT generated_timestamp AS timestamp,
                                         COUNT(s) AS count
                                  FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS generated_timestamp
-                                          LEFT JOIN LATERAL (SELECT DISTINCT user_id AS s
+                                          LEFT JOIN LATERAL ( SELECT DISTINCT user_id AS s
                                                               FROM full_sessions
                                                               WHERE start_ts >= generated_timestamp
                                                                 AND start_ts <= generated_timestamp + %(step_size)s) AS sessions ON (TRUE)
                                  GROUP BY generated_timestamp
                                  ORDER BY generated_timestamp;""", full_args)
+                else:
+                    raise Exception(f"Unsupported metricOf:{metric_of}")
             else:
-                raise Exception(f"Unsupported metricOf:{metric_of}")
+                main_query = cur.mogrify(f"""SELECT count(DISTINCT s.session_id) AS count
+                                             {query_part};""", full_args)
 
         logger.debug("--------------------")
         logger.debug(main_query)
@@ -71,8 +234,10 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
             logger.warning(data.model_dump_json())
             logger.warning("--------------------")
             raise err
-        sessions = cur.fetchall()
+        if view_type == schemas.MetricTimeseriesViewType.LINE_CHART:
+            sessions = cur.fetchall()
+        else:
+            sessions = cur.fetchone()["count"]
     elif metric_type == schemas.MetricType.TABLE:
         if isinstance(metric_of, schemas.MetricOfTable):
             main_col = "user_id"
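With this change, the shape of `sessions` depends on the view type: a line chart keeps the per-bucket rows from `cur.fetchall()`, while any other view gets the single scalar from `cur.fetchone()["count"]`. An illustrative sketch of the two shapes (the values are placeholders, not taken from the diff):

# illustrative only: the two result shapes produced by the branch above
line_chart_rows = [{"timestamp": 1700000000000, "count": 12},   # cur.fetchall()
                   {"timestamp": 1700000600000, "count": 9}]
overall_count = 21                                              # cur.fetchone()["count"]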
@@ -133,8 +298,8 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
 def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, density: int,
                   metric_of: schemas.MetricOfTable, metric_value: List,
                   metric_format: Union[schemas.MetricExtendedFormatType, schemas.MetricExtendedFormatType]):
-    step_size = int(metrics_helper.get_step_size(endTimestamp=data.endTimestamp, startTimestamp=data.startTimestamp,
+    step_size = int(metrics_helper.__get_step_size(endTimestamp=data.endTimestamp, startTimestamp=data.startTimestamp,
                                                  density=density, factor=1, decimal=True))
     extra_event = None
     extra_conditions = None
     if metric_of == schemas.MetricOfTable.VISITED_URL:
@@ -302,6 +467,8 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
     if len(data.filters) > 0:
         meta_keys = None
         for i, f in enumerate(data.filters):
+            if not isinstance(f.value, list):
+                f.value = [f.value]
             filter_type = f.type
             f.value = helper.values_for_operator(value=f.value, op=f.operator)
             f_k = f"f_value{i}"
@@ -1002,6 +1169,69 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
     return full_args, query_part


+def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None):
+    if project_id is None:
+        all_projects = projects.get_projects(tenant_id=tenant_id)
+    else:
+        all_projects = [
+            projects.get_project(tenant_id=tenant_id, project_id=int(project_id), include_last_session=False,
+                                 include_gdpr=False)]
+
+    all_projects = {int(p["projectId"]): p["name"] for p in all_projects}
+    project_ids = list(all_projects.keys())
+
+    available_keys = metadata.get_keys_by_projects(project_ids)
+    for i in available_keys:
+        available_keys[i]["user_id"] = schemas.FilterType.USER_ID
+        available_keys[i]["user_anonymous_id"] = schemas.FilterType.USER_ANONYMOUS_ID
+    results = {}
+    for i in project_ids:
+        if m_key not in available_keys[i].values():
+            available_keys.pop(i)
+            results[i] = {"total": 0, "sessions": [], "missingMetadata": True}
+    project_ids = list(available_keys.keys())
+    if len(project_ids) > 0:
+        with pg_client.PostgresClient() as cur:
+            sub_queries = []
+            for i in project_ids:
+                col_name = list(available_keys[i].keys())[list(available_keys[i].values()).index(m_key)]
+                sub_queries.append(cur.mogrify(
+                    f"(SELECT COALESCE(COUNT(s.*)) AS count FROM public.sessions AS s WHERE s.project_id = %(id)s AND s.{col_name} = %(value)s) AS \"{i}\"",
+                    {"id": i, "value": m_value}).decode('UTF-8'))
+            query = f"""SELECT {", ".join(sub_queries)};"""
+            cur.execute(query=query)
+
+            rows = cur.fetchone()
+
+            sub_queries = []
+            for i in rows.keys():
+                results[i] = {"total": rows[i], "sessions": [], "missingMetadata": False, "name": all_projects[int(i)]}
+                if rows[i] > 0:
+                    col_name = list(available_keys[int(i)].keys())[list(available_keys[int(i)].values()).index(m_key)]
+                    sub_queries.append(
+                        cur.mogrify(
+                            f"""(
+                                SELECT *
+                                FROM (
+                                    SELECT DISTINCT ON(favorite_sessions.session_id, s.session_id) {SESSION_PROJECTION_COLS}
+                                    FROM public.sessions AS s LEFT JOIN (SELECT session_id
+                                                                         FROM public.user_favorite_sessions
+                                                                         WHERE user_favorite_sessions.user_id = %(userId)s
+                                                                        ) AS favorite_sessions USING (session_id)
+                                    WHERE s.project_id = %(id)s AND s.duration IS NOT NULL AND s.{col_name} = %(value)s
+                                ) AS full_sessions
+                                ORDER BY favorite DESC, issue_score DESC
+                                LIMIT 10
+                            )""",
+                            {"id": i, "value": m_value, "userId": user_id}).decode('UTF-8'))
+            if len(sub_queries) > 0:
+                cur.execute("\nUNION\n".join(sub_queries))
+                rows = cur.fetchall()
+                for i in rows:
+                    results[str(i["project_id"])]["sessions"].append(helper.dict_to_camel_case(i))
+    return results
+
+
 def get_user_sessions(project_id, user_id, start_date, end_date):
     with pg_client.PostgresClient() as cur:
         constraints = ["s.project_id = %(projectId)s", "s.user_id = %(userId)s"]
@@ -1111,3 +1341,28 @@ def check_recording_status(project_id: int) -> dict:
         "recordingStatus": row["recording_status"],
         "sessionsCount": row["sessions_count"]
     }
+
+
+def search_sessions_by_ids(project_id: int, session_ids: list, sort_by: str = 'session_id',
+                           ascending: bool = False) -> dict:
+    if session_ids is None or len(session_ids) == 0:
+        return {"total": 0, "sessions": []}
+    with pg_client.PostgresClient() as cur:
+        meta_keys = metadata.get(project_id=project_id)
+        params = {"project_id": project_id, "session_ids": tuple(session_ids)}
+        order_direction = 'ASC' if ascending else 'DESC'
+        main_query = cur.mogrify(f"""SELECT {SESSION_PROJECTION_BASE_COLS}
+                                        {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
+                                     FROM public.sessions AS s
+                                     WHERE project_id=%(project_id)s
+                                       AND session_id IN %(session_ids)s
+                                     ORDER BY {sort_by} {order_direction};""", params)
+
+        cur.execute(main_query)
+        rows = cur.fetchall()
+        if len(meta_keys) > 0:
+            for s in rows:
+                s["metadata"] = {}
+                for m in meta_keys:
+                    s["metadata"][m["key"]] = s.pop(f'metadata_{m["index"]}')
+        return {"total": len(rows), "sessions": helper.list_to_camel_case(rows)}
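A minimal usage sketch for the `search_sessions_by_ids` helper added above, assuming it is exposed from the sessions module of the API (the import path and the concrete ids are assumptions, not taken from the diff):

# hypothetical call site; module path and values are placeholders
from chalicelib.core import sessions
result = sessions.search_sessions_by_ids(project_id=1, session_ids=[123, 456],
                                          sort_by="start_ts", ascending=True)
print(result["total"], [s["sessionId"] for s in result["sessions"]])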
@@ -1,13 +0,0 @@
-import logging
-
-from decouple import config
-
-logger = logging.getLogger(__name__)
-from . import sessions_pg
-from . import sessions_pg as sessions_legacy
-from . import sessions_ch
-
-if config("EXP_METRICS", cast=bool, default=False):
-    from . import sessions_ch as sessions
-else:
-    from . import sessions_pg as sessions
File diff suppressed because it is too large
@@ -1 +0,0 @@
-from .sessions_devtool import *
@@ -1 +0,0 @@
-from .sessions_favorite import *
@@ -1,82 +0,0 @@
-from functools import cache
-
-import schemas
-from chalicelib.core.autocomplete import autocomplete
-from chalicelib.utils.event_filter_definition import SupportedFilter
-
-
-@cache
-def supported_types():
-    return {
-        schemas.FilterType.USER_OS: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
-        schemas.FilterType.USER_BROWSER: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
-        schemas.FilterType.USER_DEVICE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
-        schemas.FilterType.USER_COUNTRY: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
-        schemas.FilterType.USER_CITY: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
-        schemas.FilterType.USER_STATE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
-        schemas.FilterType.USER_ID: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
-        schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
-        schemas.FilterType.REV_ID: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
-        schemas.FilterType.REFERRER: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
-        schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
-        schemas.FilterType.UTM_MEDIUM: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
-        schemas.FilterType.UTM_SOURCE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
-        # Mobile
-        schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
-        schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(
-                typename=schemas.FilterType.USER_DEVICE_MOBILE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
-        schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
-        schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
-        schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
-        schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
-            get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
-            query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
-
-    }
-
-
-def search(text: str, meta_type: schemas.FilterType, project_id: int):
-    rows = []
-    if meta_type not in list(supported_types().keys()):
-        return {"errors": ["unsupported type"]}
-    rows += supported_types()[meta_type].get(project_id=project_id, text=text)
-    # for IOS events autocomplete
-    # if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()):
-    #     rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text)
-    return {"data": rows}
@@ -1,268 +0,0 @@
-import logging
-
-import schemas
-from chalicelib.core import metadata, projects
-from . import sessions_favorite, sessions_legacy
-from chalicelib.utils import pg_client, helper
-
-logger = logging.getLogger(__name__)
-
-SESSION_PROJECTION_BASE_COLS = """s.project_id,
-s.session_id::text AS session_id,
-s.user_uuid,
-s.user_id,
-s.user_os,
-s.user_browser,
-s.user_device,
-s.user_device_type,
-s.user_country,
-s.user_city,
-s.user_state,
-s.start_ts,
-s.duration,
-s.events_count,
-s.pages_count,
-s.errors_count,
-s.user_anonymous_id,
-s.platform,
-s.issue_score,
-s.timezone,
-to_jsonb(s.issue_types) AS issue_types """
-
-SESSION_PROJECTION_COLS = SESSION_PROJECTION_BASE_COLS + """,
-favorite_sessions.session_id NOTNULL AS favorite,
-COALESCE((SELECT TRUE
-          FROM public.user_viewed_sessions AS fs
-          WHERE s.session_id = fs.session_id
-            AND fs.user_id = %(userId)s LIMIT 1), FALSE) AS viewed """
-
-
-# This function executes the query and return result
-def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.ProjectContext,
-                    user_id, errors_only=False, error_status=schemas.ErrorStatus.ALL,
-                    count_only=False, issue=None, ids_only=False, platform="web"):
-    if data.bookmarked:
-        data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project.project_id, user_id)
-        if data.startTimestamp is None:
-            logger.debug(f"No vault sessions found for project:{project.project_id}")
-            return {
-                'total': 0,
-                'sessions': [],
-                'src': 1
-            }
-    full_args, query_part = sessions_legacy.search_query_parts(data=data, error_status=error_status,
-                                                               errors_only=errors_only,
-                                                               favorite_only=data.bookmarked, issue=issue,
-                                                               project_id=project.project_id,
-                                                               user_id=user_id, platform=platform)
-    if data.limit is not None and data.page is not None:
-        full_args["sessions_limit"] = data.limit
-        full_args["sessions_limit_s"] = (data.page - 1) * data.limit
-        full_args["sessions_limit_e"] = data.page * data.limit
-    else:
-        full_args["sessions_limit"] = 200
-        full_args["sessions_limit_s"] = 0
-        full_args["sessions_limit_e"] = 200
-
-    meta_keys = []
-    with pg_client.PostgresClient() as cur:
-        if errors_only:
-            main_query = cur.mogrify(f"""SELECT DISTINCT er.error_id,
-                                                COALESCE((SELECT TRUE
-                                                          FROM public.user_viewed_errors AS ve
-                                                          WHERE er.error_id = ve.error_id
-                                                            AND ve.user_id = %(userId)s LIMIT 1), FALSE) AS viewed
-                                         {query_part};""", full_args)
-
-        elif count_only:
-            main_query = cur.mogrify(f"""SELECT COUNT(DISTINCT s.session_id) AS count_sessions,
-                                                COUNT(DISTINCT s.user_uuid) AS count_users
-                                         {query_part};""", full_args)
-        elif data.group_by_user:
-            g_sort = "count(full_sessions)"
-            if data.order is None:
-                data.order = schemas.SortOrderType.DESC.value
-            else:
-                data.order = data.order
-            if data.sort is not None and data.sort != 'sessionsCount':
-                sort = helper.key_to_snake_case(data.sort)
-                g_sort = f"{'MIN' if data.order == schemas.SortOrderType.DESC else 'MAX'}({sort})"
-            else:
-                sort = 'start_ts'
-
-            meta_keys = metadata.get(project_id=project.project_id)
-            main_query = cur.mogrify(f"""SELECT COUNT(*) AS count,
-                                                COALESCE(JSONB_AGG(users_sessions)
-                                                         FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
-                                         FROM (SELECT user_id,
-                                                      count(full_sessions) AS user_sessions_count,
-                                                      jsonb_agg(full_sessions) FILTER (WHERE rn <= 1) AS last_session,
-                                                      MIN(full_sessions.start_ts) AS first_session_ts,
-                                                      ROW_NUMBER() OVER (ORDER BY {g_sort} {data.order}) AS rn
-                                               FROM (SELECT *, ROW_NUMBER() OVER (PARTITION BY user_id ORDER BY {sort} {data.order}) AS rn
-                                                     FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
-                                                           {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
-                                                           {query_part}
-                                                          ) AS filtred_sessions
-                                                    ) AS full_sessions
-                                               GROUP BY user_id
-                                              ) AS users_sessions;""",
-                                     full_args)
-        elif ids_only:
-            main_query = cur.mogrify(f"""SELECT DISTINCT ON(s.session_id) s.session_id
-                                         {query_part}
-                                         ORDER BY s.session_id desc
-                                         LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""",
-                                     full_args)
-        else:
-            if data.order is None:
-                data.order = schemas.SortOrderType.DESC.value
-            else:
-                data.order = data.order
-            sort = 'session_id'
-            if data.sort is not None and data.sort != "session_id":
-                # sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
-                if data.sort == 'datetime':
-                    sort = 'start_ts'
-                else:
-                    sort = helper.key_to_snake_case(data.sort)
-
-            meta_keys = metadata.get(project_id=project.project_id)
-            main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
-                                                COALESCE(JSONB_AGG(full_sessions)
-                                                         FILTER (WHERE rn>%(sessions_limit_s)s AND rn<=%(sessions_limit_e)s), '[]'::JSONB) AS sessions
-                                         FROM (SELECT *, ROW_NUMBER() OVER (ORDER BY {sort} {data.order}, issue_score DESC) AS rn
-                                               FROM (SELECT DISTINCT ON(s.session_id) {SESSION_PROJECTION_COLS}
-                                                     {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
-                                                     {query_part}
-                                                     ORDER BY s.session_id desc) AS filtred_sessions
-                                               ORDER BY {sort} {data.order}, issue_score DESC) AS full_sessions;""",
-                                     full_args)
-        logger.debug("--------------------")
-        logger.debug(main_query)
-        logger.debug("--------------------")
-        try:
-            cur.execute(main_query)
-            sessions = cur.fetchone()
-        except Exception as err:
-            logger.warning("--------- SESSIONS SEARCH QUERY EXCEPTION -----------")
-            logger.warning(main_query.decode('UTF-8'))
-            logger.warning("--------- PAYLOAD -----------")
-            logger.warning(data.model_dump_json())
-            logger.warning("--------------------")
-            raise err
-        if errors_only or ids_only:
-            return helper.list_to_camel_case(cur.fetchall())
-
-        if count_only:
-            return helper.dict_to_camel_case(sessions)
-
-        total = sessions["count"]
-        sessions = sessions["sessions"]
-
-        if data.group_by_user:
-            for i, s in enumerate(sessions):
-                sessions[i] = {**s.pop("last_session")[0], **s}
-                sessions[i].pop("rn")
-                sessions[i]["metadata"] = {k["key"]: sessions[i][f'metadata_{k["index"]}'] for k in meta_keys \
-                                           if sessions[i][f'metadata_{k["index"]}'] is not None}
-        else:
-            for i, s in enumerate(sessions):
-                sessions[i]["metadata"] = {k["key"]: sessions[i][f'metadata_{k["index"]}'] for k in meta_keys \
-                                           if sessions[i][f'metadata_{k["index"]}'] is not None}
-    # if not data.group_by_user and data.sort is not None and data.sort != "session_id":
-    #     sessions = sorted(sessions, key=lambda s: s[helper.key_to_snake_case(data.sort)],
-    #                       reverse=data.order.upper() == "DESC")
-    return {
-        'total': total,
-        'sessions': helper.list_to_camel_case(sessions),
-        'src': 1
-    }
-
-
-def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None):
-    if project_id is None:
-        all_projects = projects.get_projects(tenant_id=tenant_id)
-    else:
-        all_projects = [
-            projects.get_project(tenant_id=tenant_id, project_id=int(project_id), include_last_session=False,
-                                 include_gdpr=False)]
-
-    all_projects = {int(p["projectId"]): p["name"] for p in all_projects}
-    project_ids = list(all_projects.keys())
-
-    available_keys = metadata.get_keys_by_projects(project_ids)
-    for i in available_keys:
-        available_keys[i]["user_id"] = schemas.FilterType.USER_ID
-        available_keys[i]["user_anonymous_id"] = schemas.FilterType.USER_ANONYMOUS_ID
-    results = {}
-    for i in project_ids:
-        if m_key not in available_keys[i].values():
-            available_keys.pop(i)
-            results[i] = {"total": 0, "sessions": [], "missingMetadata": True}
-    project_ids = list(available_keys.keys())
-    if len(project_ids) > 0:
-        with pg_client.PostgresClient() as cur:
-            sub_queries = []
-            for i in project_ids:
-                col_name = list(available_keys[i].keys())[list(available_keys[i].values()).index(m_key)]
-                sub_queries.append(cur.mogrify(
-                    f"(SELECT COALESCE(COUNT(s.*)) AS count FROM public.sessions AS s WHERE s.project_id = %(id)s AND s.{col_name} = %(value)s) AS \"{i}\"",
-                    {"id": i, "value": m_value}).decode('UTF-8'))
-            query = f"""SELECT {", ".join(sub_queries)};"""
-            cur.execute(query=query)
-
-            rows = cur.fetchone()
-
-            sub_queries = []
-            for i in rows.keys():
-                results[i] = {"total": rows[i], "sessions": [], "missingMetadata": False, "name": all_projects[int(i)]}
-                if rows[i] > 0:
-                    col_name = list(available_keys[int(i)].keys())[list(available_keys[int(i)].values()).index(m_key)]
-                    sub_queries.append(
-                        cur.mogrify(
-                            f"""(
-                                SELECT *
-                                FROM (
-                                    SELECT DISTINCT ON(favorite_sessions.session_id, s.session_id) {SESSION_PROJECTION_COLS}
-                                    FROM public.sessions AS s LEFT JOIN (SELECT session_id
-                                                                         FROM public.user_favorite_sessions
-                                                                         WHERE user_favorite_sessions.user_id = %(userId)s
-                                                                        ) AS favorite_sessions USING (session_id)
-                                    WHERE s.project_id = %(id)s AND s.duration IS NOT NULL AND s.{col_name} = %(value)s
-                                ) AS full_sessions
-                                ORDER BY favorite DESC, issue_score DESC
-                                LIMIT 10
-                            )""",
-                            {"id": i, "value": m_value, "userId": user_id}).decode('UTF-8'))
-            if len(sub_queries) > 0:
-                cur.execute("\nUNION\n".join(sub_queries))
-                rows = cur.fetchall()
-                for i in rows:
-                    results[str(i["project_id"])]["sessions"].append(helper.dict_to_camel_case(i))
-    return results
-
-
-def search_sessions_by_ids(project_id: int, session_ids: list, sort_by: str = 'session_id',
-                           ascending: bool = False) -> dict:
-    if session_ids is None or len(session_ids) == 0:
-        return {"total": 0, "sessions": []}
-    with pg_client.PostgresClient() as cur:
-        meta_keys = metadata.get(project_id=project_id)
-        params = {"project_id": project_id, "session_ids": tuple(session_ids)}
-        order_direction = 'ASC' if ascending else 'DESC'
-        main_query = cur.mogrify(f"""SELECT {SESSION_PROJECTION_BASE_COLS}
-                                        {"," if len(meta_keys) > 0 else ""}{",".join([f'metadata_{m["index"]}' for m in meta_keys])}
-                                     FROM public.sessions AS s
-                                     WHERE project_id=%(project_id)s
-                                       AND session_id IN %(session_ids)s
-                                     ORDER BY {sort_by} {order_direction};""", params)
-
-        cur.execute(main_query)
-        rows = cur.fetchall()
-        if len(meta_keys) > 0:
-            for s in rows:
-                s["metadata"] = {}
-                for m in meta_keys:
-                    s["metadata"][m["key"]] = s.pop(f'metadata_{m["index"]}')
-        return {"total": len(rows), "sessions": helper.list_to_camel_case(rows)}
@@ -1 +0,0 @@
-from .sessions_viewed import *
@@ -1,11 +1,9 @@
-import json
-
 from decouple import config
-
-from chalicelib.core.issue_tracking import integrations_manager, base_issue
 from chalicelib.utils import helper
-from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC
+from chalicelib.utils import pg_client
+from chalicelib.core import integrations_manager, integration_base_issue
+import json


 def __get_saved_data(project_id, session_id, issue_id, tool):
@@ -41,8 +39,8 @@ def create_new_assignment(tenant_id, project_id, session_id, creator_id, assigne
         issue = integration.issue_handler.create_new_assignment(title=title, assignee=assignee, description=description,
                                                                 issue_type=issue_type,
                                                                 integration_project_id=integration_project_id)
-    except base_issue.RequestException as e:
-        return base_issue.proxy_issues_handler(e)
+    except integration_base_issue.RequestException as e:
+        return integration_base_issue.proxy_issues_handler(e)
     if issue is None or "id" not in issue:
         return {"errors": ["something went wrong while creating the issue"]}
     with pg_client.PostgresClient() as cur:
@@ -1,10 +1,9 @@
 from decouple import config

-import schemas
 from chalicelib.utils.storage import StorageClient


-def get_devtools_keys(project_id, session_id):
+def __get_devtools_keys(project_id, session_id):
     params = {
         "sessionId": session_id,
         "projectId": project_id
@@ -14,9 +13,9 @@ def get_devtools_keys(project_id, session_id):
     ]


-def get_urls(session_id, project_id, context: schemas.CurrentContext, check_existence: bool = True):
+def get_urls(session_id, project_id, check_existence: bool = True):
     results = []
-    for k in get_devtools_keys(project_id=project_id, session_id=session_id):
+    for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
         if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
             continue
         results.append(StorageClient.get_presigned_url_for_sharing(
@@ -29,5 +28,5 @@ def get_urls(session_id, project_id, context: schemas.CurrentContext, check_exis

 def delete_mobs(project_id, session_ids):
     for session_id in session_ids:
-        for k in get_devtools_keys(project_id=project_id, session_id=session_id):
+        for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
             StorageClient.tag_for_deletion(bucket=config("sessions_bucket"), key=k)
77 api/chalicelib/core/sessions_metas.py Normal file
@@ -0,0 +1,77 @@
+import schemas
+from chalicelib.core import autocomplete
+from chalicelib.utils.event_filter_definition import SupportedFilter
+
+SUPPORTED_TYPES = {
+    schemas.FilterType.USER_OS: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
+    schemas.FilterType.USER_BROWSER: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
+    schemas.FilterType.USER_DEVICE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
+    schemas.FilterType.USER_COUNTRY: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
+    schemas.FilterType.USER_CITY: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
+    schemas.FilterType.USER_STATE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
+    schemas.FilterType.USER_ID: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
+    schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
+    schemas.FilterType.REV_ID: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
+    schemas.FilterType.REFERRER: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
+    schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
+    schemas.FilterType.UTM_MEDIUM: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
+    schemas.FilterType.UTM_SOURCE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
+    # IOS
+    schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
+    schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(
+            typename=schemas.FilterType.USER_DEVICE_MOBILE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
+    schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
+    schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
+    schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
+    schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
+        get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
+        query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
+
+}
+
+
+def search(text: str, meta_type: schemas.FilterType, project_id: int):
+    rows = []
+    if meta_type not in list(SUPPORTED_TYPES.keys()):
+        return {"errors": ["unsupported type"]}
+    rows += SUPPORTED_TYPES[meta_type].get(project_id=project_id, text=text)
+    # for IOS events autocomplete
+    # if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()):
+    #     rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text)
+    return {"data": rows}
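A minimal sketch of how the restored `sessions_metas.search` entry point would typically be called from a route handler; the project id and query text are placeholders, and only names visible in the file above are used:

# hypothetical caller; values are placeholders
import schemas
from chalicelib.core import sessions_metas
resp = sessions_metas.search(text="Chrome", meta_type=schemas.FilterType.USER_BROWSER, project_id=1)
# resp is {"errors": [...]} for unsupported meta types, otherwise {"data": [...autocomplete rows...]}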
@@ -4,8 +4,8 @@ from urllib.parse import urljoin
 from decouple import config

 import schemas
-from chalicelib.core.collaborations.collaboration_msteams import MSTeams
-from chalicelib.core.collaborations.collaboration_slack import Slack
+from chalicelib.core.collaboration_msteams import MSTeams
+from chalicelib.core.collaboration_slack import Slack
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils import sql_helper as sh
 from chalicelib.utils.TimeUTC import TimeUTC
@@ -30,7 +30,6 @@ def get_note(tenant_id, project_id, user_id, note_id, share=None):
         row = helper.dict_to_camel_case(row)
         if row:
             row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
-            row["updatedAt"] = TimeUTC.datetime_to_timestamp(row["updatedAt"])
         return row


@@ -57,74 +56,41 @@ def get_session_notes(tenant_id, project_id, session_id, user_id):

 def get_all_notes_by_project_id(tenant_id, project_id, user_id, data: schemas.SearchNoteSchema):
     with pg_client.PostgresClient() as cur:
-        # base conditions
-        conditions = [
-            "sessions_notes.project_id = %(project_id)s",
-            "sessions_notes.deleted_at IS NULL"
-        ]
-        params = {"project_id": project_id, "user_id": user_id, "tenant_id": tenant_id}
-
-        # tag conditions
-        if data.tags:
-            tag_key = "tag_value"
+        conditions = ["sessions_notes.project_id = %(project_id)s", "sessions_notes.deleted_at IS NULL"]
+        extra_params = {}
+        if data.tags and len(data.tags) > 0:
+            k = "tag_value"
             conditions.append(
-                sh.multi_conditions(f"%({tag_key})s = sessions_notes.tag", data.tags, value_key=tag_key)
-            )
-            params.update(sh.multi_values(data.tags, value_key=tag_key))
+                sh.multi_conditions(f"%({k})s = sessions_notes.tag", data.tags, value_key=k))
+            extra_params = sh.multi_values(data.tags, value_key=k)

-        # filter by ownership or shared status
         if data.shared_only:
-            conditions.append("sessions_notes.is_public IS TRUE")
+            conditions.append("sessions_notes.is_public")
         elif data.mine_only:
             conditions.append("sessions_notes.user_id = %(user_id)s")
         else:
             conditions.append("(sessions_notes.user_id = %(user_id)s OR sessions_notes.is_public)")
-
-        # search condition
-        if data.search:
-            conditions.append("sessions_notes.message ILIKE %(search)s")
-            params["search"] = f"%{data.search}%"
-
-        query = f"""
-            SELECT
-                COUNT(1) OVER () AS full_count,
-                sessions_notes.*,
-                users.name AS user_name
-            FROM
-                sessions_notes
-            INNER JOIN
-                users USING (user_id)
-            WHERE
-                {" AND ".join(conditions)}
-            ORDER BY
-                created_at {data.order}
-            LIMIT
-                %(limit)s OFFSET %(offset)s;
-        """
-        params.update({
-            "limit": data.limit,
-            "offset": data.limit * (data.page - 1)
-        })
-
-        query = cur.mogrify(query, params)
+        query = cur.mogrify(f"""SELECT COUNT(1) OVER () AS full_count, sessions_notes.*, users.name AS user_name
+                                FROM sessions_notes INNER JOIN users USING (user_id)
+                                WHERE {" AND ".join(conditions)}
+                                ORDER BY created_at {data.order}
+                                LIMIT {data.limit} OFFSET {data.limit * (data.page - 1)};""",
+                            {"project_id": project_id, "user_id": user_id, "tenant_id": tenant_id, **extra_params})
         logger.debug(query)
-        cur.execute(query)
+        cur.execute(query=query)
         rows = cur.fetchall()

         result = {"count": 0, "notes": helper.list_to_camel_case(rows)}
-        if rows:
+        if len(rows) > 0:
             result["count"] = rows[0]["fullCount"]
             for row in rows:
                 row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
                 row.pop("fullCount")

         return result


 def create(tenant_id, user_id, project_id, session_id, data: schemas.SessionNoteSchema):
     with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(f"""INSERT INTO public.sessions_notes (message, user_id, tag, session_id, project_id, timestamp, is_public, thumbnail, start_at, end_at)
-                                VALUES (%(message)s, %(user_id)s, %(tag)s, %(session_id)s, %(project_id)s, %(timestamp)s, %(is_public)s, %(thumbnail)s, %(start_at)s, %(end_at)s)
+        query = cur.mogrify(f"""INSERT INTO public.sessions_notes (message, user_id, tag, session_id, project_id, timestamp, is_public)
+                                VALUES (%(message)s, %(user_id)s, %(tag)s, %(session_id)s, %(project_id)s, %(timestamp)s, %(is_public)s)
                                 RETURNING *,(SELECT name FROM users WHERE users.user_id=%(user_id)s) AS user_name;""",
                             {"user_id": user_id, "project_id": project_id, "session_id": session_id,
                              **data.model_dump()})
@@ -145,8 +111,6 @@ def edit(tenant_id, user_id, project_id, note_id, data: schemas.SessionUpdateNot
         sub_query.append("is_public = %(is_public)s")
     if data.timestamp is not None:
         sub_query.append("timestamp = %(timestamp)s")
-
-    sub_query.append("updated_at = timezone('utc'::text, now())")
     with pg_client.PostgresClient() as cur:
         cur.execute(
             cur.mogrify(f"""UPDATE public.sessions_notes
@@ -1,10 +1,8 @@
 import schemas
 from chalicelib.core import events, metadata, events_mobile, \
-    issues, assist, canvas, user_testing
-from . import sessions_mobs, sessions_devtool
-from chalicelib.core.errors.modules import errors_helper
+    sessions_mobs, issues, assist, sessions_devtool, canvas, user_testing
+from chalicelib.utils import errors_helper
 from chalicelib.utils import pg_client, helper
-from chalicelib.core.modules import MOB_KEY, get_file_key


 def __is_mobile_session(platform):
@@ -22,7 +20,6 @@ def __group_metadata(session, project_metadata):

 def get_pre_replay(project_id, session_id):
     return {
-        **get_file_key(project_id=project_id, session_id=session_id),
         'domURL': [sessions_mobs.get_first_url(project_id=project_id, session_id=session_id, check_existence=False)]}


@@ -44,7 +41,6 @@ def get_replay(project_id, session_id, context: schemas.CurrentContext, full_dat
                            SELECT
                               s.*,
                               s.session_id::text AS session_id,
-                              {MOB_KEY}
                               (SELECT project_key FROM public.projects WHERE project_id = %(project_id)s LIMIT 1) AS project_key
                               {"," if len(extra_query) > 0 else ""}{",".join(extra_query)}
                               {(",json_build_object(" + ",".join([f"'{m}',p.{m}" for m in metadata.column_names()]) + ") AS project_metadata") if group_metadata else ''}
@@ -66,7 +62,7 @@ def get_replay(project_id, session_id, context: schemas.CurrentContext, full_dat
     else:
         data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id, check_existence=False)
         data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id,
-                                                        context=context, check_existence=False)
+                                                        check_existence=False)
         data['canvasURL'] = canvas.get_canvas_presigned_urls(session_id=session_id, project_id=project_id)
         if user_testing.has_test_signals(session_id=session_id, project_id=project_id):
             data['utxVideo'] = user_testing.get_ux_webcam_signed_url(session_id=session_id,
@@ -1,16 +1,21 @@
 import logging
-import math
-import warnings
-from collections import defaultdict
-from typing import List
-
-from psycopg2.extras import RealDictRow

 import schemas
 from chalicelib.core import events, metadata
-from chalicelib.utils import pg_client, helper
 from chalicelib.utils import sql_helper as sh

+"""
+todo: remove LIMIT from the query
+"""
+
+from typing import List
+import math
+import warnings
+from collections import defaultdict
+
+from psycopg2.extras import RealDictRow
+from chalicelib.utils import pg_client, helper
+
 logger = logging.getLogger(__name__)
 SIGNIFICANCE_THRSH = 0.4
 # Taha: the value 24 was estimated in v1.15
@@ -760,6 +765,30 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
     return n_critical_issues, issues_dict, total_drop_due_to_issues


+def get_top_insights(filter_d: schemas.CardSeriesFilterSchema, project_id,
+                     metric_format: schemas.MetricExtendedFormatType):
+    output = []
+    stages = filter_d.events
+
+    if len(stages) == 0:
+        logger.debug("no stages found")
+        return output, 0
+
+    # The result of the multi-stage query
+    rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)
+    # Obtain the first part of the output
+    stages_list = get_stages(stages, rows, metric_format=metric_format)
+    if len(rows) == 0:
+        return stages_list, 0
+
+    # Obtain the second part of the output
+    total_drop_due_to_issues = get_issues(stages, rows,
+                                          first_stage=1,
+                                          last_stage=len(filter_d.events),
+                                          drop_only=True)
+    return stages_list, total_drop_due_to_issues
+
+
 def get_issues_list(filter_d: schemas.CardSeriesFilterSchema, project_id, first_stage=None, last_stage=None):
     output = dict({"total_drop_due_to_issues": 0, "critical_issues_count": 0, "significant": [], "insignificant": []})
     stages = filter_d.events
@@ -3,7 +3,7 @@ from urllib.parse import urlparse
 import requests
 from decouple import config

-from chalicelib.core.sourcemaps import sourcemaps_parser
+from chalicelib.core import sourcemaps_parser
 from chalicelib.utils.storage import StorageClient, generators


@@ -18,7 +18,7 @@ def refresh_spot_jwt_iat_jti(user_id):
                             {"user_id": user_id})
         cur.execute(query)
         row = cur.fetchone()
-        return users.RefreshSpotJWTs(**row)
+        return row.get("spot_jwt_iat"), row.get("spot_jwt_refresh_jti"), row.get("spot_jwt_refresh_iat")


 def logout(user_id: int):
@@ -26,13 +26,13 @@ def logout(user_id: int):


 def refresh(user_id: int, tenant_id: int = -1) -> dict:
-    j = refresh_spot_jwt_iat_jti(user_id=user_id)
+    spot_jwt_iat, spot_jwt_r_jti, spot_jwt_r_iat = refresh_spot_jwt_iat_jti(user_id=user_id)
     return {
-        "jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=j.spot_jwt_iat,
+        "jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=spot_jwt_iat,
                                         aud=AUDIENCE, for_spot=True),
-        "refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=j.spot_jwt_refresh_iat,
-                                                         aud=AUDIENCE, jwt_jti=j.spot_jwt_refresh_jti, for_spot=True),
-        "refreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) - (j.spot_jwt_iat - j.spot_jwt_refresh_iat)
+        "refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=spot_jwt_r_iat,
+                                                         aud=AUDIENCE, jwt_jti=spot_jwt_r_jti, for_spot=True),
+        "refreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) - (spot_jwt_iat - spot_jwt_r_iat)
     }
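The `refreshTokenMaxAge` expression above subtracts the current age of the refresh token from the configured expiration. A self-contained sketch with made-up numbers (the configured expiration value is an assumption):

# illustrative numbers only: remaining refresh-token lifetime, in seconds
JWT_SPOT_REFRESH_EXPIRATION = 604800      # assumed config value (one week)
spot_jwt_refresh_iat = 1_700_000_000      # when the refresh token was first issued
spot_jwt_iat = 1_700_250_000              # when the access JWT was just re-issued
refresh_token_max_age = JWT_SPOT_REFRESH_EXPIRATION - (spot_jwt_iat - spot_jwt_refresh_iat)
print(refresh_token_max_age)              # 354800 seconds of refresh validity left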
Some files were not shown because too many files have changed in this diff.