Compare commits

236 commits
main...assist-api
Commits in this comparison (SHA1):
e0cd1994fd, 33e2e8efb9, 27f3ec5585, 6c44970666, 3177ac7229, 8dbf7d8893, d1ed9564c2, 620fc05d6c,
985ce2812c, 685f784691, fd39e03fd1, 82a29f8623, 48fdf2c9c9, e6d91430f2, 73b7cbe261, 27ed4ee6b4,
59251af8c6, 3ff994490e, dfe9cd985d, edafda9f97, c4e4c3d2cd, f15e84086f, a7b91ddcad, 17a10da4de,
f659b24838, a4a5ce6498, 7836153fca, 2e85fc953a, ab1b5c19ec, 5708aa4b39, fee79521a1, 25adc0410d,
1f411662c0, bcb3b407e8, f25575a0a8, 4cc30cdcea, 99b25420da, 7dc6c18520, 31ff31d218, 8e4292c965,
3ef71f5044, 8b617fcbd7, 72acf77a1b, 70a10ea1d6, 7c6a52aa73, b7d2d9d77a, 28cb2ba74c, 8280c8754c,
b41248571e, b7df5c7f87, 9c0f50b2fb, 96f58b94d5, df10fa706b, 471b860841, 1b46863089, c103283766,
373d71e4f3, cde427ae4c, 7cfef90cc8, 04db655776, b91f5df89f, 7fd741348c, 2aaafa5b22, 11f9b865cf,
60a691bbaf, 3f1f6c03f2, dcd19e3c83, ced855568f, c8483df795, d544da0665, 408c3122d3, c196736c3c,
d47542830f, 055ff8f64a, 2bf92f40f7, f0f78341e7, dbb805189f, e32dbe2ee2, 3272f5b9fd, ea4e2ab198,
990e1fa1c4, 5ca97ceedd, d3b8c35058, 1b851a8b72, 553e3f6045, 3f73bae22f, 9160b42113, 36e1a2fca2,
cbbd480cca, 77ae0cac0e, 5771323800, aab8691cf5, d9ff3f4691, 09c2ce0976, 0141a42911, b55e44d450,
f70cce7e23, 8b3be469b6, dc975bc19a, c1d51b98a2, 5a51bfb984, b55b9e5515, af7b46516f, 05e0306823,
77a8371543, e4406ad26b, a8971d842b, c003057cf0, 586472c7dd, ecb192f16e, 6dc585417f, 264444c92a,
b2fcd7094b, f3b98dad8a, c27213c65d, f61c5e99b5, 6412f14b08, 0a620c6ba3, 685741f039, 4ee78e1a5c,
77735d9d72, e3065e0530, d9d4221ad3, 0bbde3e75a, 7dec8bb943, c6a5ed6c3b, 99d62fa549, c0bb05bc0f,
70258e5c1d, 6ec146b24b, 9f464e3b41, e95bdab478, 421b3d1dc5, 437a25fb97, cb55a17227, 9d160abda5,
3758cf6565, 9db5e2a8f7, e0dba41065, 8fbaf25799, 65072f607f, cb4bf932c4, 20b938365c, 8e68ebd52b,
293382ea85, ac35bf5179, eb610d1c21, ac0ccb2169, 20a57d7ca1, 856e716507, bb17f672fe, d087736df0,
ce546bcfa3, 9f681aca45, 0500f30d14, ec2c42c688, 7f0bc100f5, 522a985ef3, 634d0e8a0f, 28b4fc7598,
0d4c256ca8, 35f63a8fb1, a4e96822ed, 96f984a76a, 5f15dfafe7, b9cca6b388, 712f07988e, 08bddb3165,
3efb879cdf, ccf44fda70, ce525a4ccf, c6299c4592, a371c79151, f59a8c24f4, 8be6f63711, 8ba35b1324,
28dea3b225, 666643a6ae, 4cf688f15c, 1e57c90449, c0678bab15, 187a69a61a, 2e96a072e9, 5a410e63b3,
300a857a5c, eba22e0efa, 664f6b9014, 5bbd7cff10, 6f172d4f01, 829e1c8bde, e7d309dadf, 4bac12308a,
2aba1d9a52, 1f4e32e4f2, 49f98967d6, 356fa02094, a8e47e59ad, c760d29fb4, d77a518cf0, e04c2aa251,
e6eb41536d, 4b3ad60565, 90669b0604, f4bf1b8960, 70423c6d8e, ae313c17d4, 0e45fa53ad, fe20f83130,
d04e6686ca, 6adb45e15f, a1337faeee, 7e065ab02f, 1e2dde09b4, 3cdfe76134, 39855651d5, dd469d2349,
3d448320bf, 7b0771a581, 988b396223, fa3b585785, 91e0ebeb56, 8e68eb9a20, 13bd3d9121, 048ae0913c,
73fff8b817, 605fa96a34, 2cb33d7894, 15d427418d, ed3e553726, 7eace68de6, 8009882cef, 7365d8639c,
4c967d4bc1, 3fdf799bd7, 9aca716e6b, cf9ecdc9a4
360 changed files with 11437 additions and 6527 deletions

@@ -47,6 +47,7 @@ runs:
   "JWT_SECRET:.global.jwtSecret"
   "JWT_SPOT_REFRESH_SECRET:.chalice.env.JWT_SPOT_REFRESH_SECRET"
   "JWT_SPOT_SECRET:.global.jwtSpotSecret"
+  "JWT_SECRET:.global.tokenSecret"
   "LICENSE_KEY:.global.enterpriseEditionLicense"
   "MINIO_ACCESS_KEY:.global.s3.accessKey"
   "MINIO_SECRET_KEY:.global.s3.secretKey"
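The hunk above adds one more `ENV_NAME:.yaml.path` mapping to the list of secrets resolved from Helm values. A minimal sketch of how such a mapping could be resolved against a parsed values file; the file name `vars.yaml` and the `resolve` helper are assumptions for illustration, not part of the workflow, which actually shells out to yq.

```python
# Illustrative only: resolve "ENV_NAME:.yaml.path" mappings against a parsed values file.
import yaml

MAPPINGS = [
    "JWT_SECRET:.global.tokenSecret",
    "LICENSE_KEY:.global.enterpriseEditionLicense",
]

def resolve(mapping: str, values: dict) -> tuple[str, object]:
    env_name, path = mapping.split(":", 1)
    node = values
    for key in path.lstrip(".").split("."):
        node = node[key]          # walk .global.tokenSecret, .global.s3.accessKey, ...
    return env_name, node

with open("vars.yaml") as f:      # assumed Helm values file
    data = yaml.safe_load(f)
for m in MAPPINGS:
    name, value = resolve(m, data)
    print(f"{name}={value}")
```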
.github/workflows/patch-build-old.yaml (8 changes)

@@ -8,11 +8,7 @@ on:
       required: true
       default: 'chalice,frontend'
     tag:
-      description: 'Tag to update.'
-      required: true
-      type: string
-    branch:
-      description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
+      description: 'Tag to build patches from.'
       required: true
       type: string

@@ -77,7 +73,7 @@ jobs:
       - name: Get HEAD Commit ID
         run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
       - name: Define Branch Name
-        run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV
+        run: echo "BRANCH_NAME=patch/main/${HEAD_COMMIT_ID}" >> $GITHUB_ENV

       - name: Build
         id: build-image
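The branch name is now derived from the HEAD commit instead of a workflow input. The same derivation in a small Python sketch, assumed to run inside the checked-out repository:

```python
# Compute patch/main/<sha> exactly as the workflow step does with git rev-parse.
import subprocess

head = subprocess.run(["git", "rev-parse", "HEAD"],
                      capture_output=True, text=True, check=True).stdout.strip()
branch_name = f"patch/main/{head}"
print(branch_name)  # e.g. patch/main/<40-character commit id>
```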
.github/workflows/patch-build.yaml (246 changes)

@@ -2,6 +2,7 @@

 on:
   workflow_dispatch:
+    description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
     inputs:
       services:
         description: 'Comma separated names of services to build(in small letters).'

@@ -19,20 +20,12 @@ jobs:
       DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
-          fetch-depth: 0
-          token: ${{ secrets.GITHUB_TOKEN }}
+          fetch-depth: 1
       - name: Rebase with main branch, to make sure the code has latest main changes
-        if: github.ref != 'refs/heads/main'
         run: |
-          git remote -v
-          git config --global user.email "action@github.com"
-          git config --global user.name "GitHub Action"
-          git config --global rebase.autoStash true
-          git fetch origin main:main
-          git rebase main
-          git log -3
+          git pull --rebase origin main

       - name: Downloading yq
         run: |

@@ -55,8 +48,6 @@ jobs:
           aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

       - uses: depot/setup-action@v1
-        env:
-          DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
       - name: Get HEAD Commit ID
         run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
       - name: Define Branch Name
@@ -74,168 +65,78 @@ jobs:
           MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
           MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
           MSAAS_REPO_FOLDER: /tmp/msaas
-          SERVICES_INPUT: ${{ github.event.inputs.services }}
         run: |
-          #!/bin/bash
-          set -euo pipefail
-          # Configuration
-          readonly WORKING_DIR=$(pwd)
-          readonly BUILD_SCRIPT_NAME="build.sh"
-          readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
-          # Initialize git configuration
-          setup_git() {
-              git config --local user.email "action@github.com"
-              git config --local user.name "GitHub Action"
-              git checkout -b "$BRANCH_NAME"
-          }
-          # Get and increment image version
-          image_version() {
-              local service=$1
-              local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-              local current_version new_version
-              current_version=$(yq eval '.AppVersion' "$chart_path")
-              new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
-              echo "$new_version"
-          }
-          # Clone MSAAS repository if not exists
-          clone_msaas() {
-              if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
-                  git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
-                  cd "$MSAAS_REPO_FOLDER"
-                  cd openreplay && git fetch origin && git checkout main
-                  git log -1
-                  cd "$MSAAS_REPO_FOLDER"
-                  bash git-init.sh
-                  git checkout
-              fi
-          }
-          # Build managed services
-          build_managed() {
-              local service=$1
-              local version=$2
-              echo "Building managed service: $service"
-              clone_msaas
-              if [[ $service == 'chalice' ]]; then
-                  cd "$MSAAS_REPO_FOLDER/openreplay/api"
-              else
-                  cd "$MSAAS_REPO_FOLDER/openreplay/$service"
-              fi
-              local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
-              echo "Executing: $build_cmd"
-              if ! eval "$build_cmd" 2>&1; then
-                  echo "Build failed for $service"
-                  exit 1
-              fi
-          }
-          # Build service with given arguments
-          build_service() {
-              local service=$1
-              local version=$2
-              local build_args=$3
-              local build_script=${4:-$BUILD_SCRIPT_NAME}
-              local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
-              echo "Executing: $command"
-              eval "$command"
-          }
-          # Update chart version and commit changes
-          update_chart_version() {
-              local service=$1
-              local version=$2
-              local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-              # Ensure we're in the original working directory/repository
-              cd "$WORKING_DIR"
-              yq eval ".AppVersion = \"$version\"" -i "$chart_path"
-              git add "$chart_path"
-              git commit -m "Increment $service chart version to $version"
-              git push --set-upstream origin "$BRANCH_NAME"
-              cd -
-          }
-          # Main execution
-          main() {
-              setup_git
-              # Get backend services list
-              ls backend/cmd >"$BACKEND_SERVICES_FILE"
-              # Parse services input (fix for GitHub Actions syntax)
-              echo "Services: ${SERVICES_INPUT:-$1}"
-              IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
-              # Process each service
-              for service in "${services[@]}"; do
-                  echo "Processing service: $service"
-                  cd "$WORKING_DIR"
-                  local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
-                  # Determine build configuration based on service type
-                  if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
-                      # Backend service
-                      cd backend
-                      foss_build_args="nil $service"
-                      ee_build_args="ee $service"
-                  else
-                      # Non-backend service
-                      case "$service" in
-                      chalice | alerts | crons)
-                          cd "$WORKING_DIR/api"
-                          ;;
-                      *)
-                          cd "$service"
-                          ;;
-                      esac
-                      # Special build scripts for alerts/crons
-                      if [[ $service == 'alerts' || $service == 'crons' ]]; then
-                          build_script="build_${service}.sh"
-                      fi
-                      ee_build_args="ee"
-                  fi
-                  # Get version and build
-                  local version
-                  version=$(image_version "$service")
-                  # Build FOSS and EE versions
-                  build_service "$service" "$version" "$foss_build_args"
-                  build_service "$service" "${version}-ee" "$ee_build_args"
-                  # Build managed version for specific services
-                  if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
-                      echo "Nothing to build in managed for service $service"
-                  else
-                      build_managed "$service" "$version"
-                  fi
-                  # Update chart and commit
-                  update_chart_version "$service" "$version"
-              done
-              cd "$WORKING_DIR"
-              # Cleanup
-              rm -f "$BACKEND_SERVICES_FILE"
-          }
-          echo "Working directory: $WORKING_DIR"
-          # Run main function with all arguments
-          main "$SERVICES_INPUT"
+          set -exo pipefail
+          git config --local user.email "action@github.com"
+          git config --local user.name "GitHub Action"
+          git checkout -b $BRANCH_NAME
+          working_dir=$(pwd)
+          function image_version(){
+              local service=$1
+              chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
+              current_version=$(yq eval '.AppVersion' $chart_path)
+              new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
+              echo $new_version
+              # yq eval ".AppVersion = \"$new_version\"" -i $chart_path
+          }
+          function clone_msaas() {
+              [ -d $MSAAS_REPO_FOLDER ] || {
+                  git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
+                  cd $MSAAS_REPO_FOLDER
+                  cd openreplay && git fetch origin && git checkout main # This have to be changed to specific tag
+                  git log -1
+                  cd $MSAAS_REPO_FOLDER
+                  bash git-init.sh
+                  git checkout
+              }
+          }
+          function build_managed() {
+              local service=$1
+              local version=$2
+              echo building managed
+              clone_msaas
+              if [[ $service == 'chalice' ]]; then
+                  cd $MSAAS_REPO_FOLDER/openreplay/api
+              else
+                  cd $MSAAS_REPO_FOLDER/openreplay/$service
+              fi
+              IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
+          }
+          # Checking for backend images
+          ls backend/cmd >> /tmp/backend.txt
+          echo Services: "${{ github.event.inputs.services }}"
+          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
+          BUILD_SCRIPT_NAME="build.sh"
+          # Build FOSS
+          for SERVICE in "${SERVICES[@]}"; do
+              # Check if service is backend
+              if grep -q $SERVICE /tmp/backend.txt; then
+                  cd backend
+                  foss_build_args="nil $SERVICE"
+                  ee_build_args="ee $SERVICE"
+              else
+                  [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
+                  [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
+                  ee_build_args="ee"
+              fi
+              version=$(image_version $SERVICE)
+              echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+              IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+              echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
+              IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
+              if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
+                  IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+                  echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+              else
+                  build_managed $SERVICE $version
+              fi
+              cd $working_dir
+              chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
+              yq eval ".AppVersion = \"$version\"" -i $chart_path
+              git add $chart_path
+              git commit -m "Increment $SERVICE chart version"
+              git push --set-upstream origin $BRANCH_NAME
+          done

       - name: Create Pull Request
         uses: repo-sync/pull-request@v2
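Both versions of the build script bump a service's chart version with the same awk one-liner. A minimal Python equivalent of that `image_version` helper: read `AppVersion` from the service's Chart.yaml and increment the last (patch) component. The chart path comes from the script; using PyYAML instead of yq is an assumption made for the sketch.

```python
import yaml

def image_version(working_dir: str, service: str) -> str:
    chart_path = f"{working_dir}/scripts/helmcharts/openreplay/charts/{service}/Chart.yaml"
    with open(chart_path) as f:
        current = str(yaml.safe_load(f)["AppVersion"])   # e.g. "1.22.3"
    parts = current.split(".")
    parts[-1] = str(int(parts[-1]) + 1)                  # awk: $NF += 1
    return ".".join(parts)                               # e.g. "1.22.4"
```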
@@ -246,7 +147,8 @@ jobs:
           pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
           pr_body: |
             This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
-            Once this PR is merged, tag update job will run automatically.
+            Once this PR is merged, To update the latest tag, run the following workflow.
+            https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml

       # - name: Debug Job
       #   if: ${{ failure() }}
.github/workflows/tracker-tests.yaml (18 changes)

@@ -22,22 +22,14 @@ jobs:
       - name: Cache tracker modules
         uses: actions/cache@v3
         with:
-          path: tracker/tracker/node_modules
-          key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/bun.lockb') }}
-          restore-keys: |
-            test_tracker_build{{ runner.OS }}-build-
-            test_tracker_build{{ runner.OS }}-
-      - name: Cache tracker-assist modules
-        uses: actions/cache@v3
-        with:
-          path: tracker/tracker-assist/node_modules
-          key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/bun.lockb') }}
+          path: tracker/node_modules
+          key: ${{ runner.OS }}-test_tracker_build-${{ hashFiles('**/bun.lock') }}
           restore-keys: |
             test_tracker_build{{ runner.OS }}-build-
             test_tracker_build{{ runner.OS }}-
       - name: Setup Testing packages
         run: |
-          cd tracker/tracker
+          cd tracker
           bun install
       - name: Jest tests
         run: |

@@ -47,10 +39,6 @@ jobs:
         run: |
           cd tracker/tracker
           bun run build
-      - name: (TA) Setup Testing packages
-        run: |
-          cd tracker/tracker-assist
-          bun install
       - name: (TA) Jest tests
         run: |
           cd tracker/tracker-assist
.github/workflows/update-tag.yaml (47 changes)

@@ -1,42 +1,35 @@
 on:
-  pull_request:
-    types: [closed]
-    branches:
-      - main
-name: Release tag update --force
+  workflow_dispatch:
+    description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
+    inputs:
+      services:
+        description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
+        required: true
+        default: "false"
+
+name: Force Push tag with main branch HEAD

 jobs:
   deploy:
     name: Build Patch from main
     runs-on: ubuntu-latest
-    if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
+    env:
+      DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
+      DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
     steps:
       - name: Checkout
         uses: actions/checkout@v2

-      - name: Get latest release tag using GitHub API
-        id: get-latest-tag
-        run: |
-          LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-            "https://api.github.com/repos/${{ github.repository }}/releases/latest" \
-            | jq -r .tag_name)
-
-          # Fallback to git command if API doesn't return a tag
-          if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
-            echo "Not found latest tag"
-            exit 100
-          fi
-
-          echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
-          echo "Latest tag: $LATEST_TAG"

       - name: Set Remote with GITHUB_TOKEN
         run: |
           git config --unset http.https://github.com/.extraheader
-          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}
+          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git

       - name: Push main branch to tag
         run: |
+          git fetch --tags
           git checkout main
-          echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
-          git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
+          git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
+      # - name: Debug Job
+      #   if: ${{ failure() }}
+      #   uses: mxschmitt/action-tmate@v3
+      #   with:
+      #     limit-access-to-actor: true
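The step removed above queried the GitHub API for the latest release tag and failed when none was found; the new workflow resolves the tag with `git tag --list 'v[0-9]*'` instead. A small Python sketch of the removed API call, with the repository name and token as placeholders:

```python
import os
import sys
import requests

repo = "openreplay/openreplay"          # placeholder: ${{ github.repository }}
resp = requests.get(
    f"https://api.github.com/repos/{repo}/releases/latest",
    headers={"Authorization": f"token {os.environ['GITHUB_TOKEN']}"},
    timeout=10,
)
latest_tag = resp.json().get("tag_name")
if not latest_tag or latest_tag == "null":
    sys.exit("Not found latest tag")
print(f"Latest tag: {latest_tag}")
```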
.gitignore (1 change)

@@ -7,3 +7,4 @@ node_modules
 **/*.envrc
 .idea
 *.mob*
+install-state.gz
api/Pipfile (11 changes)

@@ -6,16 +6,15 @@ name = "pypi"
 [packages]
 urllib3 = "==2.3.0"
 requests = "==2.32.3"
-boto3 = "==1.36.12"
+boto3 = "==1.37.21"
 pyjwt = "==2.10.1"
 psycopg2-binary = "==2.9.10"
-psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
-clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
+psycopg = {extras = ["pool", "binary"], version = "==3.2.6"}
 clickhouse-connect = "==0.8.15"
-elasticsearch = "==8.17.1"
+elasticsearch = "==8.17.2"
 jira = "==3.8.0"
-cachetools = "==5.5.1"
+cachetools = "==5.5.2"
-fastapi = "==0.115.8"
+fastapi = "==0.115.12"
 uvicorn = {extras = ["standard"], version = "==0.34.0"}
 python-decouple = "==3.8"
 pydantic = {extras = ["email"], version = "==2.10.6"}
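The Pipfile keeps psycopg pinned with the `pool` extra (now 3.2.6), which ships the `psycopg_pool` package. A minimal usage sketch, with a placeholder DSN; this is an illustration, not code from the repository:

```python
from psycopg_pool import ConnectionPool

# Placeholder connection string; real deployments read it from the environment.
pool = ConnectionPool("postgresql://user:pass@localhost:5432/openreplay",
                      min_size=1, max_size=4)
with pool.connection() as conn:          # borrows a pooled connection
    with conn.cursor() as cur:
        cur.execute("SELECT 1")
        print(cur.fetchone())
pool.close()
```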
@@ -16,7 +16,7 @@ from chalicelib.utils import helper
 from chalicelib.utils import pg_client, ch_client
 from crons import core_crons, core_dynamic_crons
 from routers import core, core_dynamic
-from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics
+from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_analytics

 loglevel = config("LOGLEVEL", default=logging.WARNING)
 print(f">Loglevel set to: {loglevel}")

@@ -129,6 +129,6 @@ app.include_router(spot.public_app)
 app.include_router(spot.app)
 app.include_router(spot.app_apikey)

-app.include_router(product_anaytics.public_app)
-app.include_router(product_anaytics.app)
-app.include_router(product_anaytics.app_apikey)
+app.include_router(product_analytics.public_app, prefix="/pa")
+app.include_router(product_analytics.app, prefix="/pa")
+app.include_router(product_analytics.app_apikey, prefix="/pa")
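The change above fixes the misspelled module name and mounts the product-analytics routers under a `/pa` prefix. A minimal FastAPI illustration of that mechanism; the route path here is an example, not one of the actual OpenReplay endpoints:

```python
from fastapi import APIRouter, FastAPI

app = FastAPI()
public_app = APIRouter()

@public_app.get("/events")
def list_events():
    return {"list": []}

# Same pattern as app.include_router(product_analytics.public_app, prefix="/pa"):
app.include_router(public_app, prefix="/pa")   # the handler is now served at /pa/events
```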
@@ -85,8 +85,7 @@ def __generic_query(typename, value_length=None):
                     ORDER BY value"""

     if value_length is None or value_length > 2:
-        return f"""SELECT DISTINCT ON(value,type) value, type
-                   ((SELECT DISTINCT value, type
+        return f"""(SELECT DISTINCT value, type
                     FROM {TABLE}
                     WHERE
                       project_id = %(project_id)s

@@ -102,7 +101,7 @@ def __generic_query(typename, value_length=None):
                       AND type='{typename.upper()}'
                       AND value ILIKE %(value)s
                     ORDER BY value
-                    LIMIT 5)) AS raw;"""
+                    LIMIT 5);"""
     return f"""SELECT DISTINCT value, type
                FROM {TABLE}
                WHERE

@@ -327,7 +326,7 @@ def __search_metadata(project_id, value, key=None, source=None):
                             AND {colname} ILIKE %(svalue)s LIMIT 5)""")
     with pg_client.PostgresClient() as cur:
         cur.execute(cur.mogrify(f"""\
-            SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
+            SELECT key, value, 'METADATA' AS TYPE
            FROM({" UNION ALL ".join(sub_from)}) AS all_metas
            LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
                          "svalue": helper.string_to_sql_like("^" + value)}))
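The metadata search above binds two patterns: `%(value)s` for a general match and `%(svalue)s` built from `"^" + value` for a prefix-anchored match. The real `helper.string_to_sql_like` is not shown in this diff, so the sketch below is only an assumption about what such a helper might do:

```python
# Hypothetical reimplementation, for illustration only.
def string_to_sql_like(value: str) -> str:
    value = value.strip().replace("%", r"\%").replace("_", r"\_")
    if value.startswith("^"):
        return value[1:] + "%"        # prefix match: 'checkout%'
    return "%" + value + "%"          # substring match: '%checkout%'

params = {"value": string_to_sql_like("checkout"),          # '%checkout%'
          "svalue": string_to_sql_like("^" + "checkout")}   # 'checkout%'
print(params)
```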
@@ -338,14 +338,14 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
     SELECT details.error_id as error_id,
            name, message, users, total,
            sessions, last_occurrence, first_occurrence, chart
-    FROM (SELECT error_id,
+    FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
                  JSONExtractString(toString(`$properties`), 'name') AS name,
                  JSONExtractString(toString(`$properties`), 'message') AS message,
                  COUNT(DISTINCT user_id) AS users,
                  COUNT(DISTINCT events.session_id) AS sessions,
                  MAX(created_at) AS max_datetime,
                  MIN(created_at) AS min_datetime,
-                 COUNT(DISTINCT error_id)
+                 COUNT(DISTINCT JSONExtractString(toString(`$properties`), 'error_id'))
                        OVER() AS total
           FROM {MAIN_EVENTS_TABLE} AS events
                INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id

@@ -357,7 +357,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
           GROUP BY error_id, name, message
           ORDER BY {sort} {order}
           LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
-         INNER JOIN (SELECT error_id,
+         INNER JOIN (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
                             toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
                             toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
                      FROM {MAIN_EVENTS_TABLE}

@@ -366,7 +366,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
                      GROUP BY error_id) AS time_details
                     ON details.error_id=time_details.error_id
         INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
-                    FROM (SELECT error_id,
+                    FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
                                  gs.generate_series AS timestamp,
                                  COUNT(DISTINCT session_id) AS count
                           FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
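The rewritten query derives `error_id` from the event's `$properties` JSON rather than a dedicated column. A standalone sketch of that ClickHouse expression assembled in Python; the table name below is a placeholder for `MAIN_EVENTS_TABLE`:

```python
# Build (not execute) a query that groups errors by the JSON-extracted id.
error_id_expr = "JSONExtractString(toString(`$properties`), 'error_id')"

query = f"""
SELECT {error_id_expr}            AS error_id,
       COUNT(DISTINCT session_id) AS sessions
FROM product_analytics.events     -- placeholder for MAIN_EVENTS_TABLE
WHERE project_id = %(project_id)s
GROUP BY error_id
ORDER BY sessions DESC;"""
print(query)
```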
@@ -50,8 +50,8 @@ class JIRAIntegration(base.BaseIntegration):
             cur.execute(
                 cur.mogrify(
                     """SELECT username, token, url
                        FROM public.jira_cloud
-                       WHERE user_id = %(user_id)s;""",
+                       WHERE user_id=%(user_id)s;""",
                     {"user_id": self._user_id})
             )
             data = helper.dict_to_camel_case(cur.fetchone())

@@ -95,9 +95,10 @@ class JIRAIntegration(base.BaseIntegration):
     def add(self, username, token, url, obfuscate=False):
         with pg_client.PostgresClient() as cur:
             cur.execute(
-                cur.mogrify(""" \
-                    INSERT INTO public.jira_cloud(username, token, user_id, url)
-                    VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
+                cur.mogrify("""\
+                    INSERT INTO public.jira_cloud(username, token, user_id,url)
+                    VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
+                    RETURNING username, token, url;""",
                     {"user_id": self._user_id, "username": username,
                      "token": token, "url": url})
             )

@@ -111,10 +112,9 @@ class JIRAIntegration(base.BaseIntegration):
     def delete(self):
         with pg_client.PostgresClient() as cur:
             cur.execute(
-                cur.mogrify(""" \
-                    DELETE
-                    FROM public.jira_cloud
-                    WHERE user_id = %(user_id)s;""",
+                cur.mogrify("""\
+                    DELETE FROM public.jira_cloud
+                    WHERE user_id=%(user_id)s;""",
                     {"user_id": self._user_id})
             )
             return {"state": "success"}

@@ -125,7 +125,7 @@ class JIRAIntegration(base.BaseIntegration):
             changes={
                 "username": data.username,
                 "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
-                    else self.integration["token"],
+                    else self.integration.token,
                 "url": str(data.url)
             },
             obfuscate=True
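The update path above keeps the stored token whenever the submitted value still contains the `***` mask. A small sketch of that rule; the obfuscation format itself is an assumption, only the `***` check comes from the code shown:

```python
def effective_token(submitted: str, stored: str) -> str:
    # Mirror the condition: len(token) > 0 and token.find("***") == -1
    if len(submitted) > 0 and "***" not in submitted:
        return submitted          # user pasted a fresh token
    return stored                 # masked or empty: keep the existing one

def obfuscate(token: str) -> str:
    # Hypothetical masking scheme used only for this example.
    return token[:2] + "***" + token[-2:] if len(token) > 6 else "***"

assert effective_token(obfuscate("real-token"), stored="real-token") == "real-token"
assert effective_token("new-token", stored="real-token") == "new-token"
```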
@@ -241,3 +241,25 @@ def get_colname_by_key(project_id, key):
         return None

     return index_to_colname(meta_keys[key])
+
+
+def get_for_filters(project_id):
+    with pg_client.PostgresClient() as cur:
+        query = cur.mogrify(f"""SELECT {",".join(column_names())}
+                                FROM public.projects
+                                WHERE project_id = %(project_id)s
+                                  AND deleted_at ISNULL
+                                LIMIT 1;""", {"project_id": project_id})
+        cur.execute(query=query)
+        metas = cur.fetchone()
+        results = []
+        if metas is not None:
+            for i, k in enumerate(metas.keys()):
+                if metas[k] is not None:
+                    results.append({"id": f"meta_{i}",
+                                    "name": k,
+                                    "displayName": metas[k],
+                                    "possibleTypes": ["String"],
+                                    "autoCaptured": False,
+                                    "icon": None})
+        return {"total": len(results), "list": results}
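For reference when wiring the new `get_for_filters()` to a filters UI, this is the shape of the payload it produces. The metadata column values below are invented for the example; the field names mirror the function's output:

```python
example = {
    "total": 2,
    "list": [
        {"id": "meta_0", "name": "metadata_1", "displayName": "plan",
         "possibleTypes": ["String"], "autoCaptured": False, "icon": None},
        {"id": "meta_1", "name": "metadata_2", "displayName": "tenant",
         "possibleTypes": ["String"], "autoCaptured": False, "icon": None},
    ],
}
assert example["total"] == len(example["list"])
```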
@@ -6,7 +6,7 @@ from chalicelib.utils import helper
 from chalicelib.utils import sql_helper as sh


-def filter_stages(stages: List[schemas.SessionSearchEventSchema2]):
+def filter_stages(stages: List[schemas.SessionSearchEventSchema]):
     ALLOW_TYPES = [schemas.EventType.CLICK, schemas.EventType.INPUT,
                    schemas.EventType.LOCATION, schemas.EventType.CUSTOM,
                    schemas.EventType.CLICK_MOBILE, schemas.EventType.INPUT_MOBILE,

@@ -15,10 +15,10 @@ def filter_stages(stages: List[schemas.SessionSearchEventSchema2]):


 def __parse_events(f_events: List[dict]):
-    return [schemas.SessionSearchEventSchema2.parse_obj(e) for e in f_events]
+    return [schemas.SessionSearchEventSchema.parse_obj(e) for e in f_events]


-def __fix_stages(f_events: List[schemas.SessionSearchEventSchema2]):
+def __fix_stages(f_events: List[schemas.SessionSearchEventSchema]):
     if f_events is None:
         return
     events = []
@@ -160,7 +160,7 @@ s.start_ts,
 s.duration"""


-def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
+def __get_1_url(location_condition: schemas.SessionSearchEventSchema | None, session_id: str, project_id: int,
                 start_time: int,
                 end_time: int) -> str | None:
     full_args = {

@@ -240,13 +240,13 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_i
                                                           value=[schemas.PlatformType.DESKTOP],
                                                           operator=schemas.SearchEventOperator.IS))
     if not location_condition:
-        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
+        data.events.append(schemas.SessionSearchEventSchema(type=schemas.EventType.LOCATION,
                                                              value=[],
                                                              operator=schemas.SearchEventOperator.IS_ANY))
     if no_click:
-        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
+        data.events.append(schemas.SessionSearchEventSchema(type=schemas.EventType.CLICK,
                                                              value=[],
                                                              operator=schemas.SearchEventOperator.IS_ANY))

     data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
                                                           value=[0],
@@ -24,8 +24,9 @@ def get_by_url(project_id, data: schemas.GetHeatMapPayloadSchema):
                    "main_events.`$event_name` = 'CLICK'",
                    "isNotNull(JSON_VALUE(CAST(main_events.`$properties` AS String), '$.normalized_x'))"
                    ]
-    if data.operator == schemas.SearchEventOperator.IS:
+    if data.operator == schemas.SearchEventOperator.PATTERN:
+        constraints.append("match(main_events.`$properties`.url_path'.:String,%(url)s)")
+    elif data.operator == schemas.SearchEventOperator.IS:
         constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') = %(url)s")
     else:
         constraints.append("JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path') ILIKE %(url)s")

@@ -179,7 +180,7 @@ toUnixTimestamp(s.datetime)*1000 AS start_ts,
 s.duration AS duration"""


-def __get_1_url(location_condition: schemas.SessionSearchEventSchema2 | None, session_id: str, project_id: int,
+def __get_1_url(location_condition: schemas.SessionSearchEventSchema | None, session_id: str, project_id: int,
                 start_time: int,
                 end_time: int) -> str | None:
     full_args = {

@@ -262,13 +263,13 @@ def search_short_session(data: schemas.HeatMapSessionsSearch, project_id, user_i
                                                           value=[schemas.PlatformType.DESKTOP],
                                                           operator=schemas.SearchEventOperator.IS))
     if not location_condition:
-        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.LOCATION,
+        data.events.append(schemas.SessionSearchEventSchema(type=schemas.EventType.LOCATION,
                                                              value=[],
                                                              operator=schemas.SearchEventOperator.IS_ANY))
     if no_click:
-        data.events.append(schemas.SessionSearchEventSchema2(type=schemas.EventType.CLICK,
+        data.events.append(schemas.SessionSearchEventSchema(type=schemas.EventType.CLICK,
                                                              value=[],
                                                              operator=schemas.SearchEventOperator.IS_ANY))

     data.filters.append(schemas.SessionSearchFilterSchema(type=schemas.FilterType.EVENTS_COUNT,
                                                           value=[0],
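The first hunk above adds a PATTERN branch (ClickHouse `match()`) next to the existing exact-match and ILIKE branches for `url_path`. A sketch of that three-way constraint selection in isolation; the JSON_VALUE expression is copied from the code, the `match()` expression and the operator literals (`"regex"`, `"is"`) are simplified assumptions:

```python
URL_PATH = "JSON_VALUE(CAST(main_events.`$properties` AS String), '$.url_path')"

def url_constraint(operator: str) -> str:
    if operator == "regex":                  # stand-in for SearchEventOperator.PATTERN
        return f"match({URL_PATH}, %(url)s)"
    if operator == "is":                     # stand-in for SearchEventOperator.IS
        return f"{URL_PATH} = %(url)s"
    return f"{URL_PATH} ILIKE %(url)s"       # default: substring match

print(url_constraint("regex"))
```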
@@ -241,7 +241,7 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
     :return:
     """

-    stages: List[schemas.SessionSearchEventSchema2] = filter_d.events
+    stages: List[schemas.SessionSearchEventSchema] = filter_d.events
     filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters

     stage_constraints = ["main.timestamp <= %(endTimestamp)s"]

@@ -15,7 +15,7 @@ logger = logging.getLogger(__name__)

 def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas.ProjectContext,
                       metric_format: schemas.MetricExtendedFormatType) -> List[RealDictRow]:
-    stages: List[schemas.SessionSearchEventSchema2] = filter_d.events
+    stages: List[schemas.SessionSearchEventSchema] = filter_d.events
     filters: List[schemas.SessionSearchFilterSchema] = filter_d.filters
     platform = project.platform
     constraints = ["e.project_id = %(project_id)s",
(deleted file, 14 lines)

@@ -1,14 +0,0 @@
-from chalicelib.utils.ch_client import ClickHouseClient
-
-
-def search_events(project_id: int, data: dict):
-    with ClickHouseClient() as ch_client:
-        r = ch_client.format(
-            """SELECT *
-               FROM taha.events
-               WHERE project_id=%(project_id)s
-               ORDER BY created_at;""",
-            params={"project_id": project_id})
-        x = ch_client.execute(r)
-
-        return x
api/chalicelib/core/product_analytics/__init__.py (new, empty file)

api/chalicelib/core/product_analytics/events.py (new file, 139 lines)

import logging

import schemas
from chalicelib.utils import helper
from chalicelib.utils import sql_helper as sh
from chalicelib.utils.ch_client import ClickHouseClient
from chalicelib.utils.exp_ch_helper import get_sub_condition

logger = logging.getLogger(__name__)


def get_events(project_id: int, page: schemas.PaginatedSchema):
    with ClickHouseClient() as ch_client:
        r = ch_client.format(
            """SELECT DISTINCT ON(event_name,auto_captured)
                      COUNT(1) OVER () AS total,
                      event_name AS name, display_name, description,
                      auto_captured
               FROM product_analytics.all_events
               WHERE project_id=%(project_id)s
               ORDER BY auto_captured,display_name
               LIMIT %(limit)s OFFSET %(offset)s;""",
            parameters={"project_id": project_id, "limit": page.limit, "offset": (page.page - 1) * page.limit})
        rows = ch_client.execute(r)
        if len(rows) == 0:
            return {"total": 0, "list": []}
        total = rows[0]["total"]
        for i, row in enumerate(rows):
            row["id"] = f"event_{i}"
            row["icon"] = None
            row["possibleTypes"] = ["string"]
            row.pop("total")
        return {"total": total, "list": helper.list_to_camel_case(rows)}


def search_events(project_id: int, data: schemas.EventsSearchPayloadSchema):
    with ClickHouseClient() as ch_client:
        full_args = {"project_id": project_id, "startDate": data.startTimestamp, "endDate": data.endTimestamp,
                     "projectId": project_id, "limit": data.limit, "offset": (data.page - 1) * data.limit}

        constraints = ["project_id = %(projectId)s",
                       "created_at >= toDateTime(%(startDate)s/1000)",
                       "created_at <= toDateTime(%(endDate)s/1000)"]
        ev_constraints = []
        for i, f in enumerate(data.filters):
            if not f.is_event:
                f.value = helper.values_for_operator(value=f.value, op=f.operator)
                f_k = f"f_value{i}"
                full_args = {**full_args, f_k: sh.single_value(f.value), **sh.multi_values(f.value, value_key=f_k)}
                is_any = sh.isAny_opreator(f.operator)
                is_undefined = sh.isUndefined_operator(f.operator)
                full_args = {**full_args, f_k: sh.single_value(f.value), **sh.multi_values(f.value, value_key=f_k)}
                if f.is_predefined:
                    column = f.name
                else:
                    column = f"properties.{f.name}"

                if is_any:
                    condition = f"notEmpty{column})"
                elif is_undefined:
                    condition = f"empty({column})"
                else:
                    condition = sh.multi_conditions(
                        get_sub_condition(col_name=column, val_name=f_k, operator=f.operator),
                        values=f.value, value_key=f_k)
                constraints.append(condition)

            else:
                e_k = f"e_value{i}"
                full_args = {**full_args, e_k: f.name}
                condition = f"`$event_name` = %({e_k})s"
                sub_conditions = []
                for j, ef in enumerate(f.properties.filters):
                    p_k = f"e_{i}_p_{j}"
                    full_args = {**full_args, **sh.multi_values(ef.value, value_key=p_k)}
                    if ef.is_predefined:
                        sub_condition = get_sub_condition(col_name=ef.name, val_name=p_k, operator=ef.operator)
                    else:
                        sub_condition = get_sub_condition(col_name=f"properties.{ef.name}",
                                                          val_name=p_k, operator=ef.operator)
                    sub_conditions.append(sh.multi_conditions(sub_condition, ef.value, value_key=p_k))
                if len(sub_conditions) > 0:
                    condition += " AND (" + (" " + f.properties.operator + " ").join(sub_conditions) + ")"

                ev_constraints.append(condition)

        constraints.append("(" + " OR ".join(ev_constraints) + ")")
        query = ch_client.format(
            f"""SELECT COUNT(1) OVER () AS total,
                       event_id,
                       `$event_name`,
                       created_at,
                       `distinct_id`,
                       `$browser`,
                       `$import`,
                       `$os`,
                       `$country`,
                       `$state`,
                       `$city`,
                       `$screen_height`,
                       `$screen_width`,
                       `$source`,
                       `$user_id`,
                       `$device`
                FROM product_analytics.events
                WHERE {" AND ".join(constraints)}
                ORDER BY created_at
                LIMIT %(limit)s OFFSET %(offset)s;""",
            parameters=full_args)
        rows = ch_client.execute(query)
        if len(rows) == 0:
            return {"total": 0, "rows": [], "src": 2}
        total = rows[0]["total"]
        for r in rows:
            r.pop("total")
        return {"total": total, "rows": rows, "src": 2}


def get_lexicon(project_id: int, page: schemas.PaginatedSchema):
    with ClickHouseClient() as ch_client:
        r = ch_client.format(
            """SELECT COUNT(1) OVER () AS total,
                      all_events.event_name AS name,
                      *
               FROM product_analytics.all_events
               WHERE project_id=%(project_id)s
               ORDER BY display_name
               LIMIT %(limit)s OFFSET %(offset)s;""",
            parameters={"project_id": project_id, "limit": page.limit, "offset": (page.page - 1) * page.limit})
        rows = ch_client.execute(r)
        if len(rows) == 0:
            return {"total": 0, "list": []}
        total = rows[0]["total"]
        for i, row in enumerate(rows):
            row["id"] = f"event_{i}"
            row["icon"] = None
            row["possibleTypes"] = ["string"]
            row.pop("total")
        return {"total": total, "list": helper.list_to_camel_case(rows)}
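The new module pushes pagination into SQL with `offset = (page - 1) * limit`, computes the grand total with `COUNT(1) OVER ()`, and then strips that column from each returned row. An isolated sketch of the strip-the-window-total pattern with plain dicts, independent of ClickHouse:

```python
def strip_window_total(rows: list[dict]) -> dict:
    if not rows:
        return {"total": 0, "list": []}
    total = rows[0]["total"]     # COUNT(1) OVER () repeats the same total on every row
    for row in rows:
        row.pop("total")
    return {"total": total, "list": rows}

page_rows = [{"total": 42, "name": "click"}, {"total": 42, "name": "input"}]
print(strip_window_total(page_rows))   # {'total': 42, 'list': [... 2 rows ...]}
```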
api/chalicelib/core/product_analytics/properties.py (new file, 83 lines)

from chalicelib.utils import helper, exp_ch_helper
from chalicelib.utils.ch_client import ClickHouseClient
import schemas


def get_all_properties(project_id: int, page: schemas.PaginatedSchema):
    with ClickHouseClient() as ch_client:
        r = ch_client.format(
            """SELECT COUNT(1) OVER () AS total,
                      property_name AS name, display_name,
                      array_agg(DISTINCT event_properties.value_type) AS possible_types
               FROM product_analytics.all_properties
                    LEFT JOIN product_analytics.event_properties USING (project_id, property_name)
               WHERE all_properties.project_id=%(project_id)s
               GROUP BY property_name,display_name
               ORDER BY display_name
               LIMIT %(limit)s OFFSET %(offset)s;""",
            parameters={"project_id": project_id,
                        "limit": page.limit,
                        "offset": (page.page - 1) * page.limit})
        properties = ch_client.execute(r)
        if len(properties) == 0:
            return {"total": 0, "list": []}
        total = properties[0]["total"]
        properties = helper.list_to_camel_case(properties)
        for i, p in enumerate(properties):
            p["id"] = f"prop_{i}"
            p["icon"] = None
            p["possibleTypes"] = exp_ch_helper.simplify_clickhouse_types(p["possibleTypes"])
            p.pop("total")
        return {"total": total, "list": properties}


def get_event_properties(project_id: int, event_name):
    with ClickHouseClient() as ch_client:
        r = ch_client.format(
            """SELECT all_properties.property_name,
                      all_properties.display_name
               FROM product_analytics.event_properties
                    INNER JOIN product_analytics.all_properties USING (property_name)
               WHERE event_properties.project_id=%(project_id)s
                 AND all_properties.project_id=%(project_id)s
                 AND event_properties.event_name=%(event_name)s
               ORDER BY created_at;""",
            parameters={"project_id": project_id, "event_name": event_name})
        properties = ch_client.execute(r)

        return helper.list_to_camel_case(properties)


def get_lexicon(project_id: int, page: schemas.PaginatedSchema):
    with ClickHouseClient() as ch_client:
        r = ch_client.format(
            """SELECT COUNT(1) OVER () AS total,
                      all_properties.property_name AS name,
                      all_properties.*,
                      possible_types.values AS possible_types,
                      possible_values.values AS sample_values
               FROM product_analytics.all_properties
                    LEFT JOIN (SELECT project_id, property_name, array_agg(DISTINCT value_type) AS values
                               FROM product_analytics.event_properties
                               WHERE project_id=%(project_id)s
                               GROUP BY 1, 2) AS possible_types
                              USING (project_id, property_name)
                    LEFT JOIN (SELECT project_id, property_name, array_agg(DISTINCT value) AS values
                               FROM product_analytics.property_values_samples
                               WHERE project_id=%(project_id)s
                               GROUP BY 1, 2) AS possible_values USING (project_id, property_name)
               WHERE project_id=%(project_id)s
               ORDER BY display_name
               LIMIT %(limit)s OFFSET %(offset)s;""",
            parameters={"project_id": project_id,
                        "limit": page.limit,
                        "offset": (page.page - 1) * page.limit})
        properties = ch_client.execute(r)
        if len(properties) == 0:
            return {"total": 0, "list": []}
        total = properties[0]["total"]
        for i, p in enumerate(properties):
            p["id"] = f"prop_{i}"
            p["icon"] = None
            p.pop("total")
        return {"total": total, "list": helper.list_to_camel_case(properties)}
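`get_all_properties()` above relies on `exp_ch_helper.simplify_clickhouse_types`, which is not part of this diff. A hypothetical version of such a mapper, shown only to illustrate the intent of collapsing concrete ClickHouse column types into UI-level types:

```python
# Hypothetical helper; the real implementation lives in exp_ch_helper and may differ.
def simplify_clickhouse_types(ch_types: list[str]) -> list[str]:
    simple = set()
    for t in ch_types:
        t = t.lower()
        if t.startswith(("int", "uint", "float", "decimal")):
            simple.add("Number")
        elif t.startswith(("datetime", "date")):
            simple.add("Date")
        elif t.startswith("bool"):
            simple.add("Boolean")
        else:
            simple.add("String")
    return sorted(simple)

print(simplify_clickhouse_types(["String", "UInt64", "DateTime64(3)"]))
```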
@@ -6,8 +6,18 @@ logger = logging.getLogger(__name__)
 from . import sessions_pg
 from . import sessions_pg as sessions_legacy
 from . import sessions_ch
+from . import sessions_search_pg
+from . import sessions_search_pg as sessions_search_legacy

-if config("EXP_METRICS", cast=bool, default=False):
+if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
+    logger.info(">>> Using experimental sessions search")
     from . import sessions_ch as sessions
+    from . import sessions_search_ch as sessions_search
 else:
     from . import sessions_pg as sessions
+    from . import sessions_search_pg as sessions_search
+
+# if config("EXP_METRICS", cast=bool, default=False):
+#     from . import sessions_ch as sessions
+# else:
+#     from . import sessions_pg as sessions
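The package now selects its sessions-search implementation behind an `EXP_SESSIONS_SEARCH` flag read with python-decouple. A minimal illustration of the same flag-driven selection; the module names are only echoed as strings here rather than imported:

```python
from decouple import config

EXPERIMENTAL = config("EXP_SESSIONS_SEARCH", cast=bool, default=False)
backend = "sessions_search_ch" if EXPERIMENTAL else "sessions_search_pg"
# In the package itself this becomes: from . import sessions_search_ch as sessions_search
print(f"sessions search backend: {backend}")
```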
|
|
@ -6,6 +6,7 @@ from chalicelib.core import events, metadata
|
||||||
from . import performance_event, sessions_legacy
|
from . import performance_event, sessions_legacy
|
||||||
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
|
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
|
||||||
from chalicelib.utils import sql_helper as sh
|
from chalicelib.utils import sql_helper as sh
|
||||||
|
from chalicelib.utils.exp_ch_helper import get_sub_condition
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
logger = logging.getLogger(__name__)
|
||||||
|
|
||||||
|
|
@@ -48,8 +49,8 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
     query = f"""SELECT gs.generate_series AS timestamp,
                        COALESCE(COUNT(DISTINCT processed_sessions.user_id),0) AS count
                 FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
-                LEFT JOIN (SELECT multiIf(s.user_id IS NOT NULL AND s.user_id != '', s.user_id,
+                LEFT JOIN (SELECT multiIf(isNotNull(s.user_id) AND notEmpty(s.user_id), s.user_id,
-                                          s.user_anonymous_id IS NOT NULL AND s.user_anonymous_id != '',
+                                          isNotNull(s.user_anonymous_id) AND notEmpty(s.user_anonymous_id),
                                           s.user_anonymous_id, toString(s.user_uuid)) AS user_id,
                                   s.datetime AS datetime
                            {query_part}) AS processed_sessions ON(TRUE)
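The change only swaps the SQL-style `x IS NOT NULL AND x != ''` for ClickHouse's isNotNull()/notEmpty(); multiIf itself evaluates like a chained if/elif/else. Roughly, in Python terms (a sketch of the expression's behaviour, not project code):

def pick_user_id(user_id, user_anonymous_id, user_uuid):
    # Mirrors: multiIf(isNotNull(user_id) AND notEmpty(user_id), user_id,
    #                  isNotNull(user_anonymous_id) AND notEmpty(user_anonymous_id), user_anonymous_id,
    #                  toString(user_uuid))
    if user_id is not None and user_id != "":
        return user_id
    elif user_anonymous_id is not None and user_anonymous_id != "":
        return user_anonymous_id
    return str(user_uuid)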
@@ -148,12 +149,12 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
     for e in data.events:
         if e.type == schemas.EventType.LOCATION:
             if e.operator not in extra_conditions:
-                extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({
+                extra_conditions[e.operator] = schemas.SessionSearchEventSchema.model_validate({
                     "type": e.type,
                     "isEvent": True,
                     "value": [],
                     "operator": e.operator,
-                    "filters": e.filters
+                    "filters": []
                 })
             for v in e.value:
                 if v not in extra_conditions[e.operator].value:
@@ -173,12 +174,12 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
     for e in data.events:
         if e.type == schemas.EventType.REQUEST_DETAILS:
             if e.operator not in extra_conditions:
-                extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({
+                extra_conditions[e.operator] = schemas.SessionSearchEventSchema.model_validate({
                     "type": e.type,
                     "isEvent": True,
                     "value": [],
                     "operator": e.operator,
-                    "filters": e.filters
+                    "filters": []
                 })
             for v in e.value:
                 if v not in extra_conditions[e.operator].value:
@@ -253,7 +254,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
                  FROM (SELECT s.user_id AS user_id {extra_col}
                        {query_part}
                        WHERE isNotNull(user_id)
-                         AND user_id != '') AS filtred_sessions
+                         AND notEmpty(user_id)) AS filtred_sessions
                  {extra_where}
                  GROUP BY {main_col}
                  ORDER BY total DESC
@@ -277,7 +278,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
     return sessions


-def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema2):
+def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema):
     return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS,
                                                                             schemas.EventType.GRAPHQL] \
                 or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE,
@@ -330,7 +331,11 @@ def json_condition(table_alias, json_column, json_key, op, values, value_key, ch
         extract_func = "JSONExtractFloat" if numeric_type == "float" else "JSONExtractInt"
         condition = f"{extract_func}(toString({table_alias}.`{json_column}`), '{json_key}') {op} %({value_key})s"
     else:
-        condition = f"JSONExtractString(toString({table_alias}.`{json_column}`), '{json_key}') {op} %({value_key})s"
+        # condition = f"JSONExtractString(toString({table_alias}.`{json_column}`), '{json_key}') {op} %({value_key})s"
+        condition = get_sub_condition(
+            col_name=f"JSONExtractString(toString({table_alias}.`{json_column}`), '{json_key}')",
+            val_name=value_key, operator=op
+        )

     conditions.append(sh.multi_conditions(condition, values, value_key=value_key))
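get_sub_condition (added to exp_ch_helper further down in this diff) renders one parameterized comparison and falls back to ClickHouse's match() for the regex operator. A simplified stand-in showing the kind of strings it produces (the Op enum and ILIKE mapping here mirror the schemas/sql_helper definitions shown later; this is a sketch, not the project's helper):

from enum import Enum

class Op(str, Enum):           # stand-in for schemas.SearchEventOperator
    CONTAINS = "contains"
    PATTERN = "regex"

def sub_condition(col_name: str, val_name: str, operator: Op) -> str:
    # Same shape as get_sub_condition(): regex -> match(), otherwise "<col> <op> %(key)s"
    if operator == Op.PATTERN:
        return f"match({col_name}, %({val_name})s)"
    sql_op = "ILIKE" if operator == Op.CONTAINS else "="
    return f"{col_name} {sql_op} %({val_name})s"

print(sub_condition("main.`$properties`.url_path", "e_value_0", Op.CONTAINS))
# main.`$properties`.url_path ILIKE %(e_value_0)s
print(sub_condition("main.`$properties`.url_path", "e_value_0", Op.PATTERN))
# match(main.`$properties`.url_path, %(e_value_0)s)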
@@ -660,7 +665,8 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
             event.value = helper.values_for_operator(value=event.value, op=event.operator)
             full_args = {**full_args,
                          **sh.multi_values(event.value, value_key=e_k),
-                         **sh.multi_values(event.source, value_key=s_k)}
+                         **sh.multi_values(event.source, value_key=s_k),
+                         e_k: event.value[0] if len(event.value) > 0 else event.value}

             if event_type == events.EventType.CLICK.ui_type:
                 event_from = event_from % f"{MAIN_EVENTS_TABLE} AS main "
@@ -671,24 +677,44 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
                 events_conditions.append({"type": event_where[-1]})
                 if not is_any:
                     if schemas.ClickEventExtraOperator.has_value(event.operator):
-                        event_where.append(json_condition(
-                            "main",
-                            "$properties",
-                            "selector", op, event.value, e_k)
+                        # event_where.append(json_condition(
+                        #     "main",
+                        #     "$properties",
+                        #     "selector", op, event.value, e_k)
+                        # )
+                        event_where.append(
+                            sh.multi_conditions(
+                                get_sub_condition(col_name=f"main.`$properties`.selector",
+                                                  val_name=e_k, operator=event.operator),
+                                event.value, value_key=e_k)
                         )
                         events_conditions[-1]["condition"] = event_where[-1]
                     else:
                         if is_not:
-                            event_where.append(json_condition(
-                                "sub", "$properties", _column, op, event.value, e_k
-                            ))
+                            # event_where.append(json_condition(
+                            #     "sub", "$properties", _column, op, event.value, e_k
+                            # ))
+                            event_where.append(
+                                sh.multi_conditions(
+                                    get_sub_condition(col_name=f"sub.`$properties`.{_column}",
+                                                      val_name=e_k, operator=event.operator),
+                                    event.value, value_key=e_k)
+                            )
                             events_conditions_not.append(
                                 {
-                                    "type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
+                                    "type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"
+                                }
+                            )
                             events_conditions_not[-1]["condition"] = event_where[-1]
                         else:
+                            # event_where.append(
+                            #     json_condition("main", "$properties", _column, op, event.value, e_k)
+                            # )
                             event_where.append(
-                                json_condition("main", "$properties", _column, op, event.value, e_k)
+                                sh.multi_conditions(
+                                    get_sub_condition(col_name=f"main.`$properties`.{_column}",
+                                                      val_name=e_k, operator=event.operator),
+                                    event.value, value_key=e_k)
                             )
                             events_conditions[-1]["condition"] = event_where[-1]
                 else:
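sh.multi_conditions wraps one condition template so it repeats once per user-supplied value, each with its own bind parameter. A simplified stand-in showing the expected expansion (the real helper lives in chalicelib.utils.sql_helper; its exact key-suffixing and OR joining are inferred from how it is called here):

def multi_conditions(template: str, values, value_key: str, is_or: bool = True) -> str:
    # Repeat the template once per value, giving each copy its own bind parameter.
    parts = [template.replace(f"%({value_key})s", f"%({value_key}_{i})s")
             for i in range(len(values))]
    return "(" + (" OR " if is_or else " AND ").join(parts) + ")"

print(multi_conditions("main.`$properties`.selector ILIKE %(e_0)s",
                       ["#buy", "#checkout"], value_key="e_0"))
# (main.`$properties`.selector ILIKE %(e_0_0)s OR main.`$properties`.selector ILIKE %(e_0_1)s)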
@@ -870,12 +896,15 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
                 events_conditions[-1]["condition"] = []
                 if not is_any and event.value not in [None, "*", ""]:
                     event_where.append(
-                        sh.multi_conditions(f"(toString(main1.`$properties`.message) {op} %({e_k})s OR toString(main1.`$properties`.name) {op} %({e_k})s)",
-                            event.value, value_key=e_k))
+                        sh.multi_conditions(
+                            f"(toString(main1.`$properties`.message) {op} %({e_k})s OR toString(main1.`$properties`.name) {op} %({e_k})s)",
+                            event.value, value_key=e_k))
                     events_conditions[-1]["condition"].append(event_where[-1])
                     events_extra_join += f" AND {event_where[-1]}"
                 if len(event.source) > 0 and event.source[0] not in [None, "*", ""]:
-                    event_where.append(sh.multi_conditions(f"toString(main1.`$properties`.source) = %({s_k})s", event.source, value_key=s_k))
+                    event_where.append(
+                        sh.multi_conditions(f"toString(main1.`$properties`.source) = %({s_k})s", event.source,
+                                            value_key=s_k))
                     events_conditions[-1]["condition"].append(event_where[-1])
                     events_extra_join += f" AND {event_where[-1]}"
@@ -1108,12 +1137,8 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
                     is_any = sh.isAny_opreator(f.operator)
                     if is_any or len(f.value) == 0:
                         continue
-                    is_negative_operator = sh.is_negation_operator(f.operator)
                     f.value = helper.values_for_operator(value=f.value, op=f.operator)
                     op = sh.get_sql_operator(f.operator)
-                    r_op = ""
-                    if is_negative_operator:
-                        r_op = sh.reverse_sql_operator(op)
                     e_k_f = e_k + f"_fetch{j}"
                     full_args = {**full_args, **sh.multi_values(f.value, value_key=e_k_f)}
                     if f.type == schemas.FetchFilterType.FETCH_URL:
@@ -1122,12 +1147,6 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
                         ))
                         events_conditions[-1]["condition"].append(event_where[-1])
                         apply = True
-                        if is_negative_operator:
-                            events_conditions_not.append(
-                                {
-                                    "type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
-                            events_conditions_not[-1]["condition"] = sh.multi_conditions(
-                                f"sub.`$properties`.url_path {r_op} %({e_k_f})s", f.value, value_key=e_k_f)
                     elif f.type == schemas.FetchFilterType.FETCH_STATUS_CODE:
                         event_where.append(json_condition(
                             "main", "$properties", 'status', op, f.value, e_k_f, True, True
@@ -1140,13 +1159,6 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
                         ))
                         events_conditions[-1]["condition"].append(event_where[-1])
                         apply = True
-                        if is_negative_operator:
-                            events_conditions_not.append(
-                                {
-                                    "type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
-                            events_conditions_not[-1]["condition"] = sh.multi_conditions(
-                                f"sub.`$properties`.method {r_op} %({e_k_f})s", f.value,
-                                value_key=e_k_f)
                     elif f.type == schemas.FetchFilterType.FETCH_DURATION:
                         event_where.append(
                             sh.multi_conditions(f"main.`$duration_s` {f.operator} %({e_k_f})s/1000", f.value,
@@ -1159,26 +1171,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
                         ))
                         events_conditions[-1]["condition"].append(event_where[-1])
                         apply = True
-                        if is_negative_operator:
-                            events_conditions_not.append(
-                                {
-                                    "type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
-                            events_conditions_not[-1]["condition"] = sh.multi_conditions(
-                                f"sub.`$properties`.request_body {r_op} %({e_k_f})s", f.value,
-                                value_key=e_k_f)
                     elif f.type == schemas.FetchFilterType.FETCH_RESPONSE_BODY:
                         event_where.append(json_condition(
                             "main", "$properties", 'response_body', op, f.value, e_k_f
                         ))
                         events_conditions[-1]["condition"].append(event_where[-1])
                         apply = True
-                        if is_negative_operator:
-                            events_conditions_not.append(
-                                {
-                                    "type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
-                            events_conditions_not[-1]["condition"] = sh.multi_conditions(
-                                f"sub.`$properties`.response_body {r_op} %({e_k_f})s", f.value,
-                                value_key=e_k_f)
                     else:
                         logging.warning(f"undefined FETCH filter: {f.type}")
                 if not apply:
@@ -1222,8 +1220,35 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
                     else:
                         logging.warning(f"undefined GRAPHQL filter: {f.type}")
                 events_conditions[-1]["condition"] = " AND ".join(events_conditions[-1]["condition"])
+            elif event_type == schemas.EventType.EVENT:
+                event_from = event_from % f"{MAIN_EVENTS_TABLE} AS main "
+                _column = events.EventType.CLICK.column
+                event_where.append(f"main.`$event_name`=%({e_k})s AND main.session_id>0")
+                events_conditions.append({"type": event_where[-1], "condition": ""})
+
             else:
                 continue
+            if event.properties is not None and len(event.properties.filters) > 0:
+                sub_conditions = []
+                for l, property in enumerate(event.properties.filters):
+                    a_k = f"{e_k}_att_{l}"
+                    full_args = {**full_args,
+                                 **sh.multi_values(property.value, value_key=a_k)}
+
+                    if property.is_predefined:
+                        condition = get_sub_condition(col_name=f"main.{property.name}",
+                                                      val_name=a_k, operator=property.operator)
+                    else:
+                        condition = get_sub_condition(col_name=f"main.properties.{property.name}",
+                                                      val_name=a_k, operator=property.operator)
+                    event_where.append(
+                        sh.multi_conditions(condition, property.value, value_key=a_k)
+                    )
+                    sub_conditions.append(event_where[-1])
+                if len(sub_conditions) > 0:
+                    sub_conditions = (" " + event.properties.operator + " ").join(sub_conditions)
+                    events_conditions[-1]["condition"] += " AND " if len(events_conditions[-1]["condition"]) > 0 else ""
+                    events_conditions[-1]["condition"] += "(" + sub_conditions + ")"
            if event_index == 0 or or_events:
                event_where += ss_constraints
            if is_not:
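Each event-level property filter becomes one parameterized condition, and the group is joined with the payload's own AND/OR operator before being appended to the event's condition string. A toy illustration of the string that loop assembles (column names and values are invented):

conditions = [
    "main.properties.plan = %(e_0_att_0)s",
    "main.`$duration_s` > %(e_0_att_1)s",
]
group_operator = "AND"                       # from event.properties.operator
existing = "main.`$event_name`=%(e_0)s"      # condition built for the event itself

joined = "(" + (" " + group_operator + " ").join(conditions) + ")"
final = existing + (" AND " if existing else "") + joined
print(final)
# main.`$event_name`=%(e_0)s AND (main.properties.plan = %(e_0_att_0)s AND main.`$duration_s` > %(e_0_att_1)s)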
@@ -1426,30 +1451,17 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
     if extra_conditions and len(extra_conditions) > 0:
         _extra_or_condition = []
         for i, c in enumerate(extra_conditions):
-            if sh.isAny_opreator(c.operator) and c.type != schemas.EventType.REQUEST_DETAILS.value:
+            if sh.isAny_opreator(c.operator):
                 continue
             e_k = f"ec_value{i}"
             op = sh.get_sql_operator(c.operator)
             c.value = helper.values_for_operator(value=c.value, op=c.operator)
             full_args = {**full_args,
                          **sh.multi_values(c.value, value_key=e_k)}
-            if c.type in (schemas.EventType.LOCATION.value, schemas.EventType.REQUEST.value):
+            if c.type == events.EventType.LOCATION.ui_type:
                 _extra_or_condition.append(
                     sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
                                         c.value, value_key=e_k))
-            elif c.type == schemas.EventType.REQUEST_DETAILS.value:
-                for j, c_f in enumerate(c.filters):
-                    if sh.isAny_opreator(c_f.operator) or len(c_f.value) == 0:
-                        continue
-                    e_k += f"_{j}"
-                    op = sh.get_sql_operator(c_f.operator)
-                    c_f.value = helper.values_for_operator(value=c_f.value, op=c_f.operator)
-                    full_args = {**full_args,
-                                 **sh.multi_values(c_f.value, value_key=e_k)}
-                    if c_f.type == schemas.FetchFilterType.FETCH_URL.value:
-                        _extra_or_condition.append(
-                            sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
-                                                c_f.value, value_key=e_k))
             else:
                 logging.warning(f"unsupported extra_event type:${c.type}")
             if len(_extra_or_condition) > 0:
@@ -1,6 +1,5 @@
 import ast
 import logging
-from typing import List, Union
 
 import schemas
 from chalicelib.core import events, metadata, projects
@@ -219,7 +218,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
     }


-def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema2):
+def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema):
     return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS,
                                                                             schemas.EventType.GRAPHQL] \
                 or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE,
@@ -143,12 +143,12 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
     for e in data.events:
         if e.type == schemas.EventType.LOCATION:
             if e.operator not in extra_conditions:
-                extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({
+                extra_conditions[e.operator] = schemas.SessionSearchEventSchema.model_validate({
                     "type": e.type,
                     "isEvent": True,
                     "value": [],
                     "operator": e.operator,
-                    "filters": e.filters
+                    "filters": []
                 })
             for v in e.value:
                 if v not in extra_conditions[e.operator].value:
@@ -160,12 +160,12 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
     for e in data.events:
         if e.type == schemas.EventType.REQUEST_DETAILS:
             if e.operator not in extra_conditions:
-                extra_conditions[e.operator] = schemas.SessionSearchEventSchema2.model_validate({
+                extra_conditions[e.operator] = schemas.SessionSearchEventSchema.model_validate({
                     "type": e.type,
                     "isEvent": True,
                     "value": [],
                     "operator": e.operator,
-                    "filters": e.filters
+                    "filters": []
                 })
             for v in e.value:
                 if v not in extra_conditions[e.operator].value:
@@ -273,7 +273,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
     return sessions


-def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema2):
+def __is_valid_event(is_any: bool, event: schemas.SessionSearchEventSchema):
     return not (not is_any and len(event.value) == 0 and event.type not in [schemas.EventType.REQUEST_DETAILS,
                                                                             schemas.EventType.GRAPHQL] \
                 or event.type in [schemas.PerformanceEventType.LOCATION_DOM_COMPLETE,
@@ -989,7 +989,7 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
                     sh.multi_conditions(f"ev.{events.EventType.LOCATION.column} {op} %({e_k})s",
                                         c.value, value_key=e_k))
             else:
-                logger.warning(f"unsupported extra_event type: {c.type}")
+                logger.warning(f"unsupported extra_event type:${c.type}")
             if len(_extra_or_condition) > 0:
                 extra_constraints.append("(" + " OR ".join(_extra_or_condition) + ")")
     query_part = f"""\
@@ -175,11 +175,11 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
                                    ORDER BY sort_key {data.order}
                                    LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s) AS sorted_sessions;""",
                               parameters=full_args)
-        logging.debug("--------------------")
-        logging.debug(main_query)
-        logging.debug("--------------------")
         try:
+            logging.debug("--------------------")
             sessions_list = cur.execute(main_query)
+            logging.debug("--------------------")
        except Exception as err:
            logging.warning("--------- SESSIONS-CH SEARCH QUERY EXCEPTION -----------")
            logging.warning(main_query)
@@ -262,7 +262,7 @@ def search_by_metadata(tenant_id, user_id, m_key, m_value, project_id=None):
                                      FROM public.user_favorite_sessions
                                      WHERE user_favorite_sessions.user_id = %(userId)s
                                 ) AS favorite_sessions USING (session_id)
-                WHERE s.project_id = %(id)s AND s.duration IS NOT NULL AND s.{col_name} = %(value)s
+                WHERE s.project_id = %(id)s AND isNotNull(s.duration) AND s.{col_name} = %(value)s
                 ) AS full_sessions
                 ORDER BY favorite DESC, issue_score DESC
                 LIMIT 10
@@ -11,9 +11,3 @@ if smtp.has_smtp():
     logger.info("valid SMTP configuration found")
 else:
     logger.info("no SMTP configuration found or SMTP validation failed")
-
-if config("EXP_CH_DRIVER", cast=bool, default=True):
-    logging.info(">>> Using new CH driver")
-    from . import ch_client_exp as ch_client
-else:
-    from . import ch_client
@@ -1,73 +1,185 @@
 import logging
+import threading
+import time
+from functools import wraps
+from queue import Queue, Empty
+
-import clickhouse_driver
+import clickhouse_connect
+from clickhouse_connect.driver.query import QueryContext
 from decouple import config
 
 logger = logging.getLogger(__name__)
 
+_CH_CONFIG = {"host": config("ch_host"),
+              "user": config("ch_user", default="default"),
+              "password": config("ch_password", default=""),
+              "port": config("ch_port_http", cast=int),
+              "client_name": config("APP_NAME", default="PY")}
+CH_CONFIG = dict(_CH_CONFIG)
+
 settings = {}
 if config('ch_timeout', cast=int, default=-1) > 0:
-    logger.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
+    logging.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
     settings = {**settings, "max_execution_time": config('ch_timeout', cast=int)}
 
 if config('ch_receive_timeout', cast=int, default=-1) > 0:
-    logger.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
+    logging.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
     settings = {**settings, "receive_timeout": config('ch_receive_timeout', cast=int)}
 
+extra_args = {}
+if config("CH_COMPRESSION", cast=bool, default=True):
+    extra_args["compression"] = "lz4"
+
+
+def transform_result(self, original_function):
+    @wraps(original_function)
+    def wrapper(*args, **kwargs):
+        if kwargs.get("parameters"):
+            if config("LOCAL_DEV", cast=bool, default=False):
+                logger.debug(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters")))
+            else:
+                logger.debug(
+                    str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
+        elif len(args) > 0:
+            if config("LOCAL_DEV", cast=bool, default=False):
+                logger.debug(args[0])
+            else:
+                logger.debug(str.encode(args[0]))
+        result = original_function(*args, **kwargs)
+        if isinstance(result, clickhouse_connect.driver.query.QueryResult):
+            column_names = result.column_names
+            result = result.result_rows
+            result = [dict(zip(column_names, row)) for row in result]
+
+        return result
+
+    return wrapper
+
+
+class ClickHouseConnectionPool:
+    def __init__(self, min_size, max_size):
+        self.min_size = min_size
+        self.max_size = max_size
+        self.pool = Queue()
+        self.lock = threading.Lock()
+        self.total_connections = 0
+
+        # Initialize the pool with min_size connections
+        for _ in range(self.min_size):
+            client = clickhouse_connect.get_client(**CH_CONFIG,
+                                                   database=config("ch_database", default="default"),
+                                                   settings=settings,
+                                                   **extra_args)
+            self.pool.put(client)
+            self.total_connections += 1
+
+    def get_connection(self):
+        try:
+            # Try to get a connection without blocking
+            client = self.pool.get_nowait()
+            return client
+        except Empty:
+            with self.lock:
+                if self.total_connections < self.max_size:
+                    client = clickhouse_connect.get_client(**CH_CONFIG,
+                                                           database=config("ch_database", default="default"),
+                                                           settings=settings,
+                                                           **extra_args)
+                    self.total_connections += 1
+                    return client
+            # If max_size reached, wait until a connection is available
+            client = self.pool.get()
+            return client
+
+    def release_connection(self, client):
+        self.pool.put(client)
+
+    def close_all(self):
+        with self.lock:
+            while not self.pool.empty():
+                client = self.pool.get()
+                client.close()
+            self.total_connections = 0
+
+
+CH_pool: ClickHouseConnectionPool = None
+
+RETRY_MAX = config("CH_RETRY_MAX", cast=int, default=50)
+RETRY_INTERVAL = config("CH_RETRY_INTERVAL", cast=int, default=2)
+RETRY = 0
+
+
+def make_pool():
+    if not config('CH_POOL', cast=bool, default=True):
+        return
+    global CH_pool
+    global RETRY
+    if CH_pool is not None:
+        try:
+            CH_pool.close_all()
+        except Exception as error:
+            logger.error("Error while closing all connexions to CH", exc_info=error)
+    try:
+        CH_pool = ClickHouseConnectionPool(min_size=config("CH_MINCONN", cast=int, default=4),
+                                           max_size=config("CH_MAXCONN", cast=int, default=8))
+        if CH_pool is not None:
+            logger.info("Connection pool created successfully for CH")
+    except ConnectionError as error:
+        logger.error("Error while connecting to CH", exc_info=error)
+        if RETRY < RETRY_MAX:
+            RETRY += 1
+            logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
+            time.sleep(RETRY_INTERVAL)
+            make_pool()
+        else:
+            raise error
+
+
 class ClickHouseClient:
     __client = None
 
     def __init__(self, database=None):
-        extra_args = {}
-        if config("CH_COMPRESSION", cast=bool, default=True):
-            extra_args["compression"] = "lz4"
-        self.__client = clickhouse_driver.Client(host=config("ch_host"),
-                                                 database=database if database else config("ch_database",
-                                                                                           default="default"),
-                                                 user=config("ch_user", default="default"),
-                                                 password=config("ch_password", default=""),
-                                                 port=config("ch_port", cast=int),
-                                                 settings=settings,
-                                                 **extra_args) \
-            if self.__client is None else self.__client
+        if self.__client is None:
+            if database is not None or not config('CH_POOL', cast=bool, default=True):
+                self.__client = clickhouse_connect.get_client(**CH_CONFIG,
+                                                              database=database if database else config("ch_database",
+                                                                                                         default="default"),
+                                                              settings=settings,
+                                                              **extra_args)
+
+            else:
+                self.__client = CH_pool.get_connection()
+
+        self.__client.execute = transform_result(self, self.__client.query)
+        self.__client.format = self.format
 
     def __enter__(self):
-        return self
-
-    def execute(self, query, parameters=None, **args):
-        try:
-            results = self.__client.execute(query=query, params=parameters, with_column_types=True, **args)
-            keys = tuple(x for x, y in results[1])
-            return [dict(zip(keys, i)) for i in results[0]]
-        except Exception as err:
-            logger.error("--------- CH EXCEPTION -----------", exc_info=err)
-            logger.error("--------- CH QUERY EXCEPTION -----------")
-            logger.error(self.format(query=query, parameters=parameters)
-                         .replace('\n', '\\n')
-                         .replace(' ', ' ')
-                         .replace(' ', ' '))
-            logger.error("--------------------")
-            raise err
-
-    def insert(self, query, params=None, **args):
-        return self.__client.execute(query=query, params=params, **args)
-
-    def client(self):
         return self.__client
 
-    def format(self, query, parameters):
-        if parameters is None:
-            return query
-        return self.__client.substitute_params(query, parameters, self.__client.connection.context)
+    def format(self, query, parameters=None):
+        if parameters:
+            ctx = QueryContext(query=query, parameters=parameters)
+            return ctx.final_query
+        return query
 
     def __exit__(self, *args):
-        pass
+        if config('CH_POOL', cast=bool, default=True):
+            CH_pool.release_connection(self.__client)
+        else:
+            self.__client.close()
 
 
 async def init():
-    logger.info(f">CH_POOL:not defined")
+    logger.info(f">use CH_POOL:{config('CH_POOL', default=True)}")
+    if config('CH_POOL', cast=bool, default=True):
+        make_pool()
 
 
 async def terminate():
-    pass
+    global CH_pool
+    if CH_pool is not None:
+        try:
+            CH_pool.close_all()
+            logger.info("Closed all connexions to CH")
+        except Exception as error:
+            logger.error("Error while closing all connexions to CH", exc_info=error)
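With the pooled clickhouse-connect client, callers keep the same context-manager shape as before: __enter__ hands back a client whose execute() has been wrapped to return a list of dicts. A minimal usage sketch (the table name and query are placeholders, and it assumes init() has already built the pool):

from chalicelib.utils import ch_client

def count_sessions(project_id: int) -> int:
    # execute() is the wrapped clickhouse_connect .query(): rows come back as dicts
    with ch_client.ClickHouseClient() as cli:
        rows = cli.execute(query="SELECT count() AS total FROM sessions WHERE project_id = %(project_id)s",
                           parameters={"project_id": project_id})
    return rows[0]["total"] if rows else 0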
@@ -1,178 +0,0 @@
-import logging
-import threading
-import time
-from functools import wraps
-from queue import Queue, Empty
-
-import clickhouse_connect
-from clickhouse_connect.driver.query import QueryContext
-from decouple import config
-
-logger = logging.getLogger(__name__)
-
-_CH_CONFIG = {"host": config("ch_host"),
-              "user": config("ch_user", default="default"),
-              "password": config("ch_password", default=""),
-              "port": config("ch_port_http", cast=int),
-              "client_name": config("APP_NAME", default="PY")}
-CH_CONFIG = dict(_CH_CONFIG)
-
-settings = {}
-if config('ch_timeout', cast=int, default=-1) > 0:
-    logging.info(f"CH-max_execution_time set to {config('ch_timeout')}s")
-    settings = {**settings, "max_execution_time": config('ch_timeout', cast=int)}
-
-if config('ch_receive_timeout', cast=int, default=-1) > 0:
-    logging.info(f"CH-receive_timeout set to {config('ch_receive_timeout')}s")
-    settings = {**settings, "receive_timeout": config('ch_receive_timeout', cast=int)}
-
-extra_args = {}
-if config("CH_COMPRESSION", cast=bool, default=True):
-    extra_args["compression"] = "lz4"
-
-
-def transform_result(self, original_function):
-    @wraps(original_function)
-    def wrapper(*args, **kwargs):
-        if kwargs.get("parameters"):
-            logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
-        elif len(args) > 0:
-            logger.debug(str.encode(args[0]))
-        result = original_function(*args, **kwargs)
-        if isinstance(result, clickhouse_connect.driver.query.QueryResult):
-            column_names = result.column_names
-            result = result.result_rows
-            result = [dict(zip(column_names, row)) for row in result]
-
-        return result
-
-    return wrapper
-
-
-class ClickHouseConnectionPool:
-    def __init__(self, min_size, max_size):
-        self.min_size = min_size
-        self.max_size = max_size
-        self.pool = Queue()
-        self.lock = threading.Lock()
-        self.total_connections = 0
-
-        # Initialize the pool with min_size connections
-        for _ in range(self.min_size):
-            client = clickhouse_connect.get_client(**CH_CONFIG,
-                                                   database=config("ch_database", default="default"),
-                                                   settings=settings,
-                                                   **extra_args)
-            self.pool.put(client)
-            self.total_connections += 1
-
-    def get_connection(self):
-        try:
-            # Try to get a connection without blocking
-            client = self.pool.get_nowait()
-            return client
-        except Empty:
-            with self.lock:
-                if self.total_connections < self.max_size:
-                    client = clickhouse_connect.get_client(**CH_CONFIG,
-                                                           database=config("ch_database", default="default"),
-                                                           settings=settings,
-                                                           **extra_args)
-                    self.total_connections += 1
-                    return client
-            # If max_size reached, wait until a connection is available
-            client = self.pool.get()
-            return client
-
-    def release_connection(self, client):
-        self.pool.put(client)
-
-    def close_all(self):
-        with self.lock:
-            while not self.pool.empty():
-                client = self.pool.get()
-                client.close()
-            self.total_connections = 0
-
-
-CH_pool: ClickHouseConnectionPool = None
-
-RETRY_MAX = config("CH_RETRY_MAX", cast=int, default=50)
-RETRY_INTERVAL = config("CH_RETRY_INTERVAL", cast=int, default=2)
-RETRY = 0
-
-
-def make_pool():
-    if not config('CH_POOL', cast=bool, default=True):
-        return
-    global CH_pool
-    global RETRY
-    if CH_pool is not None:
-        try:
-            CH_pool.close_all()
-        except Exception as error:
-            logger.error("Error while closing all connexions to CH", exc_info=error)
-    try:
-        CH_pool = ClickHouseConnectionPool(min_size=config("CH_MINCONN", cast=int, default=4),
-                                           max_size=config("CH_MAXCONN", cast=int, default=8))
-        if CH_pool is not None:
-            logger.info("Connection pool created successfully for CH")
-    except ConnectionError as error:
-        logger.error("Error while connecting to CH", exc_info=error)
-        if RETRY < RETRY_MAX:
-            RETRY += 1
-            logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
-            time.sleep(RETRY_INTERVAL)
-            make_pool()
-        else:
-            raise error
-
-
-class ClickHouseClient:
-    __client = None
-
-    def __init__(self, database=None):
-        if self.__client is None:
-            if database is not None or not config('CH_POOL', cast=bool, default=True):
-                self.__client = clickhouse_connect.get_client(**CH_CONFIG,
-                                                              database=database if database else config("ch_database",
-                                                                                                         default="default"),
-                                                              settings=settings,
-                                                              **extra_args)
-
-            else:
-                self.__client = CH_pool.get_connection()
-
-        self.__client.execute = transform_result(self, self.__client.query)
-        self.__client.format = self.format
-
-    def __enter__(self):
-        return self.__client
-
-    def format(self, query, parameters=None):
-        if parameters:
-            ctx = QueryContext(query=query, parameters=parameters)
-            return ctx.final_query
-        return query
-
-    def __exit__(self, *args):
-        if config('CH_POOL', cast=bool, default=True):
-            CH_pool.release_connection(self.__client)
-        else:
-            self.__client.close()
-
-
-async def init():
-    logger.info(f">use CH_POOL:{config('CH_POOL', default=True)}")
-    if config('CH_POOL', cast=bool, default=True):
-        make_pool()
-
-
-async def terminate():
-    global CH_pool
-    if CH_pool is not None:
-        try:
-            CH_pool.close_all()
-            logger.info("Closed all connexions to CH")
-        except Exception as error:
-            logger.error("Error while closing all connexions to CH", exc_info=error)
@@ -1,7 +1,10 @@
+import logging
+import re
 from typing import Union
 
 import schemas
-import logging
+from chalicelib.utils import sql_helper as sh
+from schemas import SearchEventOperator
 
 logger = logging.getLogger(__name__)
@@ -66,3 +69,94 @@ def get_event_type(event_type: Union[schemas.EventType, schemas.PerformanceEvent
     if event_type not in defs:
         raise Exception(f"unsupported EventType:{event_type}")
     return defs.get(event_type)
+
+
+# AI generated
+def simplify_clickhouse_type(ch_type: str) -> str:
+    """
+    Simplify a ClickHouse data type name to a broader category like:
+    int, float, decimal, datetime, string, uuid, enum, array, tuple, map, nested, etc.
+    """
+
+    # 1) Strip out common wrappers like Nullable(...) or LowCardinality(...)
+    #    Possibly multiple wrappers: e.g. "LowCardinality(Nullable(Int32))"
+    pattern_wrappers = re.compile(r'(Nullable|LowCardinality)\((.*)\)')
+    while True:
+        match = pattern_wrappers.match(ch_type)
+        if match:
+            ch_type = match.group(2)
+        else:
+            break
+
+    # 2) Normalize (lowercase) for easier checks
+    normalized_type = ch_type.lower()
+
+    # 3) Use pattern matching or direct checks for known categories
+    #    (You can adapt this as you see fit for your environment.)
+
+    # Integers: Int8, Int16, Int32, Int64, Int128, Int256, UInt8, UInt16, ...
+    if re.match(r'^(u?int)(8|16|32|64|128|256)$', normalized_type):
+        return "int"
+
+    # Floats: Float32, Float64
+    if re.match(r'^float(32|64)$', normalized_type):
+        return "float"
+
+    # Decimal: Decimal(P, S)
+    if normalized_type.startswith("decimal"):
+        return "decimal"
+
+    # Date/DateTime
+    if normalized_type.startswith("date"):
+        return "datetime"
+    if normalized_type.startswith("datetime"):
+        return "datetime"
+
+    # Strings: String, FixedString(N)
+    if normalized_type.startswith("string"):
+        return "string"
+    if normalized_type.startswith("fixedstring"):
+        return "string"
+
+    # UUID
+    if normalized_type.startswith("uuid"):
+        return "uuid"
+
+    # Enums: Enum8(...) or Enum16(...)
+    if normalized_type.startswith("enum8") or normalized_type.startswith("enum16"):
+        return "enum"
+
+    # Arrays: Array(T)
+    if normalized_type.startswith("array"):
+        return "array"
+
+    # Tuples: Tuple(T1, T2, ...)
+    if normalized_type.startswith("tuple"):
+        return "tuple"
+
+    # Map(K, V)
+    if normalized_type.startswith("map"):
+        return "map"
+
+    # Nested(...)
+    if normalized_type.startswith("nested"):
+        return "nested"
+
+    # If we didn't match above, just return the original type in lowercase
+    return normalized_type
+
+
+def simplify_clickhouse_types(ch_types: list[str]) -> list[str]:
+    """
+    Takes a list of ClickHouse types and returns a list of simplified types
+    by calling `simplify_clickhouse_type` on each.
+    """
+    return list(set([simplify_clickhouse_type(t) for t in ch_types]))
+
+
+def get_sub_condition(col_name: str, val_name: str,
+                      operator: Union[schemas.SearchEventOperator, schemas.MathOperator]):
+    if operator == SearchEventOperator.PATTERN:
+        return f"match({col_name}, %({val_name})s)"
+    op = sh.get_sql_operator(operator)
+    return f"{col_name} {op} %({val_name})s"
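A quick sense of what the type simplifier returns for typical ClickHouse column types, following the rules above (the import path is the module this hunk modifies):

from chalicelib.utils.exp_ch_helper import simplify_clickhouse_type, simplify_clickhouse_types

assert simplify_clickhouse_type("Nullable(UInt64)") == "int"
assert simplify_clickhouse_type("LowCardinality(String)") == "string"
assert simplify_clickhouse_type("DateTime64(3)") == "datetime"
assert simplify_clickhouse_type("Decimal(18, 4)") == "decimal"
assert simplify_clickhouse_type("IPv4") == "ipv4"          # unmatched types fall through lowercased
print(simplify_clickhouse_types(["Int8", "UInt64", "Float32"]))  # e.g. ['int', 'float'] (order not guaranteed)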
@@ -4,41 +4,40 @@ import schemas


 def get_sql_operator(op: Union[schemas.SearchEventOperator, schemas.ClickEventExtraOperator, schemas.MathOperator]):
-    if isinstance(op, Enum):
-        op = op.value
     return {
-        schemas.SearchEventOperator.IS.value: "=",
-        schemas.SearchEventOperator.ON.value: "=",
-        schemas.SearchEventOperator.ON_ANY.value: "IN",
-        schemas.SearchEventOperator.IS_NOT.value: "!=",
-        schemas.SearchEventOperator.NOT_ON.value: "!=",
-        schemas.SearchEventOperator.CONTAINS.value: "ILIKE",
-        schemas.SearchEventOperator.NOT_CONTAINS.value: "NOT ILIKE",
-        schemas.SearchEventOperator.STARTS_WITH.value: "ILIKE",
-        schemas.SearchEventOperator.ENDS_WITH.value: "ILIKE",
-        # Selector operators:
-        schemas.ClickEventExtraOperator.IS.value: "=",
-        schemas.ClickEventExtraOperator.IS_NOT.value: "!=",
-        schemas.ClickEventExtraOperator.CONTAINS.value: "ILIKE",
-        schemas.ClickEventExtraOperator.NOT_CONTAINS.value: "NOT ILIKE",
-        schemas.ClickEventExtraOperator.STARTS_WITH.value: "ILIKE",
-        schemas.ClickEventExtraOperator.ENDS_WITH.value: "ILIKE",
-
-        schemas.MathOperator.GREATER.value: ">",
-        schemas.MathOperator.GREATER_EQ.value: ">=",
-        schemas.MathOperator.LESS.value: "<",
-        schemas.MathOperator.LESS_EQ.value: "<=",
+        schemas.SearchEventOperator.IS: "=",
+        schemas.SearchEventOperator.ON: "=",
+        schemas.SearchEventOperator.ON_ANY: "IN",
+        schemas.SearchEventOperator.IS_NOT: "!=",
+        schemas.SearchEventOperator.NOT_ON: "!=",
+        schemas.SearchEventOperator.CONTAINS: "ILIKE",
+        schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
+        schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
+        schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
+        # this is not used as an operator, it is used in order to maintain a valid value for conditions
+        schemas.SearchEventOperator.PATTERN: "regex",
+
+        # Selector operators:
+        schemas.ClickEventExtraOperator.IS: "=",
+        schemas.ClickEventExtraOperator.IS_NOT: "!=",
+        schemas.ClickEventExtraOperator.CONTAINS: "ILIKE",
+        schemas.ClickEventExtraOperator.NOT_CONTAINS: "NOT ILIKE",
+        schemas.ClickEventExtraOperator.STARTS_WITH: "ILIKE",
+        schemas.ClickEventExtraOperator.ENDS_WITH: "ILIKE",
+
+        schemas.MathOperator.GREATER: ">",
+        schemas.MathOperator.GREATER_EQ: ">=",
+        schemas.MathOperator.LESS: "<",
+        schemas.MathOperator.LESS_EQ: "<=",
     }.get(op, "=")


 def is_negation_operator(op: schemas.SearchEventOperator):
-    if isinstance(op, Enum):
-        op = op.value
-    return op in [schemas.SearchEventOperator.IS_NOT.value,
-                  schemas.SearchEventOperator.NOT_ON.value,
-                  schemas.SearchEventOperator.NOT_CONTAINS.value,
-                  schemas.ClickEventExtraOperator.IS_NOT.value,
-                  schemas.ClickEventExtraOperator.NOT_CONTAINS.value]
+    return op in [schemas.SearchEventOperator.IS_NOT,
+                  schemas.SearchEventOperator.NOT_ON,
+                  schemas.SearchEventOperator.NOT_CONTAINS,
+                  schemas.ClickEventExtraOperator.IS_NOT,
+                  schemas.ClickEventExtraOperator.NOT_CONTAINS]


 def reverse_sql_operator(op):

@@ -76,4 +75,3 @@ def single_value(values):
     if isinstance(v, Enum):
         values[i] = v.value
     return values
-
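With enum members now used directly as dictionary keys, callers pass the enum itself; PATTERN maps to the placeholder "regex" only so the lookup stays valid, and get_sub_condition (above) short-circuits it into ClickHouse's match(). A small usage sketch using the member names as they appear in this diff:

import schemas
from chalicelib.utils import sql_helper as sh

assert sh.get_sql_operator(schemas.SearchEventOperator.CONTAINS) == "ILIKE"
assert sh.get_sql_operator(schemas.MathOperator.GREATER_EQ) == ">="
assert sh.get_sql_operator("anything-unknown") == "="          # .get(..., "=") fallback
assert sh.is_negation_operator(schemas.SearchEventOperator.NOT_CONTAINS) is True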
@@ -74,4 +74,5 @@ EXP_CH_DRIVER=true
 EXP_AUTOCOMPLETE=true
 EXP_ALERTS=true
 EXP_ERRORS_SEARCH=true
 EXP_METRICS=true
+EXP_SESSIONS_SEARCH=true
@@ -1,16 +1,15 @@
 urllib3==2.3.0
 requests==2.32.3
-boto3==1.36.12
+boto3==1.37.21
 pyjwt==2.10.1
 psycopg2-binary==2.9.10
-psycopg[pool,binary]==3.2.4
+psycopg[pool,binary]==3.2.6
-clickhouse-driver[lz4]==0.2.9
 clickhouse-connect==0.8.15
-elasticsearch==8.17.1
+elasticsearch==8.17.2
 jira==3.8.0
-cachetools==5.5.1
+cachetools==5.5.2
 
-fastapi==0.115.8
+fastapi==0.115.12
 uvicorn[standard]==0.34.0
 python-decouple==3.8
 pydantic[email]==2.10.6
@@ -1,16 +1,15 @@
 urllib3==2.3.0
 requests==2.32.3
-boto3==1.36.12
+boto3==1.37.21
 pyjwt==2.10.1
 psycopg2-binary==2.9.10
-psycopg[pool,binary]==3.2.4
+psycopg[pool,binary]==3.2.6
-clickhouse-driver[lz4]==0.2.9
 clickhouse-connect==0.8.15
-elasticsearch==8.17.1
+elasticsearch==8.17.2
 jira==3.8.0
-cachetools==5.5.1
+cachetools==5.5.2
 
-fastapi==0.115.8
+fastapi==0.115.12
 uvicorn[standard]==0.34.0
 python-decouple==3.8
 pydantic[email]==2.10.6
55  api/routers/subs/product_analytics.py  Normal file
@@ -0,0 +1,55 @@
+from typing import Annotated
+
+from fastapi import Body, Depends, Query
+
+import schemas
+from chalicelib.core import metadata
+from chalicelib.core.product_analytics import events, properties
+from or_dependencies import OR_context
+from routers.base import get_routers
+
+public_app, app, app_apikey = get_routers()
+
+
+@app.get('/{projectId}/filters', tags=["product_analytics"])
+def get_all_filters(projectId: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
+                    context: schemas.CurrentContext = Depends(OR_context)):
+    return {
+        "data": {
+            "events": events.get_events(project_id=projectId, page=filter_query),
+            "filters": properties.get_all_properties(project_id=projectId, page=filter_query),
+            "metadata": metadata.get_for_filters(project_id=projectId)
+        }
+    }
+
+
+@app.get('/{projectId}/events/names', tags=["product_analytics"])
+def get_all_events(projectId: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
+                   context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": events.get_events(project_id=projectId, page=filter_query)}
+
+
+@app.get('/{projectId}/properties/search', tags=["product_analytics"])
+def get_event_properties(projectId: int, event_name: str = None,
+                         context: schemas.CurrentContext = Depends(OR_context)):
+    if not event_name or len(event_name) == 0:
+        return {"data": []}
+    return {"data": properties.get_event_properties(project_id=projectId, event_name=event_name)}
+
+
+@app.post('/{projectId}/events/search', tags=["product_analytics"])
+def search_events(projectId: int, data: schemas.EventsSearchPayloadSchema = Body(...),
+                  context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": events.search_events(project_id=projectId, data=data)}
+
+
+@app.get('/{projectId}/lexicon/events', tags=["product_analytics", "lexicon"])
+def get_all_lexicon_events(projectId: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
+                           context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": events.get_lexicon(project_id=projectId, page=filter_query)}
+
+
+@app.get('/{projectId}/lexicon/properties', tags=["product_analytics", "lexicon"])
+def get_all_lexicon_properties(projectId: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
+                               context: schemas.CurrentContext = Depends(OR_context)):
+    return {"data": properties.get_lexicon(project_id=projectId, page=filter_query)}
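A rough sketch of how a client might call the new product-analytics routes (host, port, auth header and the exact body field names are placeholders; the real payload shape is EventsSearchPayloadSchema, defined near the end of this diff, and the timed/paginated mixin field names are assumptions):

import requests

BASE = "http://localhost:8080"                   # placeholder host/port
HEADERS = {"Authorization": "Bearer <jwt>"}      # auth is handled by the regular app router

# Paginated filter catalog: PaginatedSchema is read from the query string
r = requests.get(f"{BASE}/1/filters", params={"page": 1, "limit": 50}, headers=HEADERS)

# Event search takes an EventsSearchPayloadSchema body
payload = {"startTimestamp": 0, "endTimestamp": 1735689600000,      # assumed field names
           "filters": [{"is_event": True, "name": "click"}]}
r = requests.post(f"{BASE}/1/events/search", json=payload, headers=HEADERS)
print(r.status_code)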
@@ -1,15 +0,0 @@
-import schemas
-from chalicelib.core.metrics import product_anaytics2
-from fastapi import Depends
-from or_dependencies import OR_context
-from routers.base import get_routers
-
-
-public_app, app, app_apikey = get_routers()
-
-
-@app.post('/{projectId}/events/search', tags=["dashboard"])
-def search_events(projectId: int,
-                  # data: schemas.CreateDashboardSchema = Body(...),
-                  context: schemas.CurrentContext = Depends(OR_context)):
-    return product_anaytics2.search_events(project_id=projectId, data={})
@@ -1,10 +1,12 @@
-from fastapi import Body, Depends
+from typing import Annotated
 
+from fastapi import Body, Depends, Query
+
+import schemas
 from chalicelib.core.usability_testing import service
 from chalicelib.core.usability_testing.schema import UTTestCreate, UTTestUpdate, UTTestSearch
 from or_dependencies import OR_context
 from routers.base import get_routers
-from schemas import schemas
 
 public_app, app, app_apikey = get_routers()
 tags = ["usability-tests"]
@@ -77,9 +79,8 @@ async def update_ut_test(projectId: int, test_id: int, test_update: UTTestUpdate
 
 
 @app.get('/{projectId}/usability-tests/{test_id}/sessions', tags=tags)
-async def get_sessions(projectId: int, test_id: int, page: int = 1, limit: int = 10,
-                       live: bool = False,
-                       user_id: str = None):
+async def get_sessions(projectId: int, test_id: int, filter_query: Annotated[schemas.PaginatedSchema, Query()],
+                       live: bool = False, user_id: str = None):
     """
     Get sessions related to a specific UT test.
 
@@ -88,20 +89,21 @@ async def get_sessions(projectId: int, test_id: int, page: int = 1, limit: int =
     """
 
     if live:
-        return service.ut_tests_sessions_live(projectId, test_id, page, limit)
+        return service.ut_tests_sessions_live(projectId, test_id, filter_query.page, filter_query.limit)
     else:
-        return service.ut_tests_sessions(projectId, test_id, page, limit, user_id, live)
+        return service.ut_tests_sessions(projectId, test_id, filter_query.page, filter_query.limit, user_id, live)
 
 
 @app.get('/{projectId}/usability-tests/{test_id}/responses/{task_id}', tags=tags)
-async def get_responses(projectId: int, test_id: int, task_id: int, page: int = 1, limit: int = 10, query: str = None):
+async def get_responses(projectId: int, test_id: int, task_id: int,
+                        filter_query: Annotated[schemas.PaginatedSchema, Query()], query: str = None):
     """
     Get responses related to a specific UT test.
 
     - **project_id**: The unique identifier of the project.
     - **test_id**: The unique identifier of the UT test.
     """
-    return service.get_responses(test_id, task_id, page, limit, query)
+    return service.get_responses(test_id, task_id, filter_query.page, filter_query.limit, query)
 
 
 @app.get('/{projectId}/usability-tests/{test_id}/statistics', tags=tags)
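The signatures above replace the explicit page/limit parameters with a pagination model bound to the query string. A minimal standalone sketch of that Annotated[..., Query()] pattern in recent FastAPI versions (not OpenReplay code; model and field names chosen for illustration):

from typing import Annotated

from fastapi import FastAPI, Query
from pydantic import BaseModel

demo_app = FastAPI()


class DemoPaginated(BaseModel):
    # stand-in for schemas.PaginatedSchema; the real field names are assumed
    page: int = 1
    limit: int = 10


@demo_app.get("/items")
def list_items(filter_query: Annotated[DemoPaginated, Query()]):
    # /items?page=2&limit=25 -> FastAPI fills the Pydantic model from the query string,
    # so existing callers that pass ?page=&limit= keep working.
    return {"page": filter_query.page, "limit": filter_query.limit}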
@@ -1,2 +1,4 @@
 from .schemas import *
+from .product_analytics import *
 from . import overrides as _overrides
+from .schemas import _PaginatedSchema as PaginatedSchema
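With the re-export above, downstream code can pull the pagination model straight from the package root; a tiny sketch (field names assumed, mirroring the filter_query.page / filter_query.limit usage elsewhere in this changeset):

from schemas import PaginatedSchema

fq = PaginatedSchema(page=1, limit=25)  # assumed field names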
api/schemas/product_analytics.py (new file, 22 lines)
@@ -0,0 +1,22 @@
+from typing import Optional, List, Literal, Union, Annotated
+from pydantic import Field
+
+from .overrides import BaseModel
+from .schemas import EventPropertiesSchema, SortOrderType, _TimedSchema, \
+    _PaginatedSchema, PropertyFilterSchema
+
+
+class EventSearchSchema(BaseModel):
+    is_event: Literal[True] = True
+    name: str = Field(...)
+    properties: Optional[EventPropertiesSchema] = Field(default=None)
+
+
+ProductAnalyticsGroupedFilter = Annotated[Union[EventSearchSchema, PropertyFilterSchema], \
+    Field(discriminator='is_event')]
+
+
+class EventsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
+    filters: List[ProductAnalyticsGroupedFilter] = Field(...)
+    sort: str = Field(default="startTs")
+    order: SortOrderType = Field(default=SortOrderType.DESC)
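A rough construction sketch for the payload model defined above (illustrative; the timestamp field names inherited from _TimedSchema and the "is" operator literal are assumptions, the rest mirrors the definitions shown):

payload = EventsSearchPayloadSchema(
    startTimestamp=1735689600000,  # assumed _TimedSchema field name
    endTimestamp=1735776000000,    # assumed _TimedSchema field name
    filters=[
        # is_event=True is the discriminator default for EventSearchSchema
        EventSearchSchema(name="click"),
        # is_event=False routes this entry to PropertyFilterSchema
        PropertyFilterSchema(name="$browser", operator="is", value=["Chrome"]),
    ],
)
print(payload.sort, payload.order)  # "startTs", SortOrderType.DESC by default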
@@ -404,6 +404,7 @@ class EventType(str, Enum):
     REQUEST_MOBILE = "requestMobile"
     ERROR_MOBILE = "errorMobile"
     SWIPE_MOBILE = "swipeMobile"
+    EVENT = "event"
 
 
 class PerformanceEventType(str, Enum):
@@ -464,6 +465,7 @@ class SearchEventOperator(str, Enum):
     NOT_CONTAINS = "notContains"
     STARTS_WITH = "startsWith"
     ENDS_WITH = "endsWith"
+    PATTERN = "regex"
 
 
 class ClickEventExtraOperator(str, Enum):
@@ -545,7 +547,66 @@ class RequestGraphqlFilterSchema(BaseModel):
         return values
 
 
-class SessionSearchEventSchema2(BaseModel):
+class EventPredefinedPropertyType(str, Enum):
+    TIME = "$time"
+    SOURCE = "$source"
+    DURATION_S = "$duration_s"
+    DESCRIPTION = "description"
+    AUTO_CAPTURED = "$auto_captured"
+    SDK_EDITION = "$sdk_edition"
+    SDK_VERSION = "$sdk_version"
+    DEVICE_ID = "$device_id"
+    OS = "$os"
+    OS_VERSION = "$os_version"
+    BROWSER = "$browser"
+    BROWSER_VERSION = "$browser_version"
+    DEVICE = "$device"
+    SCREEN_HEIGHT = "$screen_height"
+    SCREEN_WIDTH = "$screen_width"
+    CURRENT_URL = "$current_url"
+    INITIAL_REFERRER = "$initial_referrer"
+    REFERRING_DOMAIN = "$referring_domain"
+    REFERRER = "$referrer"
+    INITIAL_REFERRING_DOMAIN = "$initial_referring_domain"
+    SEARCH_ENGINE = "$search_engine"
+    SEARCH_ENGINE_KEYWORD = "$search_engine_keyword"
+    UTM_SOURCE = "utm_source"
+    UTM_MEDIUM = "utm_medium"
+    UTM_CAMPAIGN = "utm_campaign"
+    COUNTRY = "$country"
+    STATE = "$state"
+    CITY = "$city"
+    ISSUE_TYPE = "issue_type"
+    TAGS = "$tags"
+    IMPORT = "$import"
+
+
+class PropertyFilterSchema(BaseModel):
+    is_event: Literal[False] = False
+    name: Union[EventPredefinedPropertyType, str] = Field(...)
+    operator: Union[SearchEventOperator, MathOperator] = Field(...)
+    value: List[Union[int, str]] = Field(...)
+
+    # property_type: Optional[Literal["string", "number", "date"]] = Field(default=None)
+
+    @computed_field
+    @property
+    def is_predefined(self) -> bool:
+        return EventPredefinedPropertyType.has_value(self.name)
+
+    @model_validator(mode="after")
+    def transform_name(self):
+        if isinstance(self.name, Enum):
+            self.name = self.name.value
+        return self
+
+
+class EventPropertiesSchema(BaseModel):
+    operator: Literal["and", "or"] = Field(...)
+    filters: List[PropertyFilterSchema] = Field(...)
+
+
+class SessionSearchEventSchema(BaseModel):
     is_event: Literal[True] = True
     value: List[Union[str, int]] = Field(...)
     type: Union[EventType, PerformanceEventType] = Field(...)
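A behavioural sketch for the new PropertyFilterSchema (illustrative; the "is" operator literal and the has_value() helper on the enum are assumptions based on the surrounding schema code):

f = PropertyFilterSchema(name=EventPredefinedPropertyType.BROWSER, operator="is", value=["Chrome"])
print(f.name)           # "$browser" -- transform_name() replaces the enum member with its value
print(f.is_predefined)  # True: "$browser" is one of the predefined property names

custom = PropertyFilterSchema(name="plan_tier", operator="is", value=["pro"])
print(custom.is_predefined)  # False for a free-form property name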
@@ -553,6 +614,7 @@ class SessionSearchEventSchema2(BaseModel):
     source: Optional[List[Union[ErrorSource, int, str]]] = Field(default=None)
     sourceOperator: Optional[MathOperator] = Field(default=None)
     filters: Optional[List[RequestGraphqlFilterSchema]] = Field(default_factory=list)
+    properties: Optional[EventPropertiesSchema] = Field(default=None)
 
     _remove_duplicate_values = field_validator('value', mode='before')(remove_duplicate_values)
     _single_to_list_values = field_validator('value', mode='before')(single_to_list)
@@ -660,12 +722,12 @@ def add_missing_is_event(values: dict):
 
 
 # this type is created to allow mixing events&filters and specifying a discriminator
-GroupedFilterType = Annotated[Union[SessionSearchFilterSchema, SessionSearchEventSchema2],
+GroupedFilterType = Annotated[Union[SessionSearchFilterSchema, SessionSearchEventSchema],
                               Field(discriminator='is_event'), BeforeValidator(add_missing_is_event)]
 
 
 class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
-    events: List[SessionSearchEventSchema2] = Field(default_factory=list, doc_hidden=True)
+    events: List[SessionSearchEventSchema] = Field(default_factory=list, doc_hidden=True)
     filters: List[GroupedFilterType] = Field(default_factory=list)
     sort: str = Field(default="startTs")
     order: SortOrderType = Field(default=SortOrderType.DESC)
@@ -690,6 +752,8 @@ class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
     def add_missing_attributes(cls, values):
         # in case isEvent is wrong:
         for f in values.get("filters") or []:
+            if f.get("type") is None:
+                continue
             if EventType.has_value(f["type"]) and not f.get("isEvent"):
                 f["isEvent"] = True
             elif FilterType.has_value(f["type"]) and f.get("isEvent"):
@@ -715,6 +779,15 @@ class SessionsSearchPayloadSchema(_TimedSchema, _PaginatedSchema):
                     f["value"] = vals
         return values
 
+    @model_validator(mode="after")
+    def check_pa_event_filter(self):
+        for v in self.filters + self.events:
+            if v.type == EventType.EVENT:
+                assert v.operator in (SearchEventOperator.IS, MathOperator.EQUAL), \
+                    "operator must be {SearchEventOperator.IS} or {MathOperator.EQUAL} for EVENT type"
+                assert len(v.value) == 1, "value must have 1 single value for EVENT type"
+        return self
+
     @model_validator(mode="after")
     def split_filters_events(self):
         n_filters = []
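What the new check_pa_event_filter validator enforces, sketched with plain dict payloads (illustrative; the timestamp field names and the "is" operator literal are assumptions):

# Accepted: an "event" entry with an equality operator and exactly one value.
SessionsSearchPayloadSchema(
    startTimestamp=1735689600000, endTimestamp=1735776000000,  # assumed _TimedSchema fields
    filters=[{"type": "event", "operator": "is", "value": ["purchase_completed"], "isEvent": True}],
)

# Rejected: two values for an "event" entry trips the length assertion.
try:
    SessionsSearchPayloadSchema(
        startTimestamp=1735689600000, endTimestamp=1735776000000,
        filters=[{"type": "event", "operator": "is", "value": ["a", "b"], "isEvent": True}],
    )
except Exception as err:
    print(err)  # surfaces as a pydantic validation error wrapping the assert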
@@ -960,6 +1033,36 @@ class CardSessionsSchema(_TimedSchema, _PaginatedSchema):
         return self
 
+    # We don't need this as the UI is expecting filters to override the full series' filters
+    # @model_validator(mode="after")
+    # def __merge_out_filters_with_series(self):
+    #     for f in self.filters:
+    #         for s in self.series:
+    #             found = False
+    #
+    #             if f.is_event:
+    #                 sub = s.filter.events
+    #             else:
+    #                 sub = s.filter.filters
+    #
+    #             for e in sub:
+    #                 if f.type == e.type and f.operator == e.operator:
+    #                     found = True
+    #                     if f.is_event:
+    #                         # If extra event: append value
+    #                         for v in f.value:
+    #                             if v not in e.value:
+    #                                 e.value.append(v)
+    #                     else:
+    #                         # If extra filter: override value
+    #                         e.value = f.value
+    #             if not found:
+    #                 sub.append(f)
+    #
+    #     self.filters = []
+    #
+    #     return self
+
     # UI is expecting filters to override the full series' filters
     @model_validator(mode="after")
     def __override_series_filters_with_outer_filters(self):
@@ -1030,16 +1133,6 @@ class CardTable(__CardSchema):
             values["metricValue"] = []
         return values
 
-    @model_validator(mode="after")
-    def __enforce_AND_operator(self):
-        self.metric_of = MetricOfTable(self.metric_of)
-        if self.metric_of in (MetricOfTable.VISITED_URL, MetricOfTable.FETCH, \
-                              MetricOfTable.VISITED_URL.value, MetricOfTable.FETCH.value):
-            for s in self.series:
-                if s.filter is not None:
-                    s.filter.events_order = SearchEventOrder.AND
-        return self
-
     @model_validator(mode="after")
     def __transform(self):
         self.metric_of = MetricOfTable(self.metric_of)
@@ -1384,7 +1477,7 @@ class MetricSearchSchema(_PaginatedSchema):
     mine_only: bool = Field(default=False)
 
 
-class _HeatMapSearchEventRaw(SessionSearchEventSchema2):
+class _HeatMapSearchEventRaw(SessionSearchEventSchema):
     type: Literal[EventType.LOCATION] = Field(...)
@@ -1509,3 +1602,30 @@ class TagCreate(TagUpdate):
 
 class ScopeSchema(BaseModel):
     scope: int = Field(default=1, ge=1, le=2)
+
+
+class SessionModel(BaseModel):
+    duration: int
+    errorsCount: int
+    eventsCount: int
+    favorite: bool = Field(default=False)
+    issueScore: int
+    issueTypes: List[IssueType] = Field(default=[])
+    metadata: dict = Field(default={})
+    pagesCount: int
+    platform: str
+    projectId: int
+    sessionId: str
+    startTs: int
+    timezone: Optional[str]
+    userAnonymousId: Optional[str]
+    userBrowser: str
+    userCity: str
+    userCountry: str
+    userDevice: Optional[str]
+    userDeviceType: str
+    userId: Optional[str]
+    userOs: str
+    userState: str
+    userUuid: str
+    viewed: bool = Field(default=False)
assist-server/Makefile (new file, 24 lines)
@@ -0,0 +1,24 @@
+ee ?= "false" # true to build ee
+arch ?= "amd64" # default amd64
+docker_runtime ?= "docker" # default docker runtime
+docker_repo ?= "public.ecr.aws/p1t3u8a3"
+docker_build_args ?= $(if $(filter depot,$(docker_runtime)),"--push","")
+
+.PHONY: help
+help: ## Prints help for targets with comments
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+##@ Docker
+
+.PHONY: build
+build: ## Build the backend. ee=true for ee build.
+	@DOCKER_BUILD_ARGS=$(docker_build_args) DOCKER_REPO=$(docker_repo) ARCH=$(arch) DOCKER_RUNTIME=$(docker_runtime) bash build.sh $(ee)
+
+##@ Local Dev
+
+.PHONY: scan
+scan: ## Scan the backend
+	@echo scanning foss
+	@trivy fs -q .
+	@echo scanning ee
+	@trivy fs -q ../ee/assist-server/
assist-server/build.sh (new file, 61 lines)
@@ -0,0 +1,61 @@
+#!/bin/bash
+
+# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
+
+ARCH=${ARCH:-amd64}
+git_sha=$(git rev-parse --short HEAD)
+image_tag=${IMAGE_TAG:-git_sha}
+check_prereq() {
+    which docker || {
+        echo "Docker not installed, please install docker."
+        exit 1
+    }
+}
+source ../scripts/lib/_docker.sh
+
+[[ $PATCH -eq 1 ]] && {
+    image_tag="$(grep -ER ^.ppVersion ../scripts/helmcharts/openreplay/charts/$chart | xargs | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
+    image_tag="${image_tag}-ee"
+}
+update_helm_release() {
+    chart=$1
+    HELM_TAG="$(grep -iER ^version ../scripts/helmcharts/openreplay/charts/$chart | awk '{print $2}' | awk -F. -v OFS=. '{$NF += 1 ; print}')"
+    # Update the chart version
+    sed -i "s#^version.*#version: $HELM_TAG# g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
+    # Update image tags
+    sed -i "s#ppVersion.*#ppVersion: \"$image_tag\"#g" ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
+    # Commit the changes
+    git add ../scripts/helmcharts/openreplay/charts/$chart/Chart.yaml
+    git commit -m "chore(helm): Updating $chart image release"
+}
+
+function build_api() {
+    destination="_assist-server_ee"
+    [[ -d ../${destination} ]] && {
+        echo "Removing previous build cache"
+        rm -rf ../${destination}
+    }
+    cp -R ../assist-server ../${destination}
+    cd ../${destination} || exit 1
+    cp -rf ../ee/assist-server/* ./
+
+    docker build -f ./Dockerfile --build-arg GIT_SHA=$git_sha -t ${DOCKER_REPO:-'local'}/assist-server:${image_tag} .
+
+    cd ../assist-server || exit 1
+    rm -rf ../${destination}
+    [[ $PUSH_IMAGE -eq 1 ]] && {
+        docker push ${DOCKER_REPO:-'local'}/assist-server:${image_tag}
+        docker tag ${DOCKER_REPO:-'local'}/assist-server:${image_tag} ${DOCKER_REPO:-'local'}/assist-server:latest
+        docker push ${DOCKER_REPO:-'local'}/assist-server:latest
+    }
+    [[ $SIGN_IMAGE -eq 1 ]] && {
+        cosign sign --key $SIGN_KEY ${DOCKER_REPO:-'local'}/assist-server:${image_tag}
+    }
+    echo "build completed for assist-server"
+}
+
+check_prereq
+build_api $1
+if [[ $PATCH -eq 1 ]]; then
+    update_helm_release assist-server
+fi
backend/Makefile (new file, 33 lines)
@@ -0,0 +1,33 @@
+distro ?= foss # ee to build ee
+app ?= "" # app name, default all
+arch ?= "amd64" # default amd64
+docker_repo ?= "public.ecr.aws/p1t3u8a3"
+docker_runtime ?= "docker" # default docker runtime
+image_tag ?= "" # image tag to build. Default is git sha short
+docker_build_args ?= $(if $(filter depot,$(docker_runtime)),"--push","")
+
+.PHONY: help
+help: ## Prints help for targets with comments
+	@awk 'BEGIN {FS = ":.*##"; printf "\nUsage:\n make \033[36m<target>\033[0m\n"} /^[a-zA-Z_0-9-]+:.*?##/ { printf " \033[36m%-25s\033[0m %s\n", $$1, $$2 } /^##@/ { printf "\n\033[1m%s\033[0m\n", substr($$0, 5) } ' $(MAKEFILE_LIST)
+
+##@ Docker
+
+.PHONY: build
+build: ## Build the backend. ee=true for ee build. app=app name for only one app. Default build all apps.
+	IMAGE_TAG=$(image_tag) DOCKER_BUILD_ARGS=$(docker_build_args) DOCKER_REPO=$(docker_repo) ARCH=$(arch) DOCKER_RUNTIME=$(docker_runtime) bash build.sh $(distro) $(app)
+
+##@ Local Dev
+
+.PHONY: scan
+scan: ## Scan the backend
+	@trivy fs -q .
+
+.PHONY: update
+update: ## Update the backend dependencies
+	@echo Updating dependencies
+	@go get -u -v ./...
+	@go mod tidy
+
+run: ## Run the backend. app=app name for app to run
+	@if [ $(app) == "" ]; then echo "Error: app parameter is required. Usage: make run app=<app_name>"; exit 1; fi
+	@go run "cmd/$(app)/main.go"
@@ -2,44 +2,71 @@ package main
 
 import (
 	"context"
+	"os"
+	"os/signal"
+	"syscall"
 
-	analyticsConfig "openreplay/backend/internal/config/analytics"
-	"openreplay/backend/pkg/analytics"
-	"openreplay/backend/pkg/db/postgres/pool"
 	"openreplay/backend/pkg/logger"
-	"openreplay/backend/pkg/metrics"
-	"openreplay/backend/pkg/metrics/database"
-	"openreplay/backend/pkg/metrics/web"
-	"openreplay/backend/pkg/server"
-	"openreplay/backend/pkg/server/api"
 )
 
 func main() {
 	ctx := context.Background()
 	log := logger.New()
-	cfg := analyticsConfig.New(log)
-	// Observability
-	webMetrics := web.New("analytics")
-	dbMetrics := database.New("analytics")
-	metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
+	log.Info(ctx, "Cacher service started")
 
-	pgConn, err := pool.New(dbMetrics, cfg.Postgres.String())
-	if err != nil {
-		log.Fatal(ctx, "can't init postgres connection: %s", err)
+	sigchan := make(chan os.Signal, 1)
+	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
+
+	for {
+		select {
+		case sig := <-sigchan:
+			log.Error(ctx, "Caught signal %v: terminating", sig)
+			os.Exit(0)
+		}
 	}
-	defer pgConn.Close()
-
-	builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn)
-	if err != nil {
-		log.Fatal(ctx, "can't init services: %s", err)
-	}
-
-	router, err := api.NewRouter(&cfg.HTTP, log)
-	if err != nil {
-		log.Fatal(ctx, "failed while creating router: %s", err)
-	}
-	router.AddHandlers(api.NoPrefix, builder.CardsAPI, builder.DashboardsAPI, builder.ChartsAPI)
-	router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
-
-	server.Run(ctx, log, &cfg.HTTP, router)
 }
+
+//
+//import (
+//	"context"
+//
+//	analyticsConfig "openreplay/backend/internal/config/analytics"
+//	"openreplay/backend/pkg/analytics"
+//	"openreplay/backend/pkg/db/postgres/pool"
+//	"openreplay/backend/pkg/logger"
+//	"openreplay/backend/pkg/metrics"
+//	"openreplay/backend/pkg/metrics/database"
+//	"openreplay/backend/pkg/metrics/web"
+//	"openreplay/backend/pkg/server"
+//	"openreplay/backend/pkg/server/api"
+//)
+//
+//func main() {
+//	ctx := context.Background()
+//	log := logger.New()
+//	cfg := analyticsConfig.New(log)
+//	// Observability
+//	webMetrics := web.New("analytics")
+//	dbMetrics := database.New("analytics")
+//	metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
+//
+//	pgConn, err := pool.New(dbMetrics, cfg.Postgres.String())
+//	if err != nil {
+//		log.Fatal(ctx, "can't init postgres connection: %s", err)
+//	}
+//	defer pgConn.Close()
+//
+//	builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn)
+//	if err != nil {
+//		log.Fatal(ctx, "can't init services: %s", err)
+//	}
+//
+//	router, err := api.NewRouter(&cfg.HTTP, log)
+//	if err != nil {
+//		log.Fatal(ctx, "failed while creating router: %s", err)
+//	}
+//	router.AddHandlers(api.NoPrefix, builder.CardsAPI, builder.DashboardsAPI, builder.ChartsAPI)
+//	router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
+//
+//	server.Run(ctx, log, &cfg.HTTP, router)
+//}
@@ -31,6 +31,7 @@ require (
 	github.com/oschwald/maxminddb-golang v1.13.1
 	github.com/pkg/errors v0.9.1
 	github.com/prometheus/client_golang v1.20.5
+	github.com/redis/go-redis/v9 v9.8.0
 	github.com/rs/xid v1.6.0
 	github.com/sethvargo/go-envconfig v1.1.0
 	github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
@@ -45,6 +46,7 @@ require (
 	github.com/DataDog/zstd v1.5.6 // indirect
 	github.com/beorn7/perks v1.0.1 // indirect
 	github.com/cespare/xxhash/v2 v2.3.0 // indirect
+	github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f // indirect
 	github.com/elastic/elastic-transport-go/v8 v8.6.0 // indirect
 	github.com/gabriel-vasile/mimetype v1.4.8 // indirect
 	github.com/go-faster/city v1.0.1 // indirect
@@ -19,12 +19,8 @@ github.com/Azure/go-ansiterm v0.0.0-20210617225240-d185dfc1b5a1/go.mod h1:xomTg6
 github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2 h1:kYRSnvJju5gYVyhkij+RTJ/VR6QIUaCfWeaFm2ycsjQ=
 github.com/AzureAD/microsoft-authentication-library-for-go v1.3.2/go.mod h1:wP83P5OoQ5p6ip3ScPr0BAq0BvuPAvacpEuSzyouqAI=
 github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
-github.com/ClickHouse/ch-go v0.63.1 h1:s2JyZvWLTCSAGdtjMBBmAgQQHMco6pawLJMOXi0FODM=
-github.com/ClickHouse/ch-go v0.63.1/go.mod h1:I1kJJCL3WJcBMGe1m+HVK0+nREaG+JOYYBWjrDrF3R0=
 github.com/ClickHouse/ch-go v0.65.0 h1:vZAXfTQliuNNefqkPDewX3kgRxN6Q4vUENnnY+ynTRY=
 github.com/ClickHouse/ch-go v0.65.0/go.mod h1:tCM0XEH5oWngoi9Iu/8+tjPBo04I/FxNIffpdjtwx3k=
-github.com/ClickHouse/clickhouse-go/v2 v2.30.1 h1:Dy0n0l+cMbPXs8hFkeeWGaPKrB+MDByUNQBSmRO3W6k=
-github.com/ClickHouse/clickhouse-go/v2 v2.30.1/go.mod h1:szk8BMoQV/NgHXZ20ZbwDyvPWmpfhRKjFkc6wzASGxM=
 github.com/ClickHouse/clickhouse-go/v2 v2.32.1 h1:RLhkxA6iH/bLTXeDtEj/u4yUx9Q03Y95P+cjHScQK78=
 github.com/ClickHouse/clickhouse-go/v2 v2.32.1/go.mod h1:YtaiIFlHCGNPbOpAvFGYobtcVnmgYvD/WmzitixxWYc=
 github.com/DataDog/datadog-api-client-go/v2 v2.34.0 h1:0VVmv8uZg8vdBuEpiF2nBGUezl2QITrxdEsLgh38j8M=
@@ -75,6 +71,10 @@ github.com/aws/smithy-go v1.20.2 h1:tbp628ireGtzcHDDmLT/6ADHidqnwgF57XOXZe6tp4Q=
 github.com/aws/smithy-go v1.20.2/go.mod h1:krry+ya/rV9RDcV/Q16kpu6ypI4K2czasz0NC3qS14E=
 github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
 github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
+github.com/bsm/ginkgo/v2 v2.12.0 h1:Ny8MWAHyOepLGlLKYmXG4IEkioBysk6GpaRTLC8zwWs=
+github.com/bsm/ginkgo/v2 v2.12.0/go.mod h1:SwYbGRRDovPVboqFv0tPTcG1sN61LM1Z4ARdbAV9g4c=
+github.com/bsm/gomega v1.27.10 h1:yeMWxP2pV2fG3FgAODIY8EiRE3dy0aeFYt4l7wh6yKA=
+github.com/bsm/gomega v1.27.10/go.mod h1:JyEr/xRbxbtgWNi8tIEVPUYZ5Dzef52k01W3YH0H+O0=
 github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
 github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
 github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
@@ -122,6 +122,8 @@ github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2
 github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
 github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
 github.com/davecgh/go-spew v1.1.1/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f h1:lO4WD4F/rVNCu3HqELle0jiPLLBs70cWOduZpkS1E78=
+github.com/dgryski/go-rendezvous v0.0.0-20200823014737-9f7001d12a5f/go.mod h1:cuUVRXasLTGF7a8hSLbxyZXjz+1KgoB3wDUb6vlszIc=
 github.com/distribution/reference v0.6.0 h1:0IXCQ5g4/QMHHkarYzh5l+u8T3t73zM5QvfrDyIgxBk=
 github.com/distribution/reference v0.6.0/go.mod h1:BbU0aIcezP1/5jX/8MP0YiH4SdvB5Y4f/wlDRiLyi3E=
 github.com/docker/buildx v0.15.1 h1:1cO6JIc0rOoC8tlxfXoh1HH1uxaNvYH1q7J7kv5enhw=
@@ -132,8 +134,8 @@ github.com/docker/compose/v2 v2.28.1 h1:ORPfiVHrpnRQBDoC3F8JJyWAY8N5gWuo3Fgwyivx
 github.com/docker/compose/v2 v2.28.1/go.mod h1:wDtGQFHe99sPLCHXeVbCkc+Wsl4Y/2ZxiAJa/nga6rA=
 github.com/docker/distribution v2.8.3+incompatible h1:AtKxIZ36LoNK51+Z6RpzLpddBirtxJnzDrHLEKxTAYk=
 github.com/docker/distribution v2.8.3+incompatible/go.mod h1:J2gT2udsDAN96Uj4KfcMRqY0/ypR+oyYUYmja8H+y+w=
-github.com/docker/docker v27.4.1+incompatible h1:ZJvcY7gfwHn1JF48PfbyXg7Jyt9ZCWDW+GGXOIxEwp4=
-github.com/docker/docker v27.4.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
+github.com/docker/docker v27.5.1+incompatible h1:4PYU5dnBYqRQi0294d1FBECqT9ECWeQAIfE8q4YnPY8=
+github.com/docker/docker v27.5.1+incompatible/go.mod h1:eEKB0N0r5NX/I1kEveEz05bcu8tLC/8azJZsviup8Sk=
 github.com/docker/docker-credential-helpers v0.8.0 h1:YQFtbBQb4VrpoPxhFuzEBPQ9E16qz5SpHLS+uswaCp8=
 github.com/docker/docker-credential-helpers v0.8.0/go.mod h1:UGFXcuoQ5TxPiB54nHOZ32AWRqQdECoh/Mg0AlEYb40=
 github.com/docker/go v1.5.1-1.0.20160303222718-d30aec9fd63c h1:lzqkGL9b3znc+ZUgi7FlLnqjQhcXxkNM/quxIjBVMD0=
@@ -449,6 +451,8 @@ github.com/prometheus/procfs v0.15.1 h1:YagwOFzUgYfKKHX6Dr+sHT7km/hxC76UB0leargg
 github.com/prometheus/procfs v0.15.1/go.mod h1:fB45yRUv8NstnjriLhBQLuOUt+WW4BsoGhij/e3PBqk=
 github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc h1:zAsgcP8MhzAbhMnB1QQ2O7ZhWYVGYSR2iVcjzQuPV+o=
 github.com/r3labs/sse v0.0.0-20210224172625-26fe804710bc/go.mod h1:S8xSOnV3CgpNrWd0GQ/OoQfMtlg2uPRSuTzcSGrzwK8=
+github.com/redis/go-redis/v9 v9.8.0 h1:q3nRvjrlge/6UD7eTu/DSg2uYiU2mCL0G/uzBWqhicI=
+github.com/redis/go-redis/v9 v9.8.0/go.mod h1:huWgSWd8mW6+m0VPhJjSSQ+d6Nh1VICQ6Q5lHuCH/Iw=
 github.com/rivo/uniseg v0.2.0 h1:S1pD9weZBuJdFmowNwbpi7BJ8TNftyUImj/0WQi72jY=
 github.com/rivo/uniseg v0.2.0/go.mod h1:J6wj4VEh+S6ZtnVlnTBMWIodfgj8LQOQFoIToxlJtxc=
 github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
@@ -571,8 +575,8 @@ go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0 h1:digkE
 go.opentelemetry.io/otel/exporters/otlp/otlptrace/otlptracehttp v1.21.0/go.mod h1:/OpE/y70qVkndM0TrxT4KBoN3RsFZP0QaofcfYrj76I=
 go.opentelemetry.io/otel/metric v1.34.0 h1:+eTR3U0MyfWjRDhmFMxe2SsW64QrZ84AOhvqS7Y+PoQ=
 go.opentelemetry.io/otel/metric v1.34.0/go.mod h1:CEDrp0fy2D0MvkXE+dPV7cMi8tWZwX3dmaIhwPOaqHE=
-go.opentelemetry.io/otel/sdk v1.24.0 h1:YMPPDNymmQN3ZgczicBY3B6sf9n62Dlj9pWD3ucgoDw=
-go.opentelemetry.io/otel/sdk v1.24.0/go.mod h1:KVrIYw6tEubO9E96HQpcmpTKDVn9gdv35HoYiQWGDFg=
+go.opentelemetry.io/otel/sdk v1.34.0 h1:95zS4k/2GOy069d321O8jWgYsW3MzVV+KuSPKp7Wr1A=
+go.opentelemetry.io/otel/sdk v1.34.0/go.mod h1:0e/pNiaMAqaykJGKbi+tSjWfNNHMTxoC9qANsCzbyxU=
 go.opentelemetry.io/otel/sdk/metric v1.21.0 h1:smhI5oD714d6jHE6Tie36fPx4WDFIg+Y6RfAY4ICcR0=
 go.opentelemetry.io/otel/sdk/metric v1.21.0/go.mod h1:FJ8RAsoPGv/wYMgBdUJXOm+6pzFY3YdljnXtv1SBE8Q=
 go.opentelemetry.io/otel/trace v1.34.0 h1:+ouXS2V8Rd4hp4580a8q23bg0azF2nI8cqLYnC8mh/k=
@@ -613,8 +617,6 @@ golang.org/x/crypto v0.0.0-20210921155107-089bfa567519/go.mod h1:GvvjBRRGRdwPK5y
 golang.org/x/crypto v0.0.0-20220622213112-05595931fe9d/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
 golang.org/x/crypto v0.19.0/go.mod h1:Iy9bg/ha4yyC70EfRS8jz+B6ybOBKMaSxLj6P6oBDfU=
 golang.org/x/crypto v0.20.0/go.mod h1:Xwo95rrVNIoSMx9wa1JroENMToLWn3RNVrTBpLHgZPQ=
-golang.org/x/crypto v0.32.0 h1:euUpcYgM8WcP71gNpTqQCn6rC2t6ULUPiOzfWaXVVfc=
-golang.org/x/crypto v0.32.0/go.mod h1:ZnnJkOaASj8g0AjIduWNlq2NRxL0PlBrbKVyZ6V/Ugc=
 golang.org/x/crypto v0.33.0 h1:IOBPskki6Lysi0lo9qQvbxiQ+FvsCC/YWOecCHAixus=
 golang.org/x/crypto v0.33.0/go.mod h1:bVdXmD7IV/4GdElGPozy6U7lWdRXA4qyRVGJV57uQ5M=
 golang.org/x/exp v0.0.0-20240112132812-db7319d0e0e3 h1:hNQpMuAJe5CtcUqCXaWga3FHu+kQvCqcsoVaQgSV60o=
@@ -639,8 +641,6 @@ golang.org/x/net v0.0.0-20220722155237-a158d28d115b/go.mod h1:XRhObCWvk6IyKnWLug
 golang.org/x/net v0.6.0/go.mod h1:2Tu9+aMcznHK/AK1HMvgo6xiTLG5rD5rZLDS+rp2Bjs=
 golang.org/x/net v0.10.0/go.mod h1:0qNGK6F8kojg2nk9dLZ2mShWaEBan6FAoqfSigmmuDg=
 golang.org/x/net v0.21.0/go.mod h1:bIjVDfnllIU7BJ2DNgfnXvpSvtn8VRwhlsaeUTyUS44=
-golang.org/x/net v0.34.0 h1:Mb7Mrk043xzHgnRM88suvJFwzVrRfHEHJEl5/71CKw0=
-golang.org/x/net v0.34.0/go.mod h1:di0qlW3YNM5oh6GqDGQr92MyTozJPmybPK4Ev/Gm31k=
 golang.org/x/net v0.35.0 h1:T5GQRQb2y08kTAByq9L4/bz8cipCdA8FbRTXewonqY8=
 golang.org/x/net v0.35.0/go.mod h1:EglIi67kWsHKlRzzVMUD93VMSWGFOMSZgxFjparz1Qk=
 golang.org/x/oauth2 v0.25.0 h1:CY4y7XT9v0cRI9oupztF8AgiIu99L/ksR/Xp/6jrZ70=
@@ -652,8 +652,8 @@ golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJ
 golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.0.0-20220722155255-886fb9371eb4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
 golang.org/x/sync v0.1.0/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
-golang.org/x/sync v0.10.0 h1:3NQrjDixjgGwUOCaF8w2+VYHv0Ve/vGYSbdkTa98gmQ=
-golang.org/x/sync v0.10.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
+golang.org/x/sync v0.11.0 h1:GGz8+XQP4FvTTrjZPzNKTMFtSXH80RAzG+5ghFPgK9w=
+golang.org/x/sync v0.11.0/go.mod h1:Czt+wKu1gCyEFDUtn0jG5QVvpJ6rzVqr5aXyt9drQfk=
 golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
 golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
@@ -675,8 +675,6 @@ golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBc
 golang.org/x/sys v0.5.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.8.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
 golang.org/x/sys v0.17.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
-golang.org/x/sys v0.29.0 h1:TPYlXGxvx1MGTn2GiZDhnjPA9wZzZeGKHHmKhHYvgaU=
-golang.org/x/sys v0.29.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/sys v0.30.0 h1:QjkSwP/36a20jFYWkSue1YwXzLmsV5Gfq7Eiy72C1uc=
 golang.org/x/sys v0.30.0/go.mod h1:/VUhepiaJMQUp4+oa/7Zr1D23ma6VTLIYjOOTFZPUcA=
 golang.org/x/term v0.0.0-20201117132131-f5c789dd3221/go.mod h1:Nr5EML6q2oocZ2LXRh80K7BxOlk5/8JxuGnuhpl+muw=
@@ -685,8 +683,8 @@ golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuX
 golang.org/x/term v0.5.0/go.mod h1:jMB1sMXY+tzblOD4FWmEbocvup2/aLOaQEp7JmGp78k=
 golang.org/x/term v0.8.0/go.mod h1:xPskH00ivmX89bAKVGSKKtLOWNx2+17Eiy94tnKShWo=
 golang.org/x/term v0.17.0/go.mod h1:lLRBjIVuehSbZlaOtGMbcMncT+aqLLLmKrsjNrUguwk=
-golang.org/x/term v0.28.0 h1:/Ts8HFuMR2E6IP/jlo7QVLZHggjKQbhu/7H0LJFr3Gg=
-golang.org/x/term v0.28.0/go.mod h1:Sw/lC2IAUZ92udQNf3WodGtn4k/XoLyZoh8v/8uiwek=
+golang.org/x/term v0.29.0 h1:L6pJp37ocefwRRtYPKSWOWzOtWSxVajvz2ldH/xi3iU=
+golang.org/x/term v0.29.0/go.mod h1:6bl4lRlvVuDgSf3179VpIxBF0o10JUpXWOnI7nErv7s=
 golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
 golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
 golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
@@ -696,8 +694,6 @@ golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
 golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
 golang.org/x/text v0.9.0/go.mod h1:e1OnstbJyHTd6l/uOt8jFFHp6TRDWZR/bV3emEE/zU8=
 golang.org/x/text v0.14.0/go.mod h1:18ZOQIKpY8NJVqYksKHtTdi31H5itFRjB5/qKTNYzSU=
-golang.org/x/text v0.21.0 h1:zyQAAkrwaneQ066sspRyJaG9VNi/YJ1NfzcGB3hZ/qo=
-golang.org/x/text v0.21.0/go.mod h1:4IBbMaMmOPCJ8SecivzSH54+73PCFmPWxNTLm+vZkEQ=
 golang.org/x/text v0.22.0 h1:bofq7m3/HAFvbF51jz3Q9wLg3jkvSPuiZu/pD1XwgtM=
 golang.org/x/text v0.22.0/go.mod h1:YRoo4H8PVmsu+E3Ou7cqLVH8oXWIHVoX0jqUWALQhfY=
 golang.org/x/time v0.6.0 h1:eTDhh4ZXt5Qf0augr54TN6suAUudPcawVZeIAPU7D4U=
@@ -2,12 +2,11 @@ package datasaver
 
 import (
 	"context"
-	"encoding/json"
-	"openreplay/backend/pkg/db/types"
 
 	"openreplay/backend/internal/config/db"
 	"openreplay/backend/pkg/db/clickhouse"
 	"openreplay/backend/pkg/db/postgres"
+	"openreplay/backend/pkg/db/types"
 	"openreplay/backend/pkg/logger"
 	. "openreplay/backend/pkg/messages"
 	queue "openreplay/backend/pkg/queue/types"
@@ -51,6 +50,10 @@ func New(log logger.Logger, cfg *db.Config, pg *postgres.Conn, ch clickhouse.Con
 }
 
 func (s *saverImpl) Handle(msg Message) {
+	if msg.TypeID() == MsgCustomEvent {
+		defer s.Handle(types.WrapCustomEvent(msg.(*CustomEvent)))
+	}
+
 	var (
 		sessCtx = context.WithValue(context.Background(), "sessionID", msg.SessionID())
 		session *sessions.Session
@@ -66,23 +69,6 @@ func (s *saverImpl) Handle(msg Message) {
 		return
 	}
 
-	if msg.TypeID() == MsgCustomEvent {
-		m := msg.(*CustomEvent)
-		// Try to parse custom event payload to JSON and extract or_payload field
-		type CustomEventPayload struct {
-			CustomTimestamp uint64 `json:"or_timestamp"`
-		}
-		customPayload := &CustomEventPayload{}
-		if err := json.Unmarshal([]byte(m.Payload), customPayload); err == nil {
-			if customPayload.CustomTimestamp >= session.Timestamp {
-				s.log.Info(sessCtx, "custom event timestamp received: %v", m.Timestamp)
-				msg.Meta().Timestamp = customPayload.CustomTimestamp
-				s.log.Info(sessCtx, "custom event timestamp updated: %v", m.Timestamp)
-			}
-		}
-		defer s.Handle(types.WrapCustomEvent(m))
-	}
-
 	if IsMobileType(msg.TypeID()) {
 		if err := s.handleMobileMessage(sessCtx, session, msg); err != nil {
 			if !postgres.IsPkeyViolation(err) {
@@ -108,15 +108,15 @@ func (c *connectorImpl) newBatch(name, query string) error {
 var batches = map[string]string{
 	"sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?)",
 	"autocompletes": "INSERT INTO experimental.autocomplete (project_id, type, value) VALUES (?, ?, SUBSTR(?, 1, 8000))",
-	"pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-	"clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-	"inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-	"errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", error_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", error_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-	"performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-	"requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-	"custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-	"graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-	"issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+	"issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$os", "$browser", "$referrer", "$country", "$state", "$city", "$current_url", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
 	"issues": "INSERT INTO experimental.issues (project_id, issue_id, type, context_string) VALUES (?, ?, ?, ?)",
 	"mobile_sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?)",
 	"mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
@ -267,6 +267,13 @@ func (c *connectorImpl) InsertWebInputDuration(session *sessions.Session, msg *m
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
|
cropString(msg.Url),
|
||||||
nullableUint16(uint16(msg.InputDuration)),
|
nullableUint16(uint16(msg.InputDuration)),
|
||||||
jsonString,
|
jsonString,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
|
|
@ -307,6 +314,13 @@ func (c *connectorImpl) InsertMouseThrashing(session *sessions.Session, msg *mes
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
|
cropString(msg.Url),
|
||||||
"mouse_thrashing",
|
"mouse_thrashing",
|
||||||
issueID,
|
issueID,
|
||||||
jsonString,
|
jsonString,
|
||||||
|
|
@ -363,6 +377,13 @@ func (c *connectorImpl) InsertIssue(session *sessions.Session, msg *messages.Iss
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
|
cropString(msg.Url),
|
||||||
msg.Type,
|
msg.Type,
|
||||||
issueID,
|
issueID,
|
||||||
jsonString,
|
jsonString,
|
||||||
|
|
@ -452,6 +473,12 @@ func (c *connectorImpl) InsertWebPageEvent(session *sessions.Session, msg *messa
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
cropString(msg.URL),
|
cropString(msg.URL),
|
||||||
jsonString,
|
jsonString,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
|
|
@ -512,6 +539,12 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
cropString(msg.Url),
|
cropString(msg.Url),
|
||||||
jsonString,
|
jsonString,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
|
|
@ -551,6 +584,13 @@ func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *type
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
|
cropString(msg.Url),
|
||||||
msgID,
|
msgID,
|
||||||
jsonString,
|
jsonString,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
|
|
@ -601,6 +641,13 @@ func (c *connectorImpl) InsertWebPerformanceTrackAggr(session *sessions.Session,
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
|
cropString(msg.Url),
|
||||||
jsonString,
|
jsonString,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
c.checkError("performance", err)
|
c.checkError("performance", err)
|
||||||
|
|
@ -652,6 +699,13 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
|
cropString(msg.URL),
|
||||||
nullableUint16(uint16(msg.Duration)),
|
nullableUint16(uint16(msg.Duration)),
|
||||||
jsonString,
|
jsonString,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
|
|
@ -683,6 +737,13 @@ func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.Cu
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
|
cropString(msg.Url),
|
||||||
jsonString,
|
jsonString,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
c.checkError("custom", err)
|
c.checkError("custom", err)
|
||||||
|
|
@ -714,6 +775,13 @@ func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.G
|
||||||
true,
|
true,
|
||||||
session.Platform,
|
session.Platform,
|
||||||
session.UserOSVersion,
|
session.UserOSVersion,
|
||||||
|
session.UserOS,
|
||||||
|
session.UserBrowser,
|
||||||
|
session.Referrer,
|
||||||
|
session.UserCountry,
|
||||||
|
session.UserState,
|
||||||
|
session.UserCity,
|
||||||
|
cropString(msg.Url),
|
||||||
jsonString,
|
jsonString,
|
||||||
); err != nil {
|
); err != nil {
|
||||||
c.checkError("graphql", err)
|
c.checkError("graphql", err)
|
||||||
|
|
|
||||||
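The connector hunks above widen every product_analytics.events insert with the session's OS, browser, referrer, geo fields and the current URL, and thread the matching session attributes through each Insert* call. A minimal Go sketch of the widened argument order, assuming a simplified Session struct; field names are the ones visible in the diff, the real connector hands this slice to its batch writer:

```go
package main

import "fmt"

// Session mirrors the fields referenced in the hunks above; the struct itself
// is a sketch, not the repository's sessions.Session type.
type Session struct {
	Platform, UserOSVersion, UserOS, UserBrowser string
	Referrer, UserCountry, UserState, UserCity   string
}

// eventArgs shows the order in which the new columns are appended: the
// original "$device"/"$os_version" pair is followed by the seven session
// attributes added in this branch, then the current URL and the properties JSON.
func eventArgs(s *Session, url, propsJSON string) []interface{} {
	return []interface{}{
		s.Platform,      // "$device"
		s.UserOSVersion, // "$os_version"
		s.UserOS,        // "$os"
		s.UserBrowser,   // "$browser"
		s.Referrer,      // "$referrer"
		s.UserCountry,   // "$country"
		s.UserState,     // "$state"
		s.UserCity,      // "$city"
		url,             // "$current_url" (cropped upstream with cropString)
		propsJSON,       // "$properties"
	}
}

func main() {
	s := &Session{Platform: "web", UserOS: "Linux", UserBrowser: "Firefox"}
	fmt.Println(eventArgs(s, "https://example.com", "{}"))
}
```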
|
|
@ -84,7 +84,10 @@ func (p *poolImpl) Begin() (*Tx, error) {
|
||||||
tx, err := p.conn.Begin(context.Background())
|
tx, err := p.conn.Begin(context.Background())
|
||||||
p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
|
p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
|
||||||
p.metrics.IncreaseTotalRequests("begin", "")
|
p.metrics.IncreaseTotalRequests("begin", "")
|
||||||
return &Tx{tx, p.metrics}, err
|
return &Tx{
|
||||||
|
origTx: tx,
|
||||||
|
metrics: p.metrics,
|
||||||
|
}, err
|
||||||
}
|
}
|
||||||
|
|
||||||
func (p *poolImpl) Close() {
|
func (p *poolImpl) Close() {
|
||||||
|
|
@ -94,13 +97,13 @@ func (p *poolImpl) Close() {
|
||||||
// TX - start
|
// TX - start
|
||||||
|
|
||||||
type Tx struct {
|
type Tx struct {
|
||||||
pgx.Tx
|
origTx pgx.Tx
|
||||||
metrics database.Database
|
metrics database.Database
|
||||||
}
|
}
|
||||||
|
|
||||||
func (tx *Tx) TxExec(sql string, args ...interface{}) error {
|
func (tx *Tx) TxExec(sql string, args ...interface{}) error {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
_, err := tx.Exec(context.Background(), sql, args...)
|
_, err := tx.origTx.Exec(context.Background(), sql, args...)
|
||||||
method, table := methodName(sql)
|
method, table := methodName(sql)
|
||||||
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
|
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
|
||||||
tx.metrics.IncreaseTotalRequests(method, table)
|
tx.metrics.IncreaseTotalRequests(method, table)
|
||||||
|
|
@ -109,7 +112,7 @@ func (tx *Tx) TxExec(sql string, args ...interface{}) error {
|
||||||
|
|
||||||
func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
|
func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
res := tx.QueryRow(context.Background(), sql, args...)
|
res := tx.origTx.QueryRow(context.Background(), sql, args...)
|
||||||
method, table := methodName(sql)
|
method, table := methodName(sql)
|
||||||
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
|
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
|
||||||
tx.metrics.IncreaseTotalRequests(method, table)
|
tx.metrics.IncreaseTotalRequests(method, table)
|
||||||
|
|
@ -118,7 +121,7 @@ func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
|
||||||
|
|
||||||
func (tx *Tx) TxRollback() error {
|
func (tx *Tx) TxRollback() error {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
err := tx.Rollback(context.Background())
|
err := tx.origTx.Rollback(context.Background())
|
||||||
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
|
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
|
||||||
tx.metrics.IncreaseTotalRequests("rollback", "")
|
tx.metrics.IncreaseTotalRequests("rollback", "")
|
||||||
return err
|
return err
|
||||||
|
|
@ -126,7 +129,7 @@ func (tx *Tx) TxRollback() error {
|
||||||
|
|
||||||
func (tx *Tx) TxCommit() error {
|
func (tx *Tx) TxCommit() error {
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
err := tx.Commit(context.Background())
|
err := tx.origTx.Commit(context.Background())
|
||||||
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
|
tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
|
||||||
tx.metrics.IncreaseTotalRequests("commit", "")
|
tx.metrics.IncreaseTotalRequests("commit", "")
|
||||||
return err
|
return err
|
||||||
|
|
|
||||||
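The pool hunk stops embedding pgx.Tx and stores it in a named origTx field, so callers can only reach the database through the instrumented Tx* methods rather than the promoted pgx methods. A minimal sketch of that wrapper pattern, assuming pgx/v5 (the repository may pin a different major version) and a reduced metrics interface in place of the diff's database.Database:

```go
package pool

import (
	"context"
	"time"

	"github.com/jackc/pgx/v5"
)

// metrics is a stand-in for the database.Database interface used in the diff.
type metrics interface {
	RecordRequestDuration(ms float64, method, table string)
	IncreaseTotalRequests(method, table string)
}

// Tx hides the pgx transaction behind a named field: Exec and Commit are no
// longer promoted, so every statement goes through the timed wrappers.
type Tx struct {
	origTx  pgx.Tx
	metrics metrics
}

func (tx *Tx) TxExec(sql string, args ...interface{}) error {
	start := time.Now()
	_, err := tx.origTx.Exec(context.Background(), sql, args...)
	tx.metrics.RecordRequestDuration(float64(time.Since(start).Milliseconds()), "exec", "")
	tx.metrics.IncreaseTotalRequests("exec", "")
	return err
}

func (tx *Tx) TxCommit() error {
	start := time.Now()
	err := tx.origTx.Commit(context.Background())
	tx.metrics.RecordRequestDuration(float64(time.Since(start).Milliseconds()), "commit", "")
	tx.metrics.IncreaseTotalRequests("commit", "")
	return err
}
```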
|
|
@ -2,7 +2,9 @@ package redis
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"errors"
|
"errors"
|
||||||
"github.com/go-redis/redis"
|
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
|
||||||
config "openreplay/backend/internal/config/redis"
|
config "openreplay/backend/internal/config/redis"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -5,10 +5,11 @@ import (
|
||||||
"encoding/hex"
|
"encoding/hex"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"github.com/google/uuid"
|
|
||||||
"hash/fnv"
|
"hash/fnv"
|
||||||
"strconv"
|
"strconv"
|
||||||
|
|
||||||
|
"github.com/google/uuid"
|
||||||
|
|
||||||
. "openreplay/backend/pkg/messages"
|
. "openreplay/backend/pkg/messages"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -23,41 +24,7 @@ type ErrorEvent struct {
|
||||||
Payload string
|
Payload string
|
||||||
Tags map[string]*string
|
Tags map[string]*string
|
||||||
OriginType int
|
OriginType int
|
||||||
}
|
Url string
|
||||||
|
|
||||||
func unquote(s string) string {
|
|
||||||
if s[0] == '"' {
|
|
||||||
return s[1 : len(s)-1]
|
|
||||||
}
|
|
||||||
return s
|
|
||||||
}
|
|
||||||
func parseTags(tagsJSON string) (tags map[string]*string, err error) {
|
|
||||||
if len(tagsJSON) == 0 {
|
|
||||||
return nil, fmt.Errorf("empty tags")
|
|
||||||
}
|
|
||||||
if tagsJSON[0] == '[' {
|
|
||||||
var tagsArr []json.RawMessage
|
|
||||||
if err = json.Unmarshal([]byte(tagsJSON), &tagsArr); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
tags = make(map[string]*string)
|
|
||||||
for _, keyBts := range tagsArr {
|
|
||||||
tags[unquote(string(keyBts))] = nil
|
|
||||||
}
|
|
||||||
} else if tagsJSON[0] == '{' {
|
|
||||||
var tagsObj map[string]json.RawMessage
|
|
||||||
if err = json.Unmarshal([]byte(tagsJSON), &tagsObj); err != nil {
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
tags = make(map[string]*string)
|
|
||||||
for key, valBts := range tagsObj {
|
|
||||||
val := unquote(string(valBts))
|
|
||||||
tags[key] = &val
|
|
||||||
}
|
|
||||||
}
|
|
||||||
return
|
|
||||||
}
|
}
|
||||||
|
|
||||||
func WrapJSException(m *JSException) (*ErrorEvent, error) {
|
func WrapJSException(m *JSException) (*ErrorEvent, error) {
|
||||||
|
|
@ -69,6 +36,7 @@ func WrapJSException(m *JSException) (*ErrorEvent, error) {
|
||||||
Message: m.Message,
|
Message: m.Message,
|
||||||
Payload: m.Payload,
|
Payload: m.Payload,
|
||||||
OriginType: m.TypeID(),
|
OriginType: m.TypeID(),
|
||||||
|
Url: m.Url,
|
||||||
}, nil
|
}, nil
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
@ -81,6 +49,7 @@ func WrapIntegrationEvent(m *IntegrationEvent) *ErrorEvent {
|
||||||
Message: m.Message,
|
Message: m.Message,
|
||||||
Payload: m.Payload,
|
Payload: m.Payload,
|
||||||
OriginType: m.TypeID(),
|
OriginType: m.TypeID(),
|
||||||
|
Url: m.Url,
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -26,7 +26,11 @@ func New() Logger {
|
||||||
encoderConfig := zap.NewProductionEncoderConfig()
|
encoderConfig := zap.NewProductionEncoderConfig()
|
||||||
encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")
|
encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")
|
||||||
jsonEncoder := zapcore.NewJSONEncoder(encoderConfig)
|
jsonEncoder := zapcore.NewJSONEncoder(encoderConfig)
|
||||||
core := zapcore.NewCore(jsonEncoder, zapcore.AddSync(os.Stdout), zap.InfoLevel)
|
logLevel := zap.InfoLevel
|
||||||
|
if os.Getenv("DEBUG") == "true" {
|
||||||
|
logLevel = zap.DebugLevel
|
||||||
|
}
|
||||||
|
core := zapcore.NewCore(jsonEncoder, zapcore.AddSync(os.Stdout), logLevel)
|
||||||
baseLogger := zap.New(core, zap.AddCaller())
|
baseLogger := zap.New(core, zap.AddCaller())
|
||||||
logger := baseLogger.WithOptions(zap.AddCallerSkip(1))
|
logger := baseLogger.WithOptions(zap.AddCallerSkip(1))
|
||||||
customLogger := &loggerImpl{l: logger}
|
customLogger := &loggerImpl{l: logger}
|
||||||
|
|
|
||||||
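The logger hunk keeps the JSON encoder but makes the core's level depend on a DEBUG environment variable. A consolidated view of the new constructor, assuming go.uber.org/zap as in the surrounding code:

```go
package logger

import (
	"os"

	"go.uber.org/zap"
	"go.uber.org/zap/zapcore"
)

// New builds a JSON logger that defaults to Info and switches to Debug when
// the DEBUG environment variable is set to "true".
func New() *zap.Logger {
	encoderConfig := zap.NewProductionEncoderConfig()
	encoderConfig.EncodeTime = zapcore.TimeEncoderOfLayout("2006-01-02 15:04:05.000")
	jsonEncoder := zapcore.NewJSONEncoder(encoderConfig)

	logLevel := zap.InfoLevel
	if os.Getenv("DEBUG") == "true" {
		logLevel = zap.DebugLevel
	}

	core := zapcore.NewCore(jsonEncoder, zapcore.AddSync(os.Stdout), logLevel)
	return zap.New(core, zap.AddCaller()).WithOptions(zap.AddCallerSkip(1))
}
```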
|
|
@ -1,6 +1,7 @@
|
||||||
package projects
|
package projects
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"encoding/json"
|
"encoding/json"
|
||||||
"fmt"
|
"fmt"
|
||||||
"time"
|
"time"
|
||||||
|
|
@ -40,10 +41,10 @@ func (c *cacheImpl) Set(project *Project) error {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err = c.db.Redis.Set(fmt.Sprintf("project:id:%d", project.ProjectID), projectBytes, time.Minute*10).Result(); err != nil {
|
if _, err = c.db.Redis.Set(context.Background(), fmt.Sprintf("project:id:%d", project.ProjectID), projectBytes, time.Minute*10).Result(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
if _, err = c.db.Redis.Set(fmt.Sprintf("project:key:%s", project.ProjectKey), projectBytes, time.Minute*10).Result(); err != nil {
|
if _, err = c.db.Redis.Set(context.Background(), fmt.Sprintf("project:key:%s", project.ProjectKey), projectBytes, time.Minute*10).Result(); err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
|
c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
|
||||||
|
|
@ -56,7 +57,7 @@ func (c *cacheImpl) GetByID(projectID uint32) (*Project, error) {
|
||||||
return nil, ErrDisabledCache
|
return nil, ErrDisabledCache
|
||||||
}
|
}
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
result, err := c.db.Redis.Get(fmt.Sprintf("project:id:%d", projectID)).Result()
|
result, err := c.db.Redis.Get(context.Background(), fmt.Sprintf("project:id:%d", projectID)).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
@ -74,7 +75,7 @@ func (c *cacheImpl) GetByKey(projectKey string) (*Project, error) {
|
||||||
return nil, ErrDisabledCache
|
return nil, ErrDisabledCache
|
||||||
}
|
}
|
||||||
start := time.Now()
|
start := time.Now()
|
||||||
result, err := c.db.Redis.Get(fmt.Sprintf("project:key:%s", projectKey)).Result()
|
result, err := c.db.Redis.Get(context.Background(), fmt.Sprintf("project:key:%s", projectKey)).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
|
|
|
||||||
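The projects cache hunks adapt Set and Get to the go-redis v9 signatures, which take a context as the first argument. A minimal sketch of the same calls with a reduced Project type; the real cache also records Redis request metrics, omitted here:

```go
package projects

import (
	"context"
	"encoding/json"
	"fmt"
	"time"

	"github.com/redis/go-redis/v9"
)

// Project is a reduced sketch of the cached value; only the fields used for
// the cache keys are shown.
type Project struct {
	ProjectID  uint32 `json:"projectId"`
	ProjectKey string `json:"projectKey"`
}

// set stores the project under both its ID and key for ten minutes; in
// go-redis v9 every call takes a context, which is the point of the hunk.
func set(r *redis.Client, p *Project) error {
	ctx := context.Background()
	raw, err := json.Marshal(p)
	if err != nil {
		return err
	}
	if err := r.Set(ctx, fmt.Sprintf("project:id:%d", p.ProjectID), raw, 10*time.Minute).Err(); err != nil {
		return err
	}
	return r.Set(ctx, fmt.Sprintf("project:key:%s", p.ProjectKey), raw, 10*time.Minute).Err()
}

// getByID reads the cached project back and unmarshals it.
func getByID(r *redis.Client, projectID uint32) (*Project, error) {
	raw, err := r.Get(context.Background(), fmt.Sprintf("project:id:%d", projectID)).Result()
	if err != nil {
		return nil, err
	}
	p := &Project{}
	return p, json.Unmarshal([]byte(raw), p)
}
```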
|
|
@ -1,6 +1,7 @@
|
||||||
package redisstream
|
package redisstream
|
||||||
|
|
||||||
import (
|
import (
|
||||||
|
"context"
|
||||||
"log"
|
"log"
|
||||||
"net"
|
"net"
|
||||||
"sort"
|
"sort"
|
||||||
|
|
@ -8,8 +9,8 @@ import (
|
||||||
"strings"
|
"strings"
|
||||||
"time"
|
"time"
|
||||||
|
|
||||||
_redis "github.com/go-redis/redis"
|
|
||||||
"github.com/pkg/errors"
|
"github.com/pkg/errors"
|
||||||
|
_redis "github.com/redis/go-redis/v9"
|
||||||
|
|
||||||
"openreplay/backend/pkg/messages"
|
"openreplay/backend/pkg/messages"
|
||||||
"openreplay/backend/pkg/queue/types"
|
"openreplay/backend/pkg/queue/types"
|
||||||
|
|
@ -38,7 +39,7 @@ func NewConsumer(group string, streams []string, messageIterator messages.Messag
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
for _, stream := range streams {
|
for _, stream := range streams {
|
||||||
err := redis.XGroupCreateMkStream(stream, group, "0").Err()
|
err := redis.XGroupCreateMkStream(context.Background(), stream, group, "0").Err()
|
||||||
if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
|
if err != nil && err.Error() != "BUSYGROUP Consumer Group name already exists" {
|
||||||
log.Fatalln(err)
|
log.Fatalln(err)
|
||||||
}
|
}
|
||||||
|
|
@ -75,7 +76,7 @@ func (c *Consumer) Rebalanced() <-chan *types.PartitionsRebalancedEvent {
|
||||||
|
|
||||||
func (c *Consumer) ConsumeNext() error {
|
func (c *Consumer) ConsumeNext() error {
|
||||||
// MBTODO: read in go routine, send messages to channel
|
// MBTODO: read in go routine, send messages to channel
|
||||||
res, err := c.redis.XReadGroup(&_redis.XReadGroupArgs{
|
res, err := c.redis.XReadGroup(context.Background(), &_redis.XReadGroupArgs{
|
||||||
Group: c.group,
|
Group: c.group,
|
||||||
Consumer: c.group,
|
Consumer: c.group,
|
||||||
Streams: c.streams,
|
Streams: c.streams,
|
||||||
|
|
@ -115,7 +116,7 @@ func (c *Consumer) ConsumeNext() error {
|
||||||
bID := ts<<13 | (idx & 0x1FFF) // Max: 4096 messages/ms for 69 years
|
bID := ts<<13 | (idx & 0x1FFF) // Max: 4096 messages/ms for 69 years
|
||||||
c.messageIterator.Iterate([]byte(valueString), messages.NewBatchInfo(sessionID, r.Stream, bID, 0, int64(ts)))
|
c.messageIterator.Iterate([]byte(valueString), messages.NewBatchInfo(sessionID, r.Stream, bID, 0, int64(ts)))
|
||||||
if c.autoCommit {
|
if c.autoCommit {
|
||||||
if err = c.redis.XAck(r.Stream, c.group, m.ID).Err(); err != nil {
|
if err = c.redis.XAck(context.Background(), r.Stream, c.group, m.ID).Err(); err != nil {
|
||||||
return errors.Wrapf(err, "Acknoledgment error for messageID %v", m.ID)
|
return errors.Wrapf(err, "Acknoledgment error for messageID %v", m.ID)
|
||||||
}
|
}
|
||||||
} else {
|
} else {
|
||||||
|
|
@ -134,7 +135,7 @@ func (c *Consumer) Commit() error {
|
||||||
if len(idsInfo.id) == 0 {
|
if len(idsInfo.id) == 0 {
|
||||||
continue
|
continue
|
||||||
}
|
}
|
||||||
if err := c.redis.XAck(stream, c.group, idsInfo.id...).Err(); err != nil {
|
if err := c.redis.XAck(context.Background(), stream, c.group, idsInfo.id...).Err(); err != nil {
|
||||||
return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err)
|
return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err)
|
||||||
}
|
}
|
||||||
c.idsPending[stream].id = nil
|
c.idsPending[stream].id = nil
|
||||||
|
|
@ -156,7 +157,7 @@ func (c *Consumer) CommitBack(gap int64) error {
|
||||||
maxI := sort.Search(len(idsInfo.ts), func(i int) bool {
|
maxI := sort.Search(len(idsInfo.ts), func(i int) bool {
|
||||||
return idsInfo.ts[i] > maxTs
|
return idsInfo.ts[i] > maxTs
|
||||||
})
|
})
|
||||||
if err := c.redis.XAck(stream, c.group, idsInfo.id[:maxI]...).Err(); err != nil {
|
if err := c.redis.XAck(context.Background(), stream, c.group, idsInfo.id[:maxI]...).Err(); err != nil {
|
||||||
return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err)
|
return errors.Wrapf(err, "Redisstreams: Acknoledgment error on commit %v", err)
|
||||||
}
|
}
|
||||||
c.idsPending[stream].id = idsInfo.id[maxI:]
|
c.idsPending[stream].id = idsInfo.id[maxI:]
|
||||||
|
|
|
||||||
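The consumer hunks make the same v9 adjustment for streams: XGroupCreateMkStream, XReadGroup and XAck all gain a context argument. A compact sketch of one read-and-ack cycle, assuming github.com/redis/go-redis/v9 and leaving out the pending-ID bookkeeping the real consumer keeps for Commit/CommitBack:

```go
package redisstream

import (
	"context"
	"time"

	"github.com/redis/go-redis/v9"
)

// consumeOnce reads a batch from the consumer group and acknowledges each
// message. The streams slice must contain stream names followed by IDs,
// e.g. []string{"events", ">"}.
func consumeOnce(r *redis.Client, group string, streams []string) error {
	ctx := context.Background()
	res, err := r.XReadGroup(ctx, &redis.XReadGroupArgs{
		Group:    group,
		Consumer: group,
		Streams:  streams,
		Count:    10,
		Block:    time.Second,
	}).Result()
	if err != nil {
		return err
	}
	for _, stream := range res {
		for _, msg := range stream.Messages {
			// ... hand msg.Values to the message iterator here ...
			if err := r.XAck(ctx, stream.Stream, group, msg.ID).Err(); err != nil {
				return err
			}
		}
	}
	return nil
}
```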
|
|
@ -1,9 +1,11 @@
|
||||||
package redisstream
|
package redisstream
|
||||||
|
|
||||||
import (
|
import (
|
||||||
"github.com/go-redis/redis"
|
"context"
|
||||||
"log"
|
"log"
|
||||||
|
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
|
||||||
"openreplay/backend/pkg/env"
|
"openreplay/backend/pkg/env"
|
||||||
)
|
)
|
||||||
|
|
||||||
|
|
@ -30,10 +32,11 @@ func (p *Producer) Produce(topic string, key uint64, value []byte) error {
|
||||||
"sessionID": key,
|
"sessionID": key,
|
||||||
"value": value,
|
"value": value,
|
||||||
},
|
},
|
||||||
|
MaxLen: p.maxLenApprox,
|
||||||
}
|
}
|
||||||
args.MaxLenApprox = p.maxLenApprox
|
args.MaxLen = p.maxLenApprox
|
||||||
|
|
||||||
_, err := p.redis.XAdd(args).Result()
|
_, err := p.redis.XAdd(context.Background(), args).Result()
|
||||||
if err != nil {
|
if err != nil {
|
||||||
return err
|
return err
|
||||||
}
|
}
|
||||||
|
|
|
||||||
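The producer hunk replaces the removed MaxLenApprox field with MaxLen and passes a context to XAdd. A minimal sketch of the migrated call; the Approx flag here is an assumption on our side to keep the approximate "~" trimming, the diff itself only sets MaxLen:

```go
package redisstream

import (
	"context"

	"github.com/redis/go-redis/v9"
)

// produce shows the migrated XAdd call: v9 drops MaxLenApprox, so the
// producer sets MaxLen and passes a context as the first argument.
func produce(r *redis.Client, topic string, sessionID uint64, value []byte, maxLen int64) error {
	args := &redis.XAddArgs{
		Stream: topic,
		Values: map[string]interface{}{
			"sessionID": sessionID,
			"value":     value,
		},
		MaxLen: maxLen,
		Approx: true, // approximate trimming; assumption, see lead-in
	}
	_, err := r.XAdd(context.Background(), args).Result()
	return err
}
```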
|
|
@ -3,7 +3,8 @@ package redisstream
|
||||||
import (
|
import (
|
||||||
"regexp"
|
"regexp"
|
||||||
|
|
||||||
"github.com/go-redis/redis"
|
"github.com/docker/distribution/context"
|
||||||
|
"github.com/redis/go-redis/v9"
|
||||||
|
|
||||||
"openreplay/backend/pkg/env"
|
"openreplay/backend/pkg/env"
|
||||||
)
|
)
|
||||||
|
|
@ -28,7 +29,7 @@ func getRedisClient() (*redis.Client, error) {
|
||||||
}
|
}
|
||||||
|
|
||||||
redisClient = redis.NewClient(options)
|
redisClient = redis.NewClient(options)
|
||||||
if _, err := redisClient.Ping().Result(); err != nil {
|
if _, err := redisClient.Ping(context.Background()).Result(); err != nil {
|
||||||
return nil, err
|
return nil, err
|
||||||
}
|
}
|
||||||
return redisClient, nil
|
return redisClient, nil
|
||||||
|
|
|
||||||
|
|
@ -13,7 +13,7 @@ func (e *routerImpl) health(w http.ResponseWriter, r *http.Request) {
|
||||||
|
|
||||||
func (e *routerImpl) healthMiddleware(next http.Handler) http.Handler {
|
func (e *routerImpl) healthMiddleware(next http.Handler) http.Handler {
|
||||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||||
if r.URL.Path == "/" {
|
if r.URL.Path == "/" || r.URL.Path == "/health" {
|
||||||
w.WriteHeader(http.StatusOK)
|
w.WriteHeader(http.StatusOK)
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
|
|
|
||||||
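The router hunk extends the health middleware so that both "/" and the new "/health" path answer with a bare 200 before the rest of the chain runs. The middleware, reassembled from the hunk:

```go
package server

import "net/http"

// healthMiddleware short-circuits "/" and "/health" with a plain 200 and
// passes every other request to the wrapped handler.
func healthMiddleware(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		if r.URL.Path == "/" || r.URL.Path == "/health" {
			w.WriteHeader(http.StatusOK)
			return
		}
		next.ServeHTTP(w, r)
	})
}
```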
|
|
@ -135,11 +135,6 @@ func (e *handlersImpl) startSessionHandlerWeb(w http.ResponseWriter, r *http.Req
|
||||||
|
|
||||||
// Add tracker version to context
|
// Add tracker version to context
|
||||||
r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))
|
r = r.WithContext(context.WithValue(r.Context(), "tracker", req.TrackerVersion))
|
||||||
if err := validateTrackerVersion(req.TrackerVersion); err != nil {
|
|
||||||
e.log.Error(r.Context(), "unsupported tracker version: %s, err: %s", req.TrackerVersion, err)
|
|
||||||
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUpgradeRequired, errors.New("please upgrade the tracker version"), startTime, r.URL.Path, bodySize)
|
|
||||||
return
|
|
||||||
}
|
|
||||||
|
|
||||||
// Handler's logic
|
// Handler's logic
|
||||||
if req.ProjectKey == nil {
|
if req.ProjectKey == nil {
|
||||||
|
|
@ -162,6 +157,13 @@ func (e *handlersImpl) startSessionHandlerWeb(w http.ResponseWriter, r *http.Req
|
||||||
// Add projectID to context
|
// Add projectID to context
|
||||||
r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))
|
r = r.WithContext(context.WithValue(r.Context(), "projectID", fmt.Sprintf("%d", p.ProjectID)))
|
||||||
|
|
||||||
|
// Validate tracker version
|
||||||
|
if err := validateTrackerVersion(req.TrackerVersion); err != nil {
|
||||||
|
e.log.Error(r.Context(), "unsupported tracker version: %s, err: %s", req.TrackerVersion, err)
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusUpgradeRequired, errors.New("please upgrade the tracker version"), startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
// Check if the project supports mobile sessions
|
// Check if the project supports mobile sessions
|
||||||
if !p.IsWeb() {
|
if !p.IsWeb() {
|
||||||
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize)
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, errors.New("project doesn't support web sessions"), startTime, r.URL.Path, bodySize)
|
||||||
|
|
|
||||||
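These handler hunks move validateTrackerVersion from before the project lookup to after it, so an outdated tracker is rejected with 426 Upgrade Required only once the projectID is already attached to the request context. A compressed sketch of the new ordering, with the project lookup and the responder reduced to stand-in helpers (both are assumptions, not the repository's real signatures):

```go
package web

import "net/http"

// startSessionOrder sketches the reordered checks: resolve the project first,
// then reject unsupported tracker versions, then continue with session setup.
func startSessionOrder(w http.ResponseWriter, projectKey, trackerVersion string,
	lookupProject func(string) error, validateTrackerVersion func(string) error) {
	if err := lookupProject(projectKey); err != nil {
		http.Error(w, "project not found", http.StatusNotFound)
		return
	}
	if err := validateTrackerVersion(trackerVersion); err != nil {
		http.Error(w, "please upgrade the tracker version", http.StatusUpgradeRequired)
		return
	}
	// ... continue with web/mobile capability checks and session creation ...
}
```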
|
|
@ -29,7 +29,7 @@ type Task struct {
|
||||||
Duration int
|
Duration int
|
||||||
Status string
|
Status string
|
||||||
Path string
|
Path string
|
||||||
tx pool.Tx
|
tx *pool.Tx
|
||||||
}
|
}
|
||||||
|
|
||||||
func (t *Task) HasToTrim() bool {
|
func (t *Task) HasToTrim() bool {
|
||||||
|
|
@ -65,7 +65,7 @@ func (t *tasksImpl) Get() (task *Task, err error) {
|
||||||
}
|
}
|
||||||
}()
|
}()
|
||||||
|
|
||||||
task = &Task{tx: pool.Tx{Tx: tx}}
|
task = &Task{tx: tx}
|
||||||
sql := `SELECT spot_id, crop, duration FROM spots.tasks WHERE status = 'pending' ORDER BY added_time FOR UPDATE SKIP LOCKED LIMIT 1`
|
sql := `SELECT spot_id, crop, duration FROM spots.tasks WHERE status = 'pending' ORDER BY added_time FOR UPDATE SKIP LOCKED LIMIT 1`
|
||||||
err = tx.TxQueryRow(sql).Scan(&task.SpotID, &task.Crop, &task.Duration)
|
err = tx.TxQueryRow(sql).Scan(&task.SpotID, &task.Crop, &task.Duration)
|
||||||
if err != nil {
|
if err != nil {
|
||||||
|
|
|
||||||
|
|
@ -52,6 +52,7 @@ func NewTranscoder(cfg *spot.Config, log logger.Logger, objStorage objectstorage
|
||||||
tasks: NewTasks(conn),
|
tasks: NewTasks(conn),
|
||||||
streams: NewStreams(log, conn, objStorage),
|
streams: NewStreams(log, conn, objStorage),
|
||||||
spots: spots,
|
spots: spots,
|
||||||
|
metrics: metrics,
|
||||||
}
|
}
|
||||||
tnsc.prepareWorkers = workers.NewPool(2, 4, tnsc.prepare)
|
tnsc.prepareWorkers = workers.NewPool(2, 4, tnsc.prepare)
|
||||||
tnsc.transcodeWorkers = workers.NewPool(2, 4, tnsc.transcode)
|
tnsc.transcodeWorkers = workers.NewPool(2, 4, tnsc.transcode)
|
||||||
|
|
|
||||||
|
|
@ -8,7 +8,6 @@ ignore:
|
||||||
- "**/*/build/**"
|
- "**/*/build/**"
|
||||||
- "**/*/.test.*"
|
- "**/*/.test.*"
|
||||||
- "**/*/version.ts"
|
- "**/*/version.ts"
|
||||||
review:
|
comment:
|
||||||
poem: false
|
layout: "condensed_header, condensed_files, condensed_footer"
|
||||||
review_status: false
|
hide_project_coverage: TRUE
|
||||||
collapse_walkthrough: true
|
|
||||||
|
|
|
||||||
ee/api/.gitignore (vendored), 6 changes
|
|
@ -223,10 +223,14 @@ Pipfile.lock
|
||||||
/chalicelib/core/sessions/performance_event.py
|
/chalicelib/core/sessions/performance_event.py
|
||||||
/chalicelib/core/sessions/sessions_viewed/sessions_viewed.py
|
/chalicelib/core/sessions/sessions_viewed/sessions_viewed.py
|
||||||
/chalicelib/core/sessions/unprocessed_sessions.py
|
/chalicelib/core/sessions/unprocessed_sessions.py
|
||||||
|
/chalicelib/core/sessions/__init__.py
|
||||||
|
/chalicelib/core/sessions/sessions_legacy_mobil.py
|
||||||
|
/chalicelib/core/sessions/sessions_search_exp.py
|
||||||
/chalicelib/core/metrics/modules
|
/chalicelib/core/metrics/modules
|
||||||
/chalicelib/core/socket_ios.py
|
/chalicelib/core/socket_ios.py
|
||||||
/chalicelib/core/sourcemaps
|
/chalicelib/core/sourcemaps
|
||||||
/chalicelib/core/tags.py
|
/chalicelib/core/tags.py
|
||||||
|
/chalicelib/core/product_analytics
|
||||||
/chalicelib/saml
|
/chalicelib/saml
|
||||||
/chalicelib/utils/__init__.py
|
/chalicelib/utils/__init__.py
|
||||||
/chalicelib/utils/args_transformer.py
|
/chalicelib/utils/args_transformer.py
|
||||||
|
|
@ -289,3 +293,5 @@ Pipfile.lock
|
||||||
/chalicelib/core/errors/errors_ch.py
|
/chalicelib/core/errors/errors_ch.py
|
||||||
/chalicelib/core/errors/errors_details.py
|
/chalicelib/core/errors/errors_details.py
|
||||||
/chalicelib/utils/contextual_validators.py
|
/chalicelib/utils/contextual_validators.py
|
||||||
|
/routers/subs/product_analytics.py
|
||||||
|
/schemas/product_analytics.py
|
||||||
|
|
|
||||||
|
|
@ -6,25 +6,23 @@ name = "pypi"
|
||||||
[packages]
|
[packages]
|
||||||
urllib3 = "==2.3.0"
|
urllib3 = "==2.3.0"
|
||||||
requests = "==2.32.3"
|
requests = "==2.32.3"
|
||||||
boto3 = "==1.36.12"
|
boto3 = "==1.37.21"
|
||||||
pyjwt = "==2.10.1"
|
pyjwt = "==2.10.1"
|
||||||
psycopg2-binary = "==2.9.10"
|
psycopg2-binary = "==2.9.10"
|
||||||
psycopg = {extras = ["pool", "binary"], version = "==3.2.4"}
|
psycopg = {extras = ["pool", "binary"], version = "==3.2.6"}
|
||||||
clickhouse-driver = {extras = ["lz4"], version = "==0.2.9"}
|
|
||||||
clickhouse-connect = "==0.8.15"
|
clickhouse-connect = "==0.8.15"
|
||||||
elasticsearch = "==8.17.1"
|
elasticsearch = "==8.17.2"
|
||||||
jira = "==3.8.0"
|
jira = "==3.8.0"
|
||||||
cachetools = "==5.5.1"
|
cachetools = "==5.5.2"
|
||||||
fastapi = "==0.115.8"
|
fastapi = "==0.115.12"
|
||||||
uvicorn = {extras = ["standard"], version = "==0.34.0"}
|
uvicorn = {extras = ["standard"], version = "==0.34.0"}
|
||||||
gunicorn = "==23.0.0"
|
gunicorn = "==23.0.0"
|
||||||
python-decouple = "==3.8"
|
python-decouple = "==3.8"
|
||||||
pydantic = {extras = ["email"], version = "==2.10.6"}
|
pydantic = {extras = ["email"], version = "==2.10.6"}
|
||||||
apscheduler = "==3.11.0"
|
apscheduler = "==3.11.0"
|
||||||
python3-saml = "==1.16.0"
|
|
||||||
python-multipart = "==0.0.20"
|
python-multipart = "==0.0.20"
|
||||||
redis = "==5.2.1"
|
redis = "==5.2.1"
|
||||||
azure-storage-blob = "==12.24.1"
|
azure-storage-blob = "==12.25.0"
|
||||||
|
|
||||||
[dev-packages]
|
[dev-packages]
|
||||||
|
|
||||||
|
|
|
||||||
|
|
@ -21,7 +21,7 @@ from chalicelib.utils import pg_client, ch_client
|
||||||
from crons import core_crons, ee_crons, core_dynamic_crons
|
from crons import core_crons, ee_crons, core_dynamic_crons
|
||||||
from routers import core, core_dynamic
|
from routers import core, core_dynamic
|
||||||
from routers import ee
|
from routers import ee
|
||||||
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_anaytics
|
from routers.subs import insights, metrics, v1_api, health, usability_tests, spot, product_analytics
|
||||||
from routers.subs import v1_api_ee
|
from routers.subs import v1_api_ee
|
||||||
|
|
||||||
if config("ENABLE_SSO", cast=bool, default=True):
|
if config("ENABLE_SSO", cast=bool, default=True):
|
||||||
|
|
@ -150,9 +150,9 @@ app.include_router(spot.public_app)
|
||||||
app.include_router(spot.app)
|
app.include_router(spot.app)
|
||||||
app.include_router(spot.app_apikey)
|
app.include_router(spot.app_apikey)
|
||||||
|
|
||||||
app.include_router(product_anaytics.public_app)
|
app.include_router(product_analytics.public_app, prefix="/ap")
|
||||||
app.include_router(product_anaytics.app)
|
app.include_router(product_analytics.app, prefix="/ap")
|
||||||
app.include_router(product_anaytics.app_apikey)
|
app.include_router(product_analytics.app_apikey, prefix="/ap")
|
||||||
|
|
||||||
if config("ENABLE_SSO", cast=bool, default=True):
|
if config("ENABLE_SSO", cast=bool, default=True):
|
||||||
app.include_router(saml.public_app)
|
app.include_router(saml.public_app)
|
||||||
|
|
|
||||||
|
|
@ -86,8 +86,7 @@ def __generic_query(typename, value_length=None):
|
||||||
ORDER BY value"""
|
ORDER BY value"""
|
||||||
|
|
||||||
if value_length is None or value_length > 2:
|
if value_length is None or value_length > 2:
|
||||||
return f"""SELECT DISTINCT ON(value, type) value, type
|
return f"""(SELECT DISTINCT value, type
|
||||||
FROM ((SELECT DISTINCT value, type
|
|
||||||
FROM {TABLE}
|
FROM {TABLE}
|
||||||
WHERE
|
WHERE
|
||||||
project_id = %(project_id)s
|
project_id = %(project_id)s
|
||||||
|
|
@ -103,7 +102,7 @@ def __generic_query(typename, value_length=None):
|
||||||
AND type='{typename.upper()}'
|
AND type='{typename.upper()}'
|
||||||
AND value ILIKE %(value)s
|
AND value ILIKE %(value)s
|
||||||
ORDER BY value
|
ORDER BY value
|
||||||
LIMIT 5)) AS raw;"""
|
LIMIT 5);"""
|
||||||
return f"""SELECT DISTINCT value, type
|
return f"""SELECT DISTINCT value, type
|
||||||
FROM {TABLE}
|
FROM {TABLE}
|
||||||
WHERE
|
WHERE
|
||||||
|
|
@ -258,7 +257,7 @@ def __search_metadata(project_id, value, key=None, source=None):
|
||||||
WHERE project_id = %(project_id)s
|
WHERE project_id = %(project_id)s
|
||||||
AND {colname} ILIKE %(svalue)s LIMIT 5)""")
|
AND {colname} ILIKE %(svalue)s LIMIT 5)""")
|
||||||
with ch_client.ClickHouseClient() as cur:
|
with ch_client.ClickHouseClient() as cur:
|
||||||
query = cur.format(query=f"""SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
|
query = cur.format(query=f"""SELECT key, value, 'METADATA' AS TYPE
|
||||||
FROM({" UNION ALL ".join(sub_from)}) AS all_metas
|
FROM({" UNION ALL ".join(sub_from)}) AS all_metas
|
||||||
LIMIT 5;""", parameters={"project_id": project_id, "value": helper.string_to_sql_like(value),
|
LIMIT 5;""", parameters={"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||||
"svalue": helper.string_to_sql_like("^" + value)})
|
"svalue": helper.string_to_sql_like("^" + value)})
|
||||||
|
|
|
||||||
|
|
@ -71,7 +71,7 @@ def get_details(project_id, error_id, user_id, **data):
|
||||||
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(0)
|
MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(0)
|
||||||
|
|
||||||
ch_basic_query = errors_helper.__get_basic_constraints_ch(time_constraint=False)
|
ch_basic_query = errors_helper.__get_basic_constraints_ch(time_constraint=False)
|
||||||
ch_basic_query.append("error_id = %(error_id)s")
|
ch_basic_query.append("toString(`$properties`.error_id) = %(error_id)s")
|
||||||
|
|
||||||
with ch_client.ClickHouseClient() as ch:
|
with ch_client.ClickHouseClient() as ch:
|
||||||
data["startDate24"] = TimeUTC.now(-1)
|
data["startDate24"] = TimeUTC.now(-1)
|
||||||
|
|
@ -95,7 +95,7 @@ def get_details(project_id, error_id, user_id, **data):
|
||||||
"error_id": error_id}
|
"error_id": error_id}
|
||||||
|
|
||||||
main_ch_query = f"""\
|
main_ch_query = f"""\
|
||||||
WITH pre_processed AS (SELECT error_id,
|
WITH pre_processed AS (SELECT toString(`$properties`.error_id) AS error_id,
|
||||||
toString(`$properties`.name) AS name,
|
toString(`$properties`.name) AS name,
|
||||||
toString(`$properties`.message) AS message,
|
toString(`$properties`.message) AS message,
|
||||||
session_id,
|
session_id,
|
||||||
|
|
@ -183,7 +183,7 @@ def get_details(project_id, error_id, user_id, **data):
|
||||||
AND `$event_name` = 'ERROR'
|
AND `$event_name` = 'ERROR'
|
||||||
AND events.created_at >= toDateTime(timestamp / 1000)
|
AND events.created_at >= toDateTime(timestamp / 1000)
|
||||||
AND events.created_at < toDateTime((timestamp + %(step_size24)s) / 1000)
|
AND events.created_at < toDateTime((timestamp + %(step_size24)s) / 1000)
|
||||||
AND error_id = %(error_id)s
|
AND toString(`$properties`.error_id) = %(error_id)s
|
||||||
GROUP BY timestamp
|
GROUP BY timestamp
|
||||||
ORDER BY timestamp) AS chart_details
|
ORDER BY timestamp) AS chart_details
|
||||||
) AS chart_details24 ON TRUE
|
) AS chart_details24 ON TRUE
|
||||||
|
|
@ -196,7 +196,7 @@ def get_details(project_id, error_id, user_id, **data):
|
||||||
AND `$event_name` = 'ERROR'
|
AND `$event_name` = 'ERROR'
|
||||||
AND events.created_at >= toDateTime(timestamp / 1000)
|
AND events.created_at >= toDateTime(timestamp / 1000)
|
||||||
AND events.created_at < toDateTime((timestamp + %(step_size30)s) / 1000)
|
AND events.created_at < toDateTime((timestamp + %(step_size30)s) / 1000)
|
||||||
AND error_id = %(error_id)s
|
AND toString(`$properties`.error_id) = %(error_id)s
|
||||||
GROUP BY timestamp
|
GROUP BY timestamp
|
||||||
ORDER BY timestamp) AS chart_details
|
ORDER BY timestamp) AS chart_details
|
||||||
) AS chart_details30 ON TRUE;"""
|
) AS chart_details30 ON TRUE;"""
|
||||||
|
|
|
||||||
|
|
@ -1,17 +0,0 @@
|
||||||
import logging
|
|
||||||
|
|
||||||
from decouple import config
|
|
||||||
|
|
||||||
logger = logging.getLogger(__name__)
|
|
||||||
from . import sessions_pg
|
|
||||||
from . import sessions_pg as sessions_legacy
|
|
||||||
from . import sessions_ch
|
|
||||||
from . import sessions_search as sessions_search_legacy
|
|
||||||
|
|
||||||
if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
|
|
||||||
logger.info(">>> Using experimental sessions search")
|
|
||||||
from . import sessions_ch as sessions
|
|
||||||
from . import sessions_search_exp as sessions_search
|
|
||||||
else:
|
|
||||||
from . import sessions_pg as sessions
|
|
||||||
from . import sessions_search as sessions_search
|
|
||||||
|
|
@ -44,11 +44,15 @@ rm -rf ./chalicelib/core/sessions/sessions_search.py
|
||||||
rm -rf ./chalicelib/core/sessions/performance_event.py
|
rm -rf ./chalicelib/core/sessions/performance_event.py
|
||||||
rm -rf ./chalicelib/core/sessions/sessions_viewed/sessions_viewed.py
|
rm -rf ./chalicelib/core/sessions/sessions_viewed/sessions_viewed.py
|
||||||
rm -rf ./chalicelib/core/sessions/unprocessed_sessions.py
|
rm -rf ./chalicelib/core/sessions/unprocessed_sessions.py
|
||||||
|
rm -rf ./chalicelib/core/sessions/__init__.py
|
||||||
|
rm -rf ./chalicelib/core/sessions/sessions_legacy_mobil.py
|
||||||
|
rm -rf ./chalicelib/core/sessions/sessions_search_exp.py
|
||||||
rm -rf ./chalicelib/core/metrics/modules
|
rm -rf ./chalicelib/core/metrics/modules
|
||||||
rm -rf ./chalicelib/core/socket_ios.py
|
rm -rf ./chalicelib/core/socket_ios.py
|
||||||
rm -rf ./chalicelib/core/sourcemaps
|
rm -rf ./chalicelib/core/sourcemaps
|
||||||
rm -rf ./chalicelib/core/user_testing.py
|
rm -rf ./chalicelib/core/user_testing.py
|
||||||
rm -rf ./chalicelib/core/tags.py
|
rm -rf ./chalicelib/core/tags.py
|
||||||
|
rm -rf ./chalicelib/core/product_analytics
|
||||||
rm -rf ./chalicelib/saml
|
rm -rf ./chalicelib/saml
|
||||||
rm -rf ./chalicelib/utils/__init__.py
|
rm -rf ./chalicelib/utils/__init__.py
|
||||||
rm -rf ./chalicelib/utils/args_transformer.py
|
rm -rf ./chalicelib/utils/args_transformer.py
|
||||||
|
|
@ -109,3 +113,5 @@ rm -rf ./chalicelib/core/errors/errors_pg.py
|
||||||
rm -rf ./chalicelib/core/errors/errors_ch.py
|
rm -rf ./chalicelib/core/errors/errors_ch.py
|
||||||
rm -rf ./chalicelib/core/errors/errors_details.py
|
rm -rf ./chalicelib/core/errors/errors_details.py
|
||||||
rm -rf ./chalicelib/utils/contextual_validators.py
|
rm -rf ./chalicelib/utils/contextual_validators.py
|
||||||
|
rm -rf ./routers/subs/product_analytics.py
|
||||||
|
rm -rf ./schemas/product_analytics.py
|
||||||
|
|
@ -1,19 +1,18 @@
|
||||||
urllib3==2.3.0
|
urllib3==2.3.0
|
||||||
requests==2.32.3
|
requests==2.32.3
|
||||||
boto3==1.36.12
|
boto3==1.37.21
|
||||||
pyjwt==2.10.1
|
pyjwt==2.10.1
|
||||||
psycopg2-binary==2.9.10
|
psycopg2-binary==2.9.10
|
||||||
psycopg[pool,binary]==3.2.4
|
psycopg[pool,binary]==3.2.6
|
||||||
clickhouse-driver[lz4]==0.2.9
|
|
||||||
clickhouse-connect==0.8.15
|
clickhouse-connect==0.8.15
|
||||||
elasticsearch==8.17.1
|
elasticsearch==8.17.2
|
||||||
jira==3.8.0
|
jira==3.8.0
|
||||||
cachetools==5.5.1
|
cachetools==5.5.2
|
||||||
|
|
||||||
fastapi==0.115.8
|
fastapi==0.115.12
|
||||||
uvicorn[standard]==0.34.0
|
uvicorn[standard]==0.34.0
|
||||||
python-decouple==3.8
|
python-decouple==3.8
|
||||||
pydantic[email]==2.10.6
|
pydantic[email]==2.10.6
|
||||||
apscheduler==3.11.0
|
apscheduler==3.11.0
|
||||||
|
|
||||||
azure-storage-blob==12.24.1
|
azure-storage-blob==12.25.0
|
||||||
|
|
|
||||||
|
|
@ -1,19 +1,18 @@
|
||||||
urllib3==2.3.0
|
urllib3==2.3.0
|
||||||
requests==2.32.3
|
requests==2.32.3
|
||||||
boto3==1.36.12
|
boto3==1.37.21
|
||||||
pyjwt==2.10.1
|
pyjwt==2.10.1
|
||||||
psycopg2-binary==2.9.10
|
psycopg2-binary==2.9.10
|
||||||
psycopg[pool,binary]==3.2.4
|
psycopg[pool,binary]==3.2.6
|
||||||
clickhouse-driver[lz4]==0.2.9
|
|
||||||
clickhouse-connect==0.8.15
|
clickhouse-connect==0.8.15
|
||||||
elasticsearch==8.17.1
|
elasticsearch==8.17.2
|
||||||
jira==3.8.0
|
jira==3.8.0
|
||||||
cachetools==5.5.1
|
cachetools==5.5.2
|
||||||
|
|
||||||
fastapi==0.115.8
|
fastapi==0.115.12
|
||||||
python-decouple==3.8
|
python-decouple==3.8
|
||||||
pydantic[email]==2.10.6
|
pydantic[email]==2.10.6
|
||||||
apscheduler==3.11.0
|
apscheduler==3.11.0
|
||||||
|
|
||||||
redis==5.2.1
|
redis==5.2.1
|
||||||
azure-storage-blob==12.24.1
|
azure-storage-blob==12.25.0
|
||||||
|
|
|
||||||
|
|
@ -1,16 +1,15 @@
|
||||||
urllib3==2.3.0
|
urllib3==2.3.0
|
||||||
requests==2.32.3
|
requests==2.32.3
|
||||||
boto3==1.36.12
|
boto3==1.37.21
|
||||||
pyjwt==2.10.1
|
pyjwt==2.10.1
|
||||||
psycopg2-binary==2.9.10
|
psycopg2-binary==2.9.10
|
||||||
psycopg[pool,binary]==3.2.4
|
psycopg[pool,binary]==3.2.6
|
||||||
clickhouse-driver[lz4]==0.2.9
|
|
||||||
clickhouse-connect==0.8.15
|
clickhouse-connect==0.8.15
|
||||||
elasticsearch==8.17.1
|
elasticsearch==8.17.2
|
||||||
jira==3.8.0
|
jira==3.8.0
|
||||||
cachetools==5.5.1
|
cachetools==5.5.2
|
||||||
|
|
||||||
fastapi==0.115.8
|
fastapi==0.115.12
|
||||||
uvicorn[standard]==0.34.0
|
uvicorn[standard]==0.34.0
|
||||||
gunicorn==23.0.0
|
gunicorn==23.0.0
|
||||||
python-decouple==3.8
|
python-decouple==3.8
|
||||||
|
|
@ -19,10 +18,9 @@ apscheduler==3.11.0
|
||||||
|
|
||||||
# TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252
|
# TODO: enable after xmlsec fix https://github.com/xmlsec/python-xmlsec/issues/252
|
||||||
#--no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml
|
#--no-binary is used to avoid libxml2 library version incompatibilities between xmlsec and lxml
|
||||||
python3-saml==1.16.0
|
|
||||||
--no-binary=lxml
|
|
||||||
python-multipart==0.0.20
|
python-multipart==0.0.20
|
||||||
|
|
||||||
redis==5.2.1
|
redis==5.2.1
|
||||||
#confluent-kafka==2.1.0
|
#confluent-kafka==2.1.0
|
||||||
azure-storage-blob==12.24.1
|
azure-storage-blob==12.25.0
|
||||||
|
|
|
||||||
|
|
@ -1,4 +1,5 @@
|
||||||
from .schemas import *
|
from .schemas import *
|
||||||
from .schemas_ee import *
|
from .schemas_ee import *
|
||||||
from .assist_stats_schema import *
|
from .assist_stats_schema import *
|
||||||
|
from .product_analytics import *
|
||||||
from . import overrides as _overrides
|
from . import overrides as _overrides
|
||||||
|
|
|
||||||
|
|
@ -4,7 +4,7 @@ from pydantic import Field, EmailStr, field_validator, model_validator
|
||||||
|
|
||||||
from chalicelib.utils.TimeUTC import TimeUTC
|
from chalicelib.utils.TimeUTC import TimeUTC
|
||||||
from . import schemas
|
from . import schemas
|
||||||
from .overrides import BaseModel, Enum, ORUnion
|
from .overrides import BaseModel, Enum
|
||||||
from .transformers_validators import remove_whitespace
|
from .transformers_validators import remove_whitespace
|
||||||
|
|
||||||
|
|
||||||
|
|
@ -91,33 +91,6 @@ class TrailSearchPayloadSchema(schemas._PaginatedSchema):
|
||||||
return values
|
return values
|
||||||
|
|
||||||
|
|
||||||
class SessionModel(BaseModel):
|
|
||||||
duration: int
|
|
||||||
errorsCount: int
|
|
||||||
eventsCount: int
|
|
||||||
favorite: bool = Field(default=False)
|
|
||||||
issueScore: int
|
|
||||||
issueTypes: List[schemas.IssueType] = Field(default=[])
|
|
||||||
metadata: dict = Field(default={})
|
|
||||||
pagesCount: int
|
|
||||||
platform: str
|
|
||||||
projectId: int
|
|
||||||
sessionId: str
|
|
||||||
startTs: int
|
|
||||||
timezone: Optional[str]
|
|
||||||
userAnonymousId: Optional[str]
|
|
||||||
userBrowser: str
|
|
||||||
userCity: str
|
|
||||||
userCountry: str
|
|
||||||
userDevice: Optional[str]
|
|
||||||
userDeviceType: str
|
|
||||||
userId: Optional[str]
|
|
||||||
userOs: str
|
|
||||||
userState: str
|
|
||||||
userUuid: str
|
|
||||||
viewed: bool = Field(default=False)
|
|
||||||
|
|
||||||
|
|
||||||
class AssistRecordUpdatePayloadSchema(BaseModel):
|
class AssistRecordUpdatePayloadSchema(BaseModel):
|
||||||
name: str = Field(..., min_length=1)
|
name: str = Field(..., min_length=1)
|
||||||
_transform_name = field_validator('name', mode="before")(remove_whitespace)
|
_transform_name = field_validator('name', mode="before")(remove_whitespace)
|
||||||
|
|
|
||||||
ee/assist/.gitignore (vendored), 16 changes
|
|
@ -2,20 +2,4 @@
|
||||||
node_modules
|
node_modules
|
||||||
npm-debug.log
|
npm-debug.log
|
||||||
.cache
|
.cache
|
||||||
test.html
|
|
||||||
build.sh
|
|
||||||
|
|
||||||
|
|
||||||
|
|
||||||
servers/peerjs-server.js
|
|
||||||
servers/sourcemaps-handler.js
|
|
||||||
servers/sourcemaps-server.js
|
|
||||||
/utils/geoIP.js
|
|
||||||
/utils/health.js
|
|
||||||
/utils/HeapSnapshot.js
|
|
||||||
/utils/helper.js
|
|
||||||
/utils/assistHelper.js
|
|
||||||
/utils/httpHandlers.js
|
|
||||||
/utils/socketHandlers.js
|
|
||||||
.local
|
|
||||||
*.mmdb
|
*.mmdb
|
||||||
|
|
@ -1,11 +1,8 @@
|
||||||
#ARCH can be amd64 or arm64
|
|
||||||
ARG ARCH=amd64
|
ARG ARCH=amd64
|
||||||
|
|
||||||
FROM --platform=linux/$ARCH node:23-alpine
|
FROM --platform=linux/$ARCH node:23-alpine
|
||||||
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
|
LABEL Maintainer="Zavorotynskiy Alexander <zavorotynskiy@pm.me>"
|
||||||
RUN apk add --no-cache tini git libc6-compat
|
RUN apk add --no-cache tini git libc6-compat
|
||||||
# && ln -s /lib/libc.musl-x86_64.so.1 /lib/ld-linux-x86-64.so.2
|
|
||||||
|
|
||||||
ARG envarg
|
ARG envarg
|
||||||
ENV ENTERPRISE_BUILD=${envarg} \
|
ENV ENTERPRISE_BUILD=${envarg} \
|
||||||
MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \
|
MAXMINDDB_FILE=/home/openreplay/geoip.mmdb \
|
||||||
|
|
|
||||||
ee/assist/app/assist.js (new file), 164 changes
|
|
@ -0,0 +1,164 @@
|
||||||
|
const jwt = require('jsonwebtoken');
|
||||||
|
const uaParser = require('ua-parser-js');
|
||||||
|
const {geoip} = require('./geoIP');
|
||||||
|
const {logger} = require('./logger');
|
||||||
|
|
||||||
|
let PROJECT_KEY_LENGTH = parseInt(process.env.PROJECT_KEY_LENGTH) || 20;
|
||||||
|
|
||||||
|
const IDENTITIES = {agent: 'agent', session: 'session'};
|
||||||
|
const EVENTS_DEFINITION = {
|
||||||
|
listen: {
|
||||||
|
UPDATE_EVENT: "UPDATE_SESSION", // tab become active/inactive, page title change, changed session object (rare case), call start/end
|
||||||
|
CONNECT_ERROR: "connect_error",
|
||||||
|
CONNECT_FAILED: "connect_failed",
|
||||||
|
ERROR: "error"
|
||||||
|
},
|
||||||
|
//The following list of events will be only emitted by the server
|
||||||
|
server: {
|
||||||
|
UPDATE_SESSION: "SERVER_UPDATE_SESSION"
|
||||||
|
}
|
||||||
|
};
|
||||||
|
EVENTS_DEFINITION.emit = {
|
||||||
|
NEW_AGENT: "NEW_AGENT",
|
||||||
|
NO_AGENTS: "NO_AGENT",
|
||||||
|
AGENT_DISCONNECT: "AGENT_DISCONNECTED",
|
||||||
|
AGENTS_CONNECTED: "AGENTS_CONNECTED",
|
||||||
|
NO_SESSIONS: "SESSION_DISCONNECTED",
|
||||||
|
SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED",
|
||||||
|
SESSION_RECONNECTED: "SESSION_RECONNECTED",
|
||||||
|
UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT
|
||||||
|
};
|
||||||
|
|
||||||
|
const BASE_sessionInfo = {
|
||||||
|
"pageTitle": "Page",
|
||||||
|
"active": false,
|
||||||
|
"sessionID": "0",
|
||||||
|
"metadata": {},
|
||||||
|
"userID": "",
|
||||||
|
"userUUID": "",
|
||||||
|
"projectKey": "",
|
||||||
|
"timestamp": 0,
|
||||||
|
"trackerVersion": "",
|
||||||
|
"userOs": "",
|
||||||
|
"userBrowser": "",
|
||||||
|
"userBrowserVersion": "",
|
||||||
|
"userDevice": "",
|
||||||
|
"userDeviceType": "",
|
||||||
|
"userCountry": "",
|
||||||
|
"userState": "",
|
||||||
|
"userCity": ""
|
||||||
|
};
|
||||||
|
|
||||||
|
const extractPeerId = (peerId) => {
|
||||||
|
const parts = peerId.split("-");
|
||||||
|
if (parts.length < 2 || parts.length > 3) {
|
||||||
|
logger.debug(`Invalid peerId format: ${peerId}`);
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
if (PROJECT_KEY_LENGTH > 0 && parts[0].length !== PROJECT_KEY_LENGTH) {
|
||||||
|
logger.debug(`Invalid project key length in peerId: ${peerId}`);
|
||||||
|
return {};
|
||||||
|
}
|
||||||
|
const [projectKey, sessionId, tabId = generateRandomTabId()] = parts;
|
||||||
|
return { projectKey, sessionId, tabId };
|
||||||
|
};
|
||||||
|
|
||||||
|
const generateRandomTabId = () => (Math.random() + 1).toString(36).substring(2);
|
||||||
|
|
||||||
|
function processPeerInfo(socket) {
|
||||||
|
socket._connectedAt = new Date();
|
||||||
|
const { projectKey, sessionId, tabId } = extractPeerId(socket.handshake.query.peerId || "");
|
||||||
|
Object.assign(socket.handshake.query, {
|
||||||
|
roomId: projectKey && sessionId ? `${projectKey}-${sessionId}` : null,
|
||||||
|
projectKey,
|
||||||
|
sessId: sessionId,
|
||||||
|
tabId
|
||||||
|
});
|
||||||
|
logger.debug(`Connection details: projectKey:${projectKey}, sessionId:${sessionId}, tabId:${tabId}, roomId:${socket.handshake.query.roomId}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
/**
|
||||||
|
* extracts and populate socket with information
|
||||||
|
* @Param {socket} used socket
|
||||||
|
* */
|
||||||
|
const extractSessionInfo = function (socket) {
|
||||||
|
if (socket.handshake.query.sessionInfo !== undefined) {
|
||||||
|
logger.debug(`received headers: ${socket.handshake.headers}`);
|
||||||
|
|
||||||
|
socket.handshake.query.sessionInfo = JSON.parse(socket.handshake.query.sessionInfo);
|
||||||
|
socket.handshake.query.sessionInfo = {...BASE_sessionInfo, ...socket.handshake.query.sessionInfo};
|
||||||
|
|
||||||
|
let ua = uaParser(socket.handshake.headers['user-agent']);
|
||||||
|
socket.handshake.query.sessionInfo.userOs = ua.os.name || null;
|
||||||
|
socket.handshake.query.sessionInfo.userBrowser = ua.browser.name || null;
|
||||||
|
socket.handshake.query.sessionInfo.userBrowserVersion = ua.browser.version || null;
|
||||||
|
socket.handshake.query.sessionInfo.userDevice = ua.device.model || null;
|
||||||
|
socket.handshake.query.sessionInfo.userDeviceType = ua.device.type || 'desktop';
|
||||||
|
socket.handshake.query.sessionInfo.userCountry = null;
|
||||||
|
socket.handshake.query.sessionInfo.userState = null;
|
||||||
|
socket.handshake.query.sessionInfo.userCity = null;
|
||||||
|
if (geoip() !== null) {
|
||||||
|
logger.debug(`looking for location of ${socket.handshake.headers['x-forwarded-for'] || socket.handshake.address}`);
|
||||||
|
try {
|
||||||
|
let ip = socket.handshake.headers['x-forwarded-for'] || socket.handshake.address;
|
||||||
|
ip = ip.split(",")[0];
|
||||||
|
let info = geoip().city(ip);
|
||||||
|
socket.handshake.query.sessionInfo.userCountry = info.country.isoCode;
|
||||||
|
socket.handshake.query.sessionInfo.userCity = info.city.names.en;
|
||||||
|
socket.handshake.query.sessionInfo.userState = info.subdivisions.length > 0 ? info.subdivisions[0].names.en : null;
|
||||||
|
} catch (e) {
|
||||||
|
logger.debug(`geoip-country failed: ${e}`);
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
function errorHandler(listenerName, error) {
|
||||||
|
logger.error(`Error detected from ${listenerName}\n${error}`);
|
||||||
|
}
|
||||||
|
|
||||||
|
const JWT_TOKEN_PREFIX = "Bearer ";
|
||||||
|
|
||||||
|
function check(socket, next) {
|
||||||
|
if (socket.handshake.query.identity === IDENTITIES.session) {
|
||||||
|
return next();
|
||||||
|
}
|
||||||
|
if (socket.handshake.query.peerId && socket.handshake.auth && socket.handshake.auth.token) {
|
||||||
|
let token = socket.handshake.auth.token;
|
||||||
|
if (token.startsWith(JWT_TOKEN_PREFIX)) {
|
||||||
|
token = token.substring(JWT_TOKEN_PREFIX.length);
|
||||||
|
}
|
||||||
|
jwt.verify(token, process.env.ASSIST_JWT_SECRET, (err, decoded) => {
|
||||||
|
logger.debug(`JWT payload: ${decoded}`);
|
||||||
|
if (err) {
|
||||||
|
logger.debug(err);
|
||||||
|
return next(new Error('Authentication error'));
|
||||||
|
}
|
||||||
|
const {projectKey, sessionId} = extractPeerId(socket.handshake.query.peerId);
|
||||||
|
if (!projectKey || !sessionId) {
|
||||||
|
logger.debug(`Missing attribute: projectKey:${projectKey}, sessionId:${sessionId}`);
|
||||||
|
return next(new Error('Authentication error'));
|
||||||
|
}
|
||||||
|
if (String(projectKey) !== String(decoded.projectKey) || String(sessionId) !== String(decoded.sessionId)) {
|
||||||
|
logger.debug(`Trying to access projectKey:${projectKey} instead of ${decoded.projectKey} or
|
||||||
|
to sessionId:${sessionId} instead of ${decoded.sessionId}`);
|
||||||
|
return next(new Error('Authorization error'));
|
||||||
|
}
|
||||||
|
socket.decoded = decoded;
|
||||||
|
return next();
|
||||||
|
});
|
||||||
|
} else {
|
||||||
|
logger.debug(`something missing in handshake: ${socket.handshake}`);
|
||||||
|
return next(new Error('Authentication error'));
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
module.exports = {
|
||||||
|
processPeerInfo,
|
||||||
|
extractPeerId,
|
||||||
|
extractSessionInfo,
|
||||||
|
EVENTS_DEFINITION,
|
||||||
|
IDENTITIES,
|
||||||
|
errorHandler,
|
||||||
|
authorizer: {check}
|
||||||
|
};
|
||||||
ee/assist/app/cache.js (new file), 194 changes
|
|
@ -0,0 +1,194 @@
const {logger} = require('./logger');
const Redis = require("ioredis");
const crypto = require("crypto");
const { Mutex } = require("async-mutex");

const REDIS_URL = process.env.REDIS_URL || "localhost:6379";
const redisClient = new Redis(REDIS_URL);
redisClient.on("error", (error) => {
    logger.error(`Redis cache error : ${error}`);
});

function generateNodeID() {
    const buffer = crypto.randomBytes(8);
    return "node_"+buffer.readBigUInt64BE(0).toString();
}

const batchSize = parseInt(process.env.REDIS_BATCH_SIZE) || 1000;
const PING_INTERVAL = parseInt(process.env.PING_INTERVAL_SECONDS) || 25;
const CACHE_REFRESH_INTERVAL = parseInt(process.env.CACHE_REFRESH_INTERVAL_SECONDS) || 5;
const pingInterval = Math.floor(PING_INTERVAL + PING_INTERVAL/2);
const cacheRefreshInterval = Math.floor(CACHE_REFRESH_INTERVAL * 4);
const cacheRefreshIntervalMs = CACHE_REFRESH_INTERVAL * 1000;
let lastCacheUpdateTime = 0;
let cacheRefresher = null;
const nodeID = process.env.HOSTNAME || generateNodeID();

const mutex = new Mutex();
const localCache = {
    addedSessions: new Set(),
    updatedSessions: new Set(),
    refreshedSessions: new Set(),
    deletedSessions: new Set()
};

const sendAssistEvent = async function (payload) {
    try {
        if (typeof payload !== "string") {
            logger.warn("sendAssistEvent received non-string payload. Converting to string.");
            payload = JSON.stringify(payload);
        }
        await redisClient.rpush("assist:stats", payload);
        logger.debug("Assist event sent to Redis: " + payload);
    } catch (error) {
        logger.error(`Failed to send assist event to Redis: ${error}`);
    }
};

const addSession = async function (sessionID) {
    await mutex.runExclusive(() => {
        localCache.addedSessions.add(sessionID);
    });
}

const updateSession = async function (sessionID) {
    await mutex.runExclusive(() => {
        localCache.addedSessions.add(sessionID); // to update the session's cache
        localCache.updatedSessions.add(sessionID); // to add sessionID to the list of recently updated sessions
    });
}

const renewSession = async function (sessionID) {
    await mutex.runExclusive(() => {
        localCache.refreshedSessions.add(sessionID);
    })
}

const removeSession = async function (sessionID) {
    await mutex.runExclusive(() => {
        localCache.deletedSessions.add(sessionID);
    });
}

const updateNodeCache = async function (io) {
    logger.debug('Background refresh triggered');
    try {
        const startTime = performance.now();
        let currStepTs = performance.now();
        const sessionIDs = new Set();
        const result = await io.fetchSockets();
        let toAdd = new Map();
        let toUpdate = [];
        let toRenew = [];
        let toDelete = [];
        await mutex.runExclusive(() => {
            result.forEach((socket) => {
                if (socket.handshake.query.sessId) {
                    const sessID = socket.handshake.query.sessId;
                    if (sessionIDs.has(sessID)) {
                        return;
                    }
                    sessionIDs.add(sessID);
                    if (localCache.addedSessions.has(sessID)) {
                        toAdd.set(sessID, socket.handshake.query.sessionInfo);
                    }
                }
            });
            toUpdate = [...localCache.updatedSessions];
            toRenew = [...localCache.refreshedSessions];
            toDelete = [...localCache.deletedSessions];
            // Clear the local cache
            localCache.addedSessions.clear();
            localCache.updatedSessions.clear();
            localCache.refreshedSessions.clear();
            localCache.deletedSessions.clear();
        })

        // insert new sessions in pipeline
        const toAddArray = Array.from(toAdd.keys());
        for (let i = 0; i < toAddArray.length; i += batchSize) {
            const batch = toAddArray.slice(i, i + batchSize);
            const pipeline = redisClient.pipeline();
            for (const sessionID of batch) {
                pipeline.set(`assist:online_sessions:${sessionID}`, JSON.stringify(toAdd.get(sessionID)), 'EX', pingInterval);
            }
            await pipeline.exec();
        }
        logger.info(`step 1 (toAdd) complete: ${(performance.now() - currStepTs).toFixed(2)}ms, ${toAddArray.length} sockets`);
        currStepTs = performance.now();

        // renew sessions in pipeline
        for (let i = 0; i < toRenew.length; i += batchSize) {
            const batch = toRenew.slice(i, i + batchSize);
            const pipeline = redisClient.pipeline();
            for (const sessionID of batch) {
                pipeline.expire(`assist:online_sessions:${sessionID}`, pingInterval);
            }
            await pipeline.exec();
        }
        logger.info(`step 2 (toRenew) complete: ${(performance.now() - currStepTs).toFixed(2)}ms, ${toRenew.length} sockets`);
        currStepTs = performance.now();

        // delete sessions in pipeline
        for (let i = 0; i < toDelete.length; i += batchSize) {
            const batch = toDelete.slice(i, i + batchSize);
            const pipeline = redisClient.pipeline();
            for (const sessionID of batch) {
                pipeline.del(`assist:online_sessions:${sessionID}`);
            }
            await pipeline.exec();
        }
        logger.info(`step 3 (toDelete) complete: ${(performance.now() - currStepTs).toFixed(2)}ms, ${toDelete.length} sockets`);
        currStepTs = performance.now();

        // add recently updated sessions
        if (toUpdate.length > 0) {
            await redisClient.sadd('assist:updated_sessions', toUpdate);
        }
        // store the node sessions
        await redisClient.set(`assist:nodes:${nodeID}:sessions`, JSON.stringify(Array.from(sessionIDs)), 'EX', cacheRefreshInterval);
        logger.info(`step 4 (full list + updated) complete: ${(performance.now() - currStepTs).toFixed(2)}ms, ${toUpdate.length} sockets`);

        const duration = performance.now() - startTime;
        logger.info(`Background refresh complete: ${duration.toFixed(2)}ms, ${result.length} sockets`);
    } catch (error) {
        logger.error(`Background refresh error: ${error}`);
    }
}

let isFlushing = false;

function startCacheRefresher(io) {
    if (cacheRefresher) clearInterval(cacheRefresher);

    cacheRefresher = setInterval(async () => {
        if (isFlushing) {
            logger.warn("Skipping tick: flush in progress");
            return;
        }

        const now = Date.now();
        if (now - lastCacheUpdateTime < cacheRefreshIntervalMs) {
            return;
        }

        isFlushing = true;
        try {
            await updateNodeCache(io);
            lastCacheUpdateTime = Date.now();
        } catch (err) {
            logger.error(`Tick error: ${err}`);
        } finally {
            isFlushing = false;
        }
    }, cacheRefreshIntervalMs / 2);
}

module.exports = {
    sendAssistEvent,
    addSession,
    updateSession,
    renewSession,
    removeSession,
    startCacheRefresher,
}
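A minimal wiring sketch (not part of the diff) may help when reading this file: it shows how the rest of this branch is expected to drive the cache, based on the socket.js and server.js changes further down. Only the exported function names, the Redis key names, and the refresher behaviour come from the code above; the Socket.IO setup here is illustrative.

// Illustrative wiring only; mirrors how ee/assist/app/socket.js and server.js use this module.
const { Server } = require('socket.io');
const { addSession, renewSession, removeSession, startCacheRefresher } = require('./app/cache');

const io = new Server({ path: '/socket' });

io.on('connection', (socket) => {
    const sessId = socket.handshake.query.sessId;
    addSession(sessId);                              // queued locally, flushed to assist:online_sessions:<sessId>
    socket.conn.on('packet', (p) => {
        if (p.type === 'pong') renewSession(sessId); // TTL of the key is extended on the next flush
    });
    socket.on('disconnect', () => removeSession(sessId));
});

startCacheRefresher(io);                             // background flush, ticking at CACHE_REFRESH_INTERVAL_SECONDS / 2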
21  ee/assist/app/geoIP.js  Normal file

@@ -0,0 +1,21 @@
const geoip2Reader = require('@maxmind/geoip2-node').Reader;
const {logger} = require('./logger');

let geoip = null;
if (process.env.MAXMINDDB_FILE !== undefined) {
    geoip2Reader.open(process.env.MAXMINDDB_FILE, {})
        .then(reader => {
            geoip = reader;
        })
        .catch(error => {
            logger.error(`Error while opening the MAXMINDDB_FILE, err: ${error}`);
        });
} else {
    logger.error("!!! please provide a valid value for MAXMINDDB_FILE env var.");
}

module.exports = {
    geoip: () => {
        return geoip;
    }
}
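For orientation, a hypothetical consumer of the exported geoip() getter, assuming the standard reader.country() lookup from @maxmind/geoip2-node; the countryOf helper is made up for illustration and how assist.js actually uses the reader is not shown in this hunk.

// Hypothetical lookup sketch; not part of the diff.
const { geoip } = require('./geoIP');

function countryOf(ip) {
    const reader = geoip();          // null until the MMDB file has been opened
    if (!reader) return null;
    try {
        return reader.country(ip).country.isoCode;  // e.g. "DE"
    } catch (e) {
        return null;                 // address not present in the database
    }
}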
23  ee/assist/app/logger.js  Normal file

@@ -0,0 +1,23 @@
const winston = require('winston');

const isDebugMode = process.env.debug === "1";
const logLevel = isDebugMode ? 'debug' : 'info';

const logger = winston.createLogger({
    level: logLevel,
    format: winston.format.combine(
        winston.format.timestamp({
            format: 'YYYY-MM-DD HH:mm:ss.SSS' // The same format as in backend services
        }),
        winston.format.errors({stack: true}),
        winston.format.json()
    ),
    defaultMeta: {service: process.env.SERVICE_NAME || 'assist'},
    transports: [
        new winston.transports.Console(),
    ],
});

module.exports = {
    logger,
}
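A short, assumed usage sketch: the only switch this module exposes is the debug env var, so calls in the other app/ modules behave as follows.

// Illustrative only; matches the logger usage seen elsewhere in this branch.
const { logger } = require('./logger');

logger.info('assist server starting');   // always emitted (level "info")
logger.debug('verbose details');         // emitted only when the `debug` env var is "1"
logger.error(`request failed: ${new Error('boom')}`);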
266  ee/assist/app/socket.js  Normal file

@@ -0,0 +1,266 @@
const {
    processPeerInfo,
    IDENTITIES,
    EVENTS_DEFINITION,
    extractSessionInfo,
    errorHandler
} = require("./assist");
const {
    addSession,
    updateSession,
    renewSession,
    removeSession
} = require('./cache');
const {
    logger
} = require('./logger');
const {
    startAssist,
    endAssist,
    handleEvent
} = require('./stats');
const deepMerge = require('@fastify/deepmerge')({all: true});

let io;

const setSocketIOServer = function (server) {
    io = server;
}

function sendFrom(from, to, eventName, ...data) {
    from.to(to).emit(eventName, ...data);
}

function sendTo(to, eventName, ...data) {
    sendFrom(io, to, eventName, ...data);
}

const fetchSockets = async function (roomID) {
    if (!io) {
        return [];
    }
    try {
        if (roomID) {
            return await io.in(roomID).fetchSockets();
        } else {
            return await io.fetchSockets();
        }
    } catch (error) {
        logger.error('Error fetching sockets:', error);
        return [];
    }
}

const findSessionSocketId = async (roomId, tabId) => {
    let pickFirstSession = tabId === undefined;
    const connected_sockets = await fetchSockets(roomId);
    for (let socket of connected_sockets) {
        if (socket.handshake.query.identity === IDENTITIES.session) {
            if (pickFirstSession) {
                return socket.id;
            } else if (socket.handshake.query.tabId === tabId) {
                return socket.id;
            }
        }
    }
    return null;
};

async function getRoomData(roomID) {
    let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [];
    const connected_sockets = await fetchSockets(roomID);
    if (connected_sockets.length > 0) {
        for (let socket of connected_sockets) {
            if (socket.handshake.query.identity === IDENTITIES.session) {
                tabsCount++;
                tabIDs.push(socket.handshake.query.tabId);
            } else {
                agentsCount++;
                agentIDs.push(socket.id);
            }
        }
    } else {
        tabsCount = -1;
        agentsCount = -1;
    }
    return {tabsCount, agentsCount, tabIDs, agentIDs};
}

async function onConnect(socket) {
    logger.debug(`A new client:${socket.id}, Query:${JSON.stringify(socket.handshake.query)}`);
    // Drop unknown socket.io connections
    if (socket.handshake.query.identity === undefined || socket.handshake.query.peerId === undefined) {
        logger.debug(`no identity or peerId, refusing connexion`);
        return socket.disconnect();
    } else if (socket.handshake.query.identity === IDENTITIES.session && socket.handshake.query.sessionInfo === undefined) {
        logger.debug(`sessionInfo is undefined, refusing connexion`);
        return socket.disconnect();
    }
    processPeerInfo(socket);

    const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(socket.handshake.query.roomId);

    if (socket.handshake.query.identity === IDENTITIES.session) {
        // Check if session with the same tabID already connected, if so, refuse new connexion
        if (tabsCount > 0) {
            for (let tab of tabIDs) {
                if (tab === socket.handshake.query.tabId) {
                    logger.debug(`session already connected, refusing new connexion, peerId: ${socket.handshake.query.peerId}`);
                    sendTo(socket.id, EVENTS_DEFINITION.emit.SESSION_ALREADY_CONNECTED);
                    return socket.disconnect();
                }
            }
        }
        extractSessionInfo(socket);
        if (tabsCount < 0) {
            // New session creates new room
        }
        // Inform all connected agents about reconnected session
        if (agentsCount > 0) {
            logger.debug(`notifying new session about agent-existence`);
            sendTo(socket.id, EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs);
            sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
        }
    } else if (tabsCount <= 0) {
        logger.debug(`notifying new agent about no SESSIONS with peerId:${socket.handshake.query.peerId}`);
        sendTo(socket.id, EVENTS_DEFINITION.emit.NO_SESSIONS);
    }

    await socket.join(socket.handshake.query.roomId);
    logger.debug(`${socket.id} joined room:${socket.handshake.query.roomId}, as:${socket.handshake.query.identity}, connections:${agentsCount + tabsCount + 1}`)

    // Add session to cache
    if (socket.handshake.query.identity === IDENTITIES.session) {
        await addSession(socket.handshake.query.sessId, socket.handshake.query.sessionInfo);
    }

    if (socket.handshake.query.identity === IDENTITIES.agent) {
        if (socket.handshake.query.agentInfo !== undefined) {
            socket.handshake.query.agentInfo = JSON.parse(socket.handshake.query.agentInfo);
            socket.handshake.query.agentID = socket.handshake.query.agentInfo.id;
            startAssist(socket, socket.handshake.query.agentID);
        }
        sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo);
    }

    socket.conn.on("packet", (packet) => {
        if (packet.type === 'pong') {
            renewSession(socket.handshake.query.sessId);
        }
    });

    // Set disconnect handler
    socket.on('disconnect', () => onDisconnect(socket));

    // Handle update event
    socket.on(EVENTS_DEFINITION.listen.UPDATE_EVENT, (...args) => onUpdateEvent(socket, ...args));

    // Handle webrtc events
    socket.on(EVENTS_DEFINITION.listen.WEBRTC_AGENT_CALL, (...args) => onWebrtcAgentHandler(socket, ...args));

    // Handle errors
    socket.on(EVENTS_DEFINITION.listen.ERROR, err => errorHandler(EVENTS_DEFINITION.listen.ERROR, err));
    socket.on(EVENTS_DEFINITION.listen.CONNECT_ERROR, err => errorHandler(EVENTS_DEFINITION.listen.CONNECT_ERROR, err));
    socket.on(EVENTS_DEFINITION.listen.CONNECT_FAILED, err => errorHandler(EVENTS_DEFINITION.listen.CONNECT_FAILED, err));

    // Handle all other events (usually dom's mutations and user's actions)
    socket.onAny((eventName, ...args) => onAny(socket, eventName, ...args));
}

async function onDisconnect(socket) {
    logger.debug(`${socket.id} disconnected from ${socket.handshake.query.roomId}`);

    if (socket.handshake.query.identity === IDENTITIES.agent) {
        endAssist(socket, socket.handshake.query.agentID);
        sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.AGENT_DISCONNECT, socket.id);
    }
    logger.debug("checking for number of connected agents and sessions");
    let {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(socket.handshake.query.roomId);

    if (tabsCount <= 0) {
        await removeSession(socket.handshake.query.sessId);
    }

    if (tabsCount === -1 && agentsCount === -1) {
        logger.debug(`room not found: ${socket.handshake.query.roomId}`);
        return;
    }
    if (tabsCount === 0) {
        logger.debug(`notifying everyone in ${socket.handshake.query.roomId} about no SESSIONS`);
        sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NO_SESSIONS);
    }
    if (agentsCount === 0) {
        logger.debug(`notifying everyone in ${socket.handshake.query.roomId} about no AGENTS`);
        sendFrom(socket, socket.handshake.query.roomId, EVENTS_DEFINITION.emit.NO_AGENTS);
    }
}

async function onUpdateEvent(socket, ...args) {
    logger.debug(`${socket.id} sent update event.`);
    if (socket.handshake.query.identity !== IDENTITIES.session) {
        logger.debug('Ignoring update event.');
        return
    }

    args[0] = updateSessionData(socket, args[0])
    socket.handshake.query.sessionInfo = deepMerge(socket.handshake.query.sessionInfo, args[0]?.data, {tabId: args[0]?.meta?.tabId});

    // update session cache
    await updateSession(socket.handshake.query.sessId, socket.handshake.query.sessionInfo);

    // Update sessionInfo for all agents in the room
    const connected_sockets = await fetchSockets(socket.handshake.query.roomId);
    for (let item of connected_sockets) {
        if (item.handshake.query.identity === IDENTITIES.session && item.handshake.query.sessionInfo) {
            item.handshake.query.sessionInfo = deepMerge(item.handshake.query.sessionInfo, args[0]?.data, {tabId: args[0]?.meta?.tabId});
        } else if (item.handshake.query.identity === IDENTITIES.agent) {
            sendFrom(socket, item.id, EVENTS_DEFINITION.emit.UPDATE_EVENT, args[0]);
        }
    }
}

async function onWebrtcAgentHandler(socket, ...args) {
    if (socket.handshake.query.identity === IDENTITIES.agent) {
        const agentIdToConnect = args[0]?.data?.toAgentId;
        logger.debug(`${socket.id} sent webrtc event to agent:${agentIdToConnect}`);
        if (agentIdToConnect && socket.handshake.sessionData.AGENTS_CONNECTED.includes(agentIdToConnect)) {
            sendFrom(socket, agentIdToConnect, EVENTS_DEFINITION.listen.WEBRTC_AGENT_CALL, args[0]);
        }
    }
}

async function onAny(socket, eventName, ...args) {
    if (Object.values(EVENTS_DEFINITION.listen).indexOf(eventName) >= 0) {
        logger.debug(`received event:${eventName}, should be handled by another listener, stopping onAny.`);
        return
    }
    args[0] = updateSessionData(socket, args[0])
    if (socket.handshake.query.identity === IDENTITIES.session) {
        logger.debug(`received event:${eventName}, from:${socket.handshake.query.identity}, sending message to room:${socket.handshake.query.roomId}`);
        sendFrom(socket, socket.handshake.query.roomId, eventName, args[0]);
    } else {
        handleEvent(eventName, socket, args[0]);
        logger.debug(`received event:${eventName}, from:${socket.handshake.query.identity}, sending message to session of room:${socket.handshake.query.roomId}`);
        let socketId = await findSessionSocketId(socket.handshake.query.roomId, args[0]?.meta?.tabId);
        if (socketId === null) {
            logger.debug(`session not found for:${socket.handshake.query.roomId}`);
            sendTo(socket.id, EVENTS_DEFINITION.emit.NO_SESSIONS);
        } else {
            logger.debug("message sent");
            sendTo(socketId, eventName, socket.id, args[0]); // forward to the matching session socket
        }
    }
}

// Back compatibility (add top layer with meta information)
function updateSessionData(socket, sessionData) {
    if (sessionData?.meta === undefined && socket.handshake.query.identity === IDENTITIES.session) {
        sessionData = {meta: {tabId: socket.handshake.query.tabId, version: 1}, data: sessionData};
    }
    return sessionData
}

module.exports = {
    onConnect,
    setSocketIOServer,
}
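To make the handshake flow above easier to follow, here is a condensed sketch of how this module is wired in the reworked ee/assist/server.js further down in this diff. It keeps only the calls that touch socket.js and cache.js and drops the compression and CORS options; treat it as illustrative, not as the full server bootstrap.

// Condensed wiring sketch, derived from the server.js changes later in this compare view.
const { App } = require('uWebSockets.js');
const { Server } = require('socket.io');
const { authorizer } = require('./app/assist');
const { onConnect, setSocketIOServer } = require('./app/socket');
const { startCacheRefresher } = require('./app/cache');

const app = App();
const io = new Server({ path: '/socket' });

io.use(async (socket, next) => await authorizer.check(socket, next)); // handshake auth before onConnect runs
io.on('connection', (socket) => onConnect(socket));                    // handler defined in socket.js above
io.attachApp(app);                                                     // bind Socket.IO to the uWS app
setSocketIOServer(io);                                                 // let socket.js emit through this io instance
startCacheRefresher(io);                                               // keep the Redis session cache fresh

app.listen(parseInt(process.env.PORT) || 9001, (token) => {
    if (!token) console.log('Failed to listen');
});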
@ -1,6 +1,5 @@
|
||||||
const statsHost = process.env.STATS_HOST || 'http://assist-stats-openreplay.app.svc.cluster.local:8000/events';
|
|
||||||
const authToken = process.env.STATS_AUTH_TOKEN || '';
|
|
||||||
const {logger} = require('./logger');
|
const {logger} = require('./logger');
|
||||||
|
const {sendAssistEvent} = require('./cache');
|
||||||
|
|
||||||
class InMemoryCache {
|
class InMemoryCache {
|
||||||
constructor() {
|
constructor() {
|
||||||
|
|
@ -26,32 +25,10 @@ class InMemoryCache {
|
||||||
|
|
||||||
const cache = new InMemoryCache();
|
const cache = new InMemoryCache();
|
||||||
|
|
||||||
async function postData(payload) {
|
|
||||||
let headers = {
|
|
||||||
'Content-Type': 'application/json'
|
|
||||||
};
|
|
||||||
if (authToken && authToken.trim() !== '') {
|
|
||||||
headers['Authorization'] = 'Bearer ' + authToken;
|
|
||||||
}
|
|
||||||
const options = {
|
|
||||||
method: 'POST',
|
|
||||||
body: JSON.stringify(payload),
|
|
||||||
headers: headers,
|
|
||||||
}
|
|
||||||
|
|
||||||
try {
|
|
||||||
const response = await fetch(statsHost, options)
|
|
||||||
const jsonResponse = await response.json();
|
|
||||||
logger.debug('JSON response', JSON.stringify(jsonResponse, null, 4))
|
|
||||||
} catch(err) {
|
|
||||||
logger.debug('ERROR', err);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
function startAssist(socket, agentID) {
|
function startAssist(socket, agentID) {
|
||||||
const tsNow = +new Date();
|
const tsNow = +new Date();
|
||||||
const eventID = `${socket.handshake.query.sessId}_${agentID}_assist_${tsNow}`;
|
const eventID = `${socket.handshake.query.sessId}_${agentID}_assist_${tsNow}`;
|
||||||
void postData({
|
void sendAssistEvent({
|
||||||
"project_id": socket.handshake.query.projectId,
|
"project_id": socket.handshake.query.projectId,
|
||||||
"session_id": socket.handshake.query.sessId,
|
"session_id": socket.handshake.query.sessId,
|
||||||
"agent_id": agentID,
|
"agent_id": agentID,
|
||||||
|
|
@ -72,7 +49,7 @@ function endAssist(socket, agentID) {
|
||||||
logger.debug(`have to skip assist_ended, no eventID in the cache, agentID: ${socket.handshake.query.agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}`);
|
logger.debug(`have to skip assist_ended, no eventID in the cache, agentID: ${socket.handshake.query.agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}`);
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
void postData({
|
void sendAssistEvent({
|
||||||
"project_id": socket.handshake.query.projectId,
|
"project_id": socket.handshake.query.projectId,
|
||||||
"session_id": socket.handshake.query.sessId,
|
"session_id": socket.handshake.query.sessId,
|
||||||
"agent_id": agentID,
|
"agent_id": agentID,
|
||||||
|
|
@ -90,7 +67,7 @@ function endAssist(socket, agentID) {
|
||||||
function startCall(socket, agentID) {
|
function startCall(socket, agentID) {
|
||||||
const tsNow = +new Date();
|
const tsNow = +new Date();
|
||||||
const eventID = `${socket.handshake.query.sessId}_${agentID}_call_${tsNow}`;
|
const eventID = `${socket.handshake.query.sessId}_${agentID}_call_${tsNow}`;
|
||||||
void postData({
|
void sendAssistEvent({
|
||||||
"project_id": socket.handshake.query.projectId,
|
"project_id": socket.handshake.query.projectId,
|
||||||
"session_id": socket.handshake.query.sessId,
|
"session_id": socket.handshake.query.sessId,
|
||||||
"agent_id": agentID,
|
"agent_id": agentID,
|
||||||
|
|
@ -112,7 +89,7 @@ function endCall(socket, agentID) {
|
||||||
logger.debug(`have to skip s_call_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
|
logger.debug(`have to skip s_call_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
void postData({
|
void sendAssistEvent({
|
||||||
"project_id": socket.handshake.query.projectId,
|
"project_id": socket.handshake.query.projectId,
|
||||||
"session_id": socket.handshake.query.sessId,
|
"session_id": socket.handshake.query.sessId,
|
||||||
"agent_id": agentID,
|
"agent_id": agentID,
|
||||||
|
|
@ -129,7 +106,7 @@ function endCall(socket, agentID) {
|
||||||
function startControl(socket, agentID) {
|
function startControl(socket, agentID) {
|
||||||
const tsNow = +new Date();
|
const tsNow = +new Date();
|
||||||
const eventID = `${socket.handshake.query.sessId}_${agentID}_control_${tsNow}`;
|
const eventID = `${socket.handshake.query.sessId}_${agentID}_control_${tsNow}`;
|
||||||
void postData({
|
void sendAssistEvent({
|
||||||
"project_id": socket.handshake.query.projectId,
|
"project_id": socket.handshake.query.projectId,
|
||||||
"session_id": socket.handshake.query.sessId,
|
"session_id": socket.handshake.query.sessId,
|
||||||
"agent_id": agentID,
|
"agent_id": agentID,
|
||||||
|
|
@ -150,7 +127,7 @@ function endControl(socket, agentID) {
|
||||||
logger.debug(`have to skip s_control_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
|
logger.debug(`have to skip s_control_ended, no eventID in the cache, agentID: ${agentID}, sessID: ${socket.handshake.query.sessId}, projID: ${socket.handshake.query.projectId}, time: ${tsNow}`);
|
||||||
return
|
return
|
||||||
}
|
}
|
||||||
void postData({
|
void sendAssistEvent({
|
||||||
"project_id": socket.handshake.query.projectId,
|
"project_id": socket.handshake.query.projectId,
|
||||||
"session_id": socket.handshake.query.sessId,
|
"session_id": socket.handshake.query.sessId,
|
||||||
"agent_id": agentID,
|
"agent_id": agentID,
|
||||||
|
|
@ -167,7 +144,7 @@ function endControl(socket, agentID) {
|
||||||
function startRecord(socket, agentID) {
|
function startRecord(socket, agentID) {
|
||||||
const tsNow = +new Date();
|
const tsNow = +new Date();
|
||||||
const eventID = `${socket.handshake.query.sessId}_${agentID}_record_${tsNow}`;
|
const eventID = `${socket.handshake.query.sessId}_${agentID}_record_${tsNow}`;
|
||||||
void postData({
|
void sendAssistEvent({
|
||||||
"project_id": socket.handshake.query.projectId,
|
"project_id": socket.handshake.query.projectId,
|
||||||
"session_id": socket.handshake.query.sessId,
|
"session_id": socket.handshake.query.sessId,
|
||||||
"agent_id": agentID,
|
"agent_id": agentID,
|
||||||
|
|
@ -184,7 +161,7 @@ function startRecord(socket, agentID) {
|
||||||
function endRecord(socket, agentID) {
|
function endRecord(socket, agentID) {
|
||||||
const tsNow = +new Date();
|
const tsNow = +new Date();
|
||||||
const eventID = cache.get(`${socket.sessId}_record`);
|
const eventID = cache.get(`${socket.sessId}_record`);
|
||||||
void postData({
|
void sendAssistEvent({
|
||||||
"project_id": socket.handshake.query.projectId,
|
"project_id": socket.handshake.query.projectId,
|
||||||
"session_id": socket.handshake.query.sessId,
|
"session_id": socket.handshake.query.sessId,
|
||||||
"agent_id": agentID,
|
"agent_id": agentID,
|
||||||
|
|
@@ -1,14 +0,0 @@
rm -rf ./utils/assistHelper.js
rm -rf ./utils/geoIP.js
rm -rf ./utils/health.js
rm -rf ./utils/HeapSnapshot.js
rm -rf ./utils/helper.js
rm -rf ./utils/httpHandlers.js
rm -rf ./utils/logger.js
rm -rf ./utils/metrics.js
rm -rf ./utils/socketHandlers.js

rm -rf servers/peerjs-server.js
rm -rf servers/sourcemaps-handler.js
rm -rf servers/sourcemaps-server.js
rm -rf build.sh
971  ee/assist/package-lock.json  generated

File diff suppressed because it is too large.

@ -1,33 +1,26 @@
|
||||||
{
|
{
|
||||||
"name": "assist-server",
|
"name": "assist-server",
|
||||||
"version": "v1.22.0-ee",
|
"version": "1.0.0",
|
||||||
"description": "assist server to get live sessions & sourcemaps reader to get stack trace",
|
"description": "",
|
||||||
"main": "peerjs-server.js",
|
"main": "index.js",
|
||||||
"scripts": {
|
"scripts": {
|
||||||
"test": "echo \"Error: no test specified\" && exit 1",
|
"test": "echo \"Error: no test specified\" && exit 1"
|
||||||
"start": "node server.js"
|
|
||||||
},
|
},
|
||||||
"repository": {
|
"keywords": [],
|
||||||
"type": "git",
|
"author": "",
|
||||||
"url": "git+https://github.com/openreplay/openreplay.git"
|
"license": "ISC",
|
||||||
},
|
|
||||||
"author": "KRAIEM Taha Yassine <tahayk2@gmail.com>",
|
|
||||||
"license": "Elastic License 2.0 (ELv2)",
|
|
||||||
"bugs": {
|
|
||||||
"url": "https://github.com/openreplay/openreplay/issues"
|
|
||||||
},
|
|
||||||
"homepage": "https://github.com/openreplay/openreplay#readme",
|
|
||||||
"dependencies": {
|
"dependencies": {
|
||||||
"@fastify/deepmerge": "^2.0.1",
|
"@fastify/deepmerge": "^3.0.0",
|
||||||
"@maxmind/geoip2-node": "^4.2.0",
|
"@maxmind/geoip2-node": "^6.0.0",
|
||||||
"@socket.io/redis-adapter": "^8.2.1",
|
"async-mutex": "^0.5.0",
|
||||||
"express": "^4.21.1",
|
"express": "^4.21.2",
|
||||||
|
"ioredis": "^5.6.1",
|
||||||
"jsonwebtoken": "^9.0.2",
|
"jsonwebtoken": "^9.0.2",
|
||||||
"prom-client": "^15.0.0",
|
"redis": "^4.7.0",
|
||||||
"redis": "^4.6.10",
|
"socket.io": "^4.8.1",
|
||||||
"socket.io": "^4.8.0",
|
"socket.io-client": "^4.8.1",
|
||||||
"ua-parser-js": "^1.0.37",
|
"ua-parser-js": "^2.0.3",
|
||||||
"uWebSockets.js": "github:uNetworking/uWebSockets.js#v20.51.0",
|
"uWebSockets.js": "github:uNetworking/uWebSockets.js#v20.51.0",
|
||||||
"winston": "^3.13.0"
|
"winston": "^3.17.0"
|
||||||
}
|
}
|
||||||
}
|
}
|
||||||
|
|
|
||||||
|
|
@@ -1,2 +0,0 @@
#!/bin/bash
rsync -avr --exclude=".*" --exclude="node_modules" --ignore-existing ../../assist/* ./

@@ -1,6 +0,0 @@
#!/bin/bash
set -a
source .env
set +a

npm start
@ -1,119 +1,64 @@
|
||||||
const dumps = require('./utils/HeapSnapshot');
|
const { App } = require('uWebSockets.js');
|
||||||
const {request_logger} = require('./utils/helper');
|
const { Server } = require('socket.io');
|
||||||
const express = require('express');
|
const { logger } = require("./app/logger");
|
||||||
const health = require("./utils/health");
|
const { authorizer } = require("./app/assist");
|
||||||
const assert = require('assert').strict;
|
const { onConnect, setSocketIOServer } = require("./app/socket");
|
||||||
const register = require('./utils/metrics').register;
|
const { startCacheRefresher } = require("./app/cache");
|
||||||
let socket;
|
|
||||||
if (process.env.redis === "true") {
|
|
||||||
socket = require("./servers/websocket-cluster");
|
|
||||||
} else {
|
|
||||||
socket = require("./servers/websocket");
|
|
||||||
}
|
|
||||||
const {logger} = require('./utils/logger');
|
|
||||||
|
|
||||||
health.healthApp.get('/metrics', async (req, res) => {
|
const app = App();
|
||||||
try {
|
const pingInterval = parseInt(process.env.PING_INTERVAL) || 25000;
|
||||||
res.set('Content-Type', register.contentType);
|
|
||||||
res.end(await register.metrics());
|
const getCompressionConfig = function () {
|
||||||
} catch (ex) {
|
// WS: The theoretical overhead per socket is 19KB (11KB for compressor and 8KB for decompressor)
|
||||||
res.status(500).end(ex);
|
let perMessageDeflate = false;
|
||||||
|
if (process.env.COMPRESSION === "true") {
|
||||||
|
logger.info(`WS compression: enabled`);
|
||||||
|
perMessageDeflate = {
|
||||||
|
zlibDeflateOptions: {
|
||||||
|
windowBits: 10,
|
||||||
|
memLevel: 1
|
||||||
|
},
|
||||||
|
zlibInflateOptions: {
|
||||||
|
windowBits: 10
|
||||||
|
}
|
||||||
|
}
|
||||||
|
} else {
|
||||||
|
logger.info(`WS compression: disabled`);
|
||||||
}
|
}
|
||||||
|
return {
|
||||||
|
perMessageDeflate: perMessageDeflate,
|
||||||
|
clientNoContextTakeover: true
|
||||||
|
};
|
||||||
|
}
|
||||||
|
|
||||||
|
const io = new Server({
|
||||||
|
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
|
||||||
|
pingInterval: pingInterval, // Will use it for cache invalidation
|
||||||
|
cors: {
|
||||||
|
origin: "*", // Allow connections from any origin (for development)
|
||||||
|
methods: ["GET", "POST"],
|
||||||
|
credentials: true
|
||||||
|
},
|
||||||
|
path: '/socket',
|
||||||
|
...getCompressionConfig()
|
||||||
});
|
});
|
||||||
|
|
||||||
|
io.use(async (socket, next) => await authorizer.check(socket, next));
|
||||||
|
io.on('connection', (socket) => onConnect(socket));
|
||||||
|
io.attachApp(app);
|
||||||
|
setSocketIOServer(io);
|
||||||
|
|
||||||
const HOST = process.env.LISTEN_HOST || '0.0.0.0';
|
const HOST = process.env.LISTEN_HOST || '0.0.0.0';
|
||||||
const PORT = process.env.LISTEN_PORT || 9001;
|
const PORT = parseInt(process.env.PORT) || 9001;
|
||||||
assert.ok(process.env.ASSIST_KEY, 'The "ASSIST_KEY" environment variable is required');
|
app.listen(PORT, (token) => {
|
||||||
const P_KEY = process.env.ASSIST_KEY;
|
if (token) {
|
||||||
const PREFIX = process.env.PREFIX || process.env.prefix || `/assist`;
|
console.log(`Server running at http://${HOST}:${PORT}`);
|
||||||
|
} else {
|
||||||
const heapdump = process.env.heapdump === "1";
|
console.log(`Failed to listen on port ${PORT}`);
|
||||||
|
|
||||||
if (process.env.uws !== "true") {
|
|
||||||
let wsapp = express();
|
|
||||||
wsapp.use(express.json());
|
|
||||||
wsapp.use(express.urlencoded({extended: true}));
|
|
||||||
wsapp.use(request_logger("[wsapp]"));
|
|
||||||
wsapp.get(['/', PREFIX, `${PREFIX}/`, `${PREFIX}/${P_KEY}`, `${PREFIX}/${P_KEY}/`], (req, res) => {
|
|
||||||
res.statusCode = 200;
|
|
||||||
res.end("ok!");
|
|
||||||
}
|
|
||||||
);
|
|
||||||
heapdump && wsapp.use(`${PREFIX}/${P_KEY}/heapdump`, dumps.router);
|
|
||||||
wsapp.use(`${PREFIX}/${P_KEY}`, socket.wsRouter);
|
|
||||||
|
|
||||||
wsapp.enable('trust proxy');
|
|
||||||
const wsserver = wsapp.listen(PORT, HOST, () => {
|
|
||||||
logger.info(`WS App listening on http://${HOST}:${PORT}`);
|
|
||||||
health.healthApp.listen(health.PORT, HOST, health.listen_cb);
|
|
||||||
});
|
|
||||||
|
|
||||||
socket.start(wsserver);
|
|
||||||
module.exports = {wsserver};
|
|
||||||
} else {
|
|
||||||
logger.info("Using uWebSocket");
|
|
||||||
const {App} = require("uWebSockets.js");
|
|
||||||
|
|
||||||
|
|
||||||
const uapp = new App();
|
|
||||||
|
|
||||||
const healthFn = (res, req) => {
|
|
||||||
res.writeStatus('200 OK').end('ok!');
|
|
||||||
}
|
}
|
||||||
uapp.get('/', healthFn);
|
});
|
||||||
uapp.get(PREFIX, healthFn);
|
startCacheRefresher(io);
|
||||||
uapp.get(`${PREFIX}/`, healthFn);
|
|
||||||
uapp.get(`${PREFIX}/${P_KEY}`, healthFn);
|
|
||||||
uapp.get(`${PREFIX}/${P_KEY}/`, healthFn);
|
|
||||||
|
|
||||||
|
process.on('uncaughtException', err => {
|
||||||
/* Either onAborted or simply finished request */
|
logger.error(`Uncaught Exception: ${err}`);
|
||||||
const onAbortedOrFinishedResponse = function (res) {
|
});
|
||||||
|
|
||||||
if (res.id === -1) {
|
|
||||||
logger.debug("ERROR! onAbortedOrFinishedResponse called twice for the same res!");
|
|
||||||
} else {
|
|
||||||
logger.debug('Stream was closed');
|
|
||||||
}
|
|
||||||
|
|
||||||
/* Mark this response already accounted for */
|
|
||||||
res.id = -1;
|
|
||||||
}
|
|
||||||
|
|
||||||
const uWrapper = function (fn) {
|
|
||||||
return (res, req) => {
|
|
||||||
res.id = 1;
|
|
||||||
res.aborted = false;
|
|
||||||
req.startTs = performance.now(); // track request's start timestamp
|
|
||||||
req.method = req.getMethod();
|
|
||||||
res.onAborted(() => {
|
|
||||||
res.aborted = true;
|
|
||||||
onAbortedOrFinishedResponse(res);
|
|
||||||
});
|
|
||||||
return fn(req, res);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
uapp.get(`${PREFIX}/${P_KEY}/sockets-list/:projectKey/autocomplete`, uWrapper(socket.handlers.autocomplete));
|
|
||||||
uapp.get(`${PREFIX}/${P_KEY}/sockets-list/:projectKey/:sessionId`, uWrapper(socket.handlers.socketsListByProject));
|
|
||||||
uapp.get(`${PREFIX}/${P_KEY}/sockets-live/:projectKey/autocomplete`, uWrapper(socket.handlers.autocomplete));
|
|
||||||
uapp.get(`${PREFIX}/${P_KEY}/sockets-live/:projectKey`, uWrapper(socket.handlers.socketsLiveByProject));
|
|
||||||
uapp.post(`${PREFIX}/${P_KEY}/sockets-live/:projectKey`, uWrapper(socket.handlers.socketsLiveByProject));
|
|
||||||
uapp.get(`${PREFIX}/${P_KEY}/sockets-live/:projectKey/:sessionId`, uWrapper(socket.handlers.socketsLiveBySession));
|
|
||||||
|
|
||||||
socket.start(uapp);
|
|
||||||
|
|
||||||
uapp.listen(HOST, PORT, (token) => {
|
|
||||||
if (!token) {
|
|
||||||
logger.error("port already in use");
|
|
||||||
}
|
|
||||||
logger.info(`WS App listening on http://${HOST}:${PORT}`);
|
|
||||||
health.healthApp.listen(health.PORT, HOST, health.listen_cb);
|
|
||||||
});
|
|
||||||
|
|
||||||
|
|
||||||
process.on('uncaughtException', err => {
|
|
||||||
logger.error(`Uncaught Exception: ${err}`);
|
|
||||||
});
|
|
||||||
module.exports = {uapp};
|
|
||||||
}
|
|
||||||
|
|
@ -1,64 +0,0 @@
|
||||||
const express = require('express');
|
|
||||||
const {
|
|
||||||
socketConnexionTimeout,
|
|
||||||
authorizer
|
|
||||||
} = require('../utils/assistHelper');
|
|
||||||
const {
|
|
||||||
createSocketIOServer
|
|
||||||
} = require('../utils/wsServer');
|
|
||||||
const {
|
|
||||||
onConnect
|
|
||||||
} = require('../utils/socketHandlers');
|
|
||||||
const {
|
|
||||||
socketsListByProject,
|
|
||||||
socketsLiveByProject,
|
|
||||||
socketsLiveBySession,
|
|
||||||
autocomplete
|
|
||||||
} = require('../utils/httpHandlers');
|
|
||||||
const {logger} = require('../utils/logger');
|
|
||||||
|
|
||||||
const {createAdapter} = require("@socket.io/redis-adapter");
|
|
||||||
const {createClient} = require("redis");
|
|
||||||
const REDIS_URL = (process.env.REDIS_URL || "localhost:6379").replace(/((^\w+:|^)\/\/|^)/, 'redis://');
|
|
||||||
const pubClient = createClient({url: REDIS_URL});
|
|
||||||
const subClient = pubClient.duplicate();
|
|
||||||
logger.info(`Using Redis: ${REDIS_URL}`);
|
|
||||||
|
|
||||||
const wsRouter = express.Router();
|
|
||||||
wsRouter.get(`/sockets-list/:projectKey/autocomplete`, autocomplete); // autocomplete
|
|
||||||
wsRouter.get(`/sockets-list/:projectKey/:sessionId`, socketsListByProject); // is_live
|
|
||||||
wsRouter.get(`/sockets-live/:projectKey/autocomplete`, autocomplete); // not using
|
|
||||||
wsRouter.get(`/sockets-live/:projectKey`, socketsLiveByProject);
|
|
||||||
wsRouter.post(`/sockets-live/:projectKey`, socketsLiveByProject); // assist search
|
|
||||||
wsRouter.get(`/sockets-live/:projectKey/:sessionId`, socketsLiveBySession); // session_exists, get_live_session_by_id
|
|
||||||
|
|
||||||
let io;
|
|
||||||
module.exports = {
|
|
||||||
wsRouter,
|
|
||||||
start: (server, prefix) => {
|
|
||||||
io = createSocketIOServer(server, prefix);
|
|
||||||
io.use(async (socket, next) => await authorizer.check(socket, next));
|
|
||||||
io.on('connection', (socket) => onConnect(socket));
|
|
||||||
|
|
||||||
logger.info("WS server started");
|
|
||||||
|
|
||||||
socketConnexionTimeout(io);
|
|
||||||
|
|
||||||
Promise.all([pubClient.connect(), subClient.connect()])
|
|
||||||
.then(() => {
|
|
||||||
io.adapter(createAdapter(pubClient, subClient,
|
|
||||||
{requestsTimeout: process.env.REDIS_REQUESTS_TIMEOUT || 5000}));
|
|
||||||
logger.info("> redis connected.");
|
|
||||||
})
|
|
||||||
.catch((err) => {
|
|
||||||
logger.error(`redis connection error: ${err}`);
|
|
||||||
process.exit(2);
|
|
||||||
});
|
|
||||||
},
|
|
||||||
handlers: {
|
|
||||||
socketsListByProject,
|
|
||||||
socketsLiveByProject,
|
|
||||||
socketsLiveBySession,
|
|
||||||
autocomplete
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
@ -1,45 +0,0 @@
|
||||||
const express = require('express');
|
|
||||||
const {
|
|
||||||
socketConnexionTimeout,
|
|
||||||
authorizer
|
|
||||||
} = require('../utils/assistHelper');
|
|
||||||
const {
|
|
||||||
createSocketIOServer
|
|
||||||
} = require('../utils/wsServer');
|
|
||||||
const {
|
|
||||||
onConnect
|
|
||||||
} = require('../utils/socketHandlers');
|
|
||||||
const {
|
|
||||||
socketsListByProject,
|
|
||||||
socketsLiveByProject,
|
|
||||||
socketsLiveBySession,
|
|
||||||
autocomplete
|
|
||||||
} = require('../utils/httpHandlers');
|
|
||||||
const {logger} = require('../utils/logger');
|
|
||||||
|
|
||||||
const wsRouter = express.Router();
|
|
||||||
wsRouter.get(`/sockets-list/:projectKey/autocomplete`, autocomplete); // autocomplete
|
|
||||||
wsRouter.get(`/sockets-list/:projectKey/:sessionId`, socketsListByProject); // is_live
|
|
||||||
wsRouter.get(`/sockets-live/:projectKey/autocomplete`, autocomplete); // not using
|
|
||||||
wsRouter.get(`/sockets-live/:projectKey`, socketsLiveByProject);
|
|
||||||
wsRouter.post(`/sockets-live/:projectKey`, socketsLiveByProject); // assist search
|
|
||||||
wsRouter.get(`/sockets-live/:projectKey/:sessionId`, socketsLiveBySession); // session_exists, get_live_session_by_id
|
|
||||||
|
|
||||||
let io;
|
|
||||||
module.exports = {
|
|
||||||
wsRouter,
|
|
||||||
start: (server, prefix) => {
|
|
||||||
io = createSocketIOServer(server, prefix);
|
|
||||||
io.use(async (socket, next) => await authorizer.check(socket, next));
|
|
||||||
io.on('connection', (socket) => onConnect(socket));
|
|
||||||
|
|
||||||
logger.info("WS server started");
|
|
||||||
socketConnexionTimeout(io);
|
|
||||||
},
|
|
||||||
handlers: {
|
|
||||||
socketsListByProject,
|
|
||||||
socketsLiveByProject,
|
|
||||||
socketsLiveBySession,
|
|
||||||
autocomplete
|
|
||||||
}
|
|
||||||
};
|
|
||||||
|
|
@ -1,13 +0,0 @@
|
||||||
const {
|
|
||||||
extractProjectKeyFromRequest,
|
|
||||||
extractSessionIdFromRequest,
|
|
||||||
extractPayloadFromRequest,
|
|
||||||
getAvailableRooms
|
|
||||||
} = require('../utils/helper-ee');
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
extractProjectKeyFromRequest,
|
|
||||||
extractSessionIdFromRequest,
|
|
||||||
extractPayloadFromRequest,
|
|
||||||
getAvailableRooms
|
|
||||||
}
|
|
||||||
|
|
@ -1,126 +0,0 @@
|
||||||
const uWS = require("uWebSockets.js");
|
|
||||||
const helper = require('./helper');
|
|
||||||
const {logger} = require('./logger');
|
|
||||||
|
|
||||||
const getBodyFromUWSResponse = async function (res) {
|
|
||||||
return new Promise(((resolve, reject) => {
|
|
||||||
let buffer;
|
|
||||||
res.onData((ab, isLast) => {
|
|
||||||
let chunk = Buffer.from(ab);
|
|
||||||
if (buffer) {
|
|
||||||
buffer = Buffer.concat([buffer, chunk]);
|
|
||||||
} else {
|
|
||||||
buffer = Buffer.concat([chunk]);
|
|
||||||
}
|
|
||||||
if (isLast) {
|
|
||||||
let json;
|
|
||||||
try {
|
|
||||||
json = JSON.parse(buffer);
|
|
||||||
} catch (e) {
|
|
||||||
console.error(e);
|
|
||||||
json = {};
|
|
||||||
}
|
|
||||||
resolve(json);
|
|
||||||
}
|
|
||||||
});
|
|
||||||
}));
|
|
||||||
}
|
|
||||||
const extractProjectKeyFromRequest = function (req) {
|
|
||||||
if (process.env.uws === "true") {
|
|
||||||
if (req.getParameter(0)) {
|
|
||||||
logger.debug(`[WS]where projectKey=${req.getParameter(0)}`);
|
|
||||||
return req.getParameter(0);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return helper.extractProjectKeyFromRequest(req);
|
|
||||||
}
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
const extractSessionIdFromRequest = function (req) {
|
|
||||||
if (process.env.uws === "true") {
|
|
||||||
if (req.getParameter(1)) {
|
|
||||||
logger.debug(`[WS]where projectKey=${req.getParameter(1)}`);
|
|
||||||
return req.getParameter(1);
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return helper.extractSessionIdFromRequest(req);
|
|
||||||
}
|
|
||||||
return undefined;
|
|
||||||
}
|
|
||||||
const extractPayloadFromRequest = async function (req, res) {
|
|
||||||
let filters = {
|
|
||||||
"query": {},
|
|
||||||
"filter": {}
|
|
||||||
};
|
|
||||||
if (process.env.uws === "true") {
|
|
||||||
if (req.getQuery("q")) {
|
|
||||||
logger.debug(`[WS]where q=${req.getQuery("q")}`);
|
|
||||||
filters.query.value = req.getQuery("q");
|
|
||||||
}
|
|
||||||
if (req.getQuery("key")) {
|
|
||||||
logger.debug(`[WS]where key=${req.getQuery("key")}`);
|
|
||||||
filters.query.key = req.getQuery("key");
|
|
||||||
}
|
|
||||||
if (req.getQuery("userId")) {
|
|
||||||
logger.debug(`[WS]where userId=${req.getQuery("userId")}`);
|
|
||||||
filters.filter.userID = [req.getQuery("userId")];
|
|
||||||
}
|
|
||||||
if (!filters.query.value) {
|
|
||||||
let body = {};
|
|
||||||
if (req.getMethod() !== 'get') {
|
|
||||||
body = await getBodyFromUWSResponse(res);
|
|
||||||
}
|
|
||||||
filters = {
|
|
||||||
...filters,
|
|
||||||
"sort": {
|
|
||||||
"key": body.sort && body.sort.key ? body.sort.key : undefined,
|
|
||||||
"order": body.sort && body.sort.order === "DESC"
|
|
||||||
},
|
|
||||||
"pagination": {
|
|
||||||
"limit": body.pagination && body.pagination.limit ? body.pagination.limit : undefined,
|
|
||||||
"page": body.pagination && body.pagination.page ? body.pagination.page : undefined
|
|
||||||
}
|
|
||||||
}
|
|
||||||
filters.filter = {...filters.filter, ...(body.filter || {})};
|
|
||||||
}
|
|
||||||
} else {
|
|
||||||
return helper.extractPayloadFromRequest(req);
|
|
||||||
}
|
|
||||||
filters.filter = helper.objectToObjectOfArrays(filters.filter);
|
|
||||||
filters.filter = helper.transformFilters(filters.filter);
|
|
||||||
logger.debug("payload/filters:" + JSON.stringify(filters))
|
|
||||||
return Object.keys(filters).length > 0 ? filters : undefined;
|
|
||||||
}
|
|
||||||
const getAvailableRooms = async function (io) {
|
|
||||||
if (process.env.redis === "true") {
|
|
||||||
return io.of('/').adapter.allRooms();
|
|
||||||
} else {
|
|
||||||
return helper.getAvailableRooms(io);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
const getCompressionConfig = function () {
|
|
||||||
if (process.env.uws !== "true") {
|
|
||||||
return helper.getCompressionConfig();
|
|
||||||
} else {
|
|
||||||
// uWS: The theoretical overhead per socket is 32KB (8KB for compressor and for 24KB decompressor)
|
|
||||||
if (process.env.COMPRESSION === "true") {
|
|
||||||
console.log(`uWS compression: enabled`);
|
|
||||||
return {
|
|
||||||
compression: uWS.DEDICATED_COMPRESSOR_8KB,
|
|
||||||
decompression: uWS.DEDICATED_DECOMPRESSOR_1KB
|
|
||||||
};
|
|
||||||
} else {
|
|
||||||
console.log(`uWS compression: disabled`);
|
|
||||||
return {};
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
extractProjectKeyFromRequest,
|
|
||||||
extractSessionIdFromRequest,
|
|
||||||
extractPayloadFromRequest,
|
|
||||||
getCompressionConfig,
|
|
||||||
getAvailableRooms
|
|
||||||
};
|
|
||||||
|
|
@ -1,107 +0,0 @@
|
||||||
const _io = require("socket.io");
|
|
||||||
const {getCompressionConfig} = require("./helper");
|
|
||||||
const {logger} = require('./logger');
|
|
||||||
|
|
||||||
let io;
|
|
||||||
const getServer = function () {return io;}
|
|
||||||
|
|
||||||
const useRedis = process.env.redis === "true";
|
|
||||||
let inMemorySocketsCache = [];
|
|
||||||
let lastCacheUpdateTime = 0;
|
|
||||||
const CACHE_REFRESH_INTERVAL = parseInt(process.env.cacheRefreshInterval) || 5000;
|
|
||||||
|
|
||||||
const doFetchAllSockets = async function () {
|
|
||||||
if (useRedis) {
|
|
||||||
const now = Date.now();
|
|
||||||
logger.info(`Using in-memory cache (age: ${now - lastCacheUpdateTime}ms)`);
|
|
||||||
return inMemorySocketsCache;
|
|
||||||
} else {
|
|
||||||
try {
|
|
||||||
return await io.fetchSockets();
|
|
||||||
} catch (error) {
|
|
||||||
logger.error('Error fetching sockets:', error);
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}
|
|
||||||
|
|
||||||
// Background refresher that runs independently of requests
|
|
||||||
let cacheRefresher = null;
|
|
||||||
function startCacheRefresher() {
|
|
||||||
if (cacheRefresher) clearInterval(cacheRefresher);
|
|
||||||
|
|
||||||
cacheRefresher = setInterval(async () => {
|
|
||||||
const now = Date.now();
|
|
||||||
// Only refresh if cache is stale
|
|
||||||
if (now - lastCacheUpdateTime >= CACHE_REFRESH_INTERVAL) {
|
|
||||||
logger.debug('Background refresh triggered');
|
|
||||||
try {
|
|
||||||
const startTime = performance.now();
|
|
||||||
const result = await io.fetchSockets();
|
|
||||||
inMemorySocketsCache = result;
|
|
||||||
lastCacheUpdateTime = now;
|
|
||||||
const duration = performance.now() - startTime;
|
|
||||||
logger.info(`Background refresh complete: ${duration}ms, ${result.length} sockets`);
|
|
||||||
} catch (error) {
|
|
||||||
logger.error(`Background refresh error: ${error}`);
|
|
||||||
}
|
|
||||||
}
|
|
||||||
}, CACHE_REFRESH_INTERVAL / 2);
|
|
||||||
}
|
|
||||||
|
|
||||||
const processSocketsList = function (sockets) {
|
|
||||||
let res = []
|
|
||||||
for (let socket of sockets) {
|
|
||||||
let {handshake} = socket;
|
|
||||||
res.push({handshake});
|
|
||||||
}
|
|
||||||
return res
|
|
||||||
}
|
|
||||||
|
|
||||||
const fetchSockets = async function (roomID) {
|
|
||||||
if (!io) {
|
|
||||||
return [];
|
|
||||||
}
|
|
||||||
if (!roomID) {
|
|
||||||
return await doFetchAllSockets();
|
|
||||||
}
|
|
||||||
return await io.in(roomID).fetchSockets();
|
|
||||||
}
|
|
||||||
|
|
||||||
const createSocketIOServer = function (server, prefix) {
|
|
||||||
if (io) {
|
|
||||||
return io;
|
|
||||||
}
|
|
||||||
if (process.env.uws !== "true") {
|
|
||||||
io = _io(server, {
|
|
||||||
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
|
|
||||||
cors: {
|
|
||||||
origin: "*",
|
|
||||||
methods: ["GET", "POST", "PUT"],
|
|
||||||
credentials: true
|
|
||||||
},
|
|
||||||
path: (prefix ? prefix : '') + '/socket',
|
|
||||||
...getCompressionConfig()
|
|
||||||
});
|
|
||||||
} else {
|
|
||||||
io = new _io.Server({
|
|
||||||
maxHttpBufferSize: (parseFloat(process.env.maxHttpBufferSize) || 5) * 1e6,
|
|
||||||
cors: {
|
|
||||||
origin: "*",
|
|
||||||
methods: ["GET", "POST", "PUT"],
|
|
||||||
credentials: true
|
|
||||||
},
|
|
||||||
path: (prefix ? prefix : '') + '/socket',
|
|
||||||
...getCompressionConfig()
|
|
||||||
});
|
|
||||||
io.attachApp(server);
|
|
||||||
}
|
|
||||||
startCacheRefresher();
|
|
||||||
return io;
|
|
||||||
}
|
|
||||||
|
|
||||||
module.exports = {
|
|
||||||
createSocketIOServer,
|
|
||||||
getServer,
|
|
||||||
fetchSockets,
|
|
||||||
}
|
|
||||||
60
ee/backend/cmd/assist-api/main.go
Normal file
60
ee/backend/cmd/assist-api/main.go
Normal file
|
|
@ -0,0 +1,60 @@
|
||||||
|
package main
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
|
||||||
|
assistConfig "openreplay/backend/internal/config/assist"
|
||||||
|
"openreplay/backend/pkg/assist"
|
||||||
|
"openreplay/backend/pkg/db/postgres/pool"
|
||||||
|
"openreplay/backend/pkg/db/redis"
|
||||||
|
"openreplay/backend/pkg/logger"
|
||||||
|
"openreplay/backend/pkg/metrics"
|
||||||
|
databaseMetrics "openreplay/backend/pkg/metrics/database"
|
||||||
|
"openreplay/backend/pkg/metrics/web"
|
||||||
|
"openreplay/backend/pkg/server"
|
||||||
|
"openreplay/backend/pkg/server/api"
|
||||||
|
)
|
||||||
|
|
||||||
|
func main() {
|
||||||
|
ctx := context.Background()
|
||||||
|
log := logger.New()
|
||||||
|
cfg := assistConfig.New(log)
|
||||||
|
// Observability
|
||||||
|
webMetrics := web.New("assist")
|
||||||
|
dbMetric := databaseMetrics.New("assist")
|
||||||
|
metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
|
||||||
|
|
||||||
|
if cfg.AssistKey == "" {
|
||||||
|
log.Fatal(ctx, "assist key is not set")
|
||||||
|
}
|
||||||
|
|
||||||
|
pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(ctx, "can't init postgres connection: %s", err)
|
||||||
|
}
|
||||||
|
defer pgConn.Close()
|
||||||
|
|
||||||
|
redisClient, err := redis.New(&cfg.Redis)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(ctx, "can't init redis connection: %s", err)
|
||||||
|
}
|
||||||
|
defer redisClient.Close()
|
||||||
|
|
||||||
|
prefix := api.NoPrefix
|
||||||
|
builder, err := assist.NewServiceBuilder(log, cfg, webMetrics, dbMetric, pgConn, redisClient)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(ctx, "can't init services: %s", err)
|
||||||
|
}
|
||||||
|
defer func() {
|
||||||
|
builder.AssistStats.Stop()
|
||||||
|
}()
|
||||||
|
|
||||||
|
router, err := api.NewRouter(&cfg.HTTP, log)
|
||||||
|
if err != nil {
|
||||||
|
log.Fatal(ctx, "failed while creating router: %s", err)
|
||||||
|
}
|
||||||
|
router.AddHandlers(prefix, builder.AssistAPI)
|
||||||
|
router.AddMiddlewares(builder.RateLimiter.Middleware)
|
||||||
|
|
||||||
|
server.Run(ctx, log, &cfg.HTTP, router)
|
||||||
|
}
|
||||||
30
ee/backend/internal/config/assist/config.go
Normal file
30
ee/backend/internal/config/assist/config.go
Normal file
|
|
@ -0,0 +1,30 @@
|
||||||
|
package assist
|
||||||
|
|
||||||
|
import (
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"openreplay/backend/internal/config/common"
|
||||||
|
"openreplay/backend/internal/config/configurator"
|
||||||
|
"openreplay/backend/internal/config/redis"
|
||||||
|
"openreplay/backend/pkg/env"
|
||||||
|
"openreplay/backend/pkg/logger"
|
||||||
|
)
|
||||||
|
|
||||||
|
type Config struct {
|
||||||
|
common.Config
|
||||||
|
common.Postgres
|
||||||
|
redis.Redis
|
||||||
|
common.HTTP
|
||||||
|
ProjectExpiration time.Duration `env:"PROJECT_EXPIRATION,default=10m"`
|
||||||
|
AssistKey string `env:"ASSIST_KEY"`
|
||||||
|
CacheTTL time.Duration `env:"REDIS_CACHE_TTL,default=5s"`
|
||||||
|
BatchSize int `env:"REDIS_BATCH_SIZE,default=1000"`
|
||||||
|
ScanSize int64 `env:"REDIS_SCAN_SIZE,default=1000"`
|
||||||
|
WorkerID uint16
|
||||||
|
}
|
||||||
|
|
||||||
|
func New(log logger.Logger) *Config {
|
||||||
|
cfg := &Config{WorkerID: env.WorkerID()}
|
||||||
|
configurator.Process(log, cfg)
|
||||||
|
return cfg
|
||||||
|
}
|
||||||
207
ee/backend/pkg/assist/api/handlers.go
Normal file
207
ee/backend/pkg/assist/api/handlers.go
Normal file
|
|
@ -0,0 +1,207 @@
|
||||||
|
package api
|
||||||
|
|
||||||
|
import (
|
||||||
|
"context"
|
||||||
|
"encoding/json"
|
||||||
|
"errors"
|
||||||
|
"fmt"
|
||||||
|
"net/http"
|
||||||
|
"time"
|
||||||
|
|
||||||
|
"github.com/gorilla/mux"
|
||||||
|
|
||||||
|
assistAPI "openreplay/backend/internal/config/assist"
|
||||||
|
"openreplay/backend/pkg/assist/service"
|
||||||
|
"openreplay/backend/pkg/logger"
|
||||||
|
"openreplay/backend/pkg/server/api"
|
||||||
|
"openreplay/backend/pkg/sessionmanager"
|
||||||
|
)
|
||||||
|
|
||||||
|
type handlersImpl struct {
|
||||||
|
cfg *assistAPI.Config
|
||||||
|
log logger.Logger
|
||||||
|
responser *api.Responser
|
||||||
|
jsonSizeLimit int64
|
||||||
|
assist service.Assist
|
||||||
|
}
|
||||||
|
|
||||||
|
func NewHandlers(log logger.Logger, cfg *assistAPI.Config, responser *api.Responser, assist service.Assist) (api.Handlers, error) {
|
||||||
|
return &handlersImpl{
|
||||||
|
cfg: cfg,
|
||||||
|
log: log,
|
||||||
|
responser: responser,
|
||||||
|
jsonSizeLimit: cfg.JsonSizeLimit,
|
||||||
|
assist: assist,
|
||||||
|
}, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *handlersImpl) GetAll() []*api.Description {
|
||||||
|
keyPrefix := "/assist"
|
||||||
|
if e.cfg.AssistKey != "" {
|
||||||
|
keyPrefix = fmt.Sprintf("/assist/%s", e.cfg.AssistKey)
|
||||||
|
}
|
||||||
|
return []*api.Description{
|
||||||
|
{keyPrefix + "/sockets-list/{projectKey}/autocomplete", e.autocomplete, "GET"}, // event search with live=true
|
||||||
|
{keyPrefix + "/sockets-list/{projectKey}/{sessionId}", e.socketsListByProject, "GET"}, // is_live for getReplay call
|
||||||
|
{keyPrefix + "/sockets-live/{projectKey}", e.socketsLiveByProject, "POST"}, // handler /{projectId}/assist/sessions for co-browser
|
||||||
|
{keyPrefix + "/sockets-live/{projectKey}/{sessionId}", e.socketsLiveBySession, "GET"}, // for get_live_session (with data) and for session_exists
|
||||||
|
{"/v1/ping", e.ping, "GET"},
|
||||||
|
}
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *handlersImpl) ping(w http.ResponseWriter, r *http.Request) {
|
||||||
|
w.WriteHeader(http.StatusOK)
|
||||||
|
}
|
||||||
|
|
||||||
|
func getProjectKey(r *http.Request) (string, error) {
|
||||||
|
vars := mux.Vars(r)
|
||||||
|
key := vars["projectKey"]
|
||||||
|
if key == "" {
|
||||||
|
return "", fmt.Errorf("empty project key")
|
||||||
|
}
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getSessionID(r *http.Request) (string, error) {
|
||||||
|
vars := mux.Vars(r)
|
||||||
|
key := vars["sessionId"]
|
||||||
|
if key == "" {
|
||||||
|
return "", fmt.Errorf("empty session ID")
|
||||||
|
}
|
||||||
|
return key, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func getQuery(r *http.Request) (*service.Query, error) {
|
||||||
|
params := r.URL.Query()
|
||||||
|
q := &service.Query{
|
||||||
|
Key: params.Get("key"),
|
||||||
|
Value: params.Get("q"),
|
||||||
|
}
|
||||||
|
if q.Key == "" || q.Value == "" {
|
||||||
|
return nil, fmt.Errorf("empty key or value")
|
||||||
|
}
|
||||||
|
return q, nil
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *handlersImpl) autocomplete(w http.ResponseWriter, r *http.Request) {
|
||||||
|
startTime := time.Now()
|
||||||
|
bodySize := 0
|
||||||
|
|
||||||
|
projectKey, err := getProjectKey(r)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
query, err := getQuery(r)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := e.assist.Autocomplete(projectKey, query)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response := map[string]interface{}{
|
||||||
|
"data": resp,
|
||||||
|
}
|
||||||
|
e.responser.ResponseWithJSON(e.log, r.Context(), w, response, startTime, r.URL.Path, bodySize)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *handlersImpl) socketsListByProject(w http.ResponseWriter, r *http.Request) {
|
||||||
|
startTime := time.Now()
|
||||||
|
bodySize := 0
|
||||||
|
|
||||||
|
projectKey, err := getProjectKey(r)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sessionID, err := getSessionID(r)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := e.assist.GetByID(projectKey, sessionID)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, sessionmanager.ErrSessionNotFound) {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
|
||||||
|
} else if errors.Is(err, sessionmanager.ErrSessionNotBelongToProject) {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, err, startTime, r.URL.Path, bodySize)
|
||||||
|
} else {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response := map[string]interface{}{
|
||||||
|
"data": resp,
|
||||||
|
}
|
||||||
|
e.responser.ResponseWithJSON(e.log, r.Context(), w, response, startTime, r.URL.Path, bodySize)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *handlersImpl) socketsLiveByProject(w http.ResponseWriter, r *http.Request) {
|
||||||
|
startTime := time.Now()
|
||||||
|
bodySize := 0
|
||||||
|
|
||||||
|
projectKey, err := getProjectKey(r)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
bodyBytes, err := api.ReadBody(e.log, w, r, e.jsonSizeLimit)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
e.log.Debug(context.Background(), "bodyBytes: %s", bodyBytes)
|
||||||
|
bodySize = len(bodyBytes)
|
||||||
|
req := &service.Request{}
|
||||||
|
if err := json.Unmarshal(bodyBytes, req); err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := e.assist.GetAll(projectKey, req)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response := map[string]interface{}{
|
||||||
|
"data": resp,
|
||||||
|
}
|
||||||
|
e.responser.ResponseWithJSON(e.log, r.Context(), w, response, startTime, r.URL.Path, bodySize)
|
||||||
|
}
|
||||||
|
|
||||||
|
func (e *handlersImpl) socketsLiveBySession(w http.ResponseWriter, r *http.Request) {
|
||||||
|
startTime := time.Now()
|
||||||
|
bodySize := 0
|
||||||
|
|
||||||
|
projectKey, err := getProjectKey(r)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
sessionID, err := getSessionID(r)
|
||||||
|
if err != nil {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
|
||||||
|
return
|
||||||
|
}
|
||||||
|
|
||||||
|
resp, err := e.assist.GetByID(projectKey, sessionID)
|
||||||
|
if err != nil {
|
||||||
|
if errors.Is(err, sessionmanager.ErrSessionNotFound) {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusNotFound, err, startTime, r.URL.Path, bodySize)
|
||||||
|
} else if errors.Is(err, sessionmanager.ErrSessionNotBelongToProject) {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusForbidden, err, startTime, r.URL.Path, bodySize)
|
||||||
|
} else {
|
||||||
|
e.responser.ResponseWithError(e.log, r.Context(), w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
|
||||||
|
}
|
||||||
|
return
|
||||||
|
}
|
||||||
|
response := map[string]interface{}{
|
||||||
|
"data": resp,
|
||||||
|
}
|
||||||
|
e.responser.ResponseWithJSON(e.log, r.Context(), w, response, startTime, r.URL.Path, bodySize)
|
||||||
|
}
|
||||||
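
For reference, a minimal client-side sketch of the autocomplete route registered above. Only the route shape ("/assist[/{AssistKey}]/sockets-list/{projectKey}/autocomplete") and the "key"/"q" query parameters come from the handlers; the host, project key, and the "userBrowser" filter name are placeholder assumptions.

// Hypothetical caller of the autocomplete endpoint; host, project key and
// filter key are placeholders, the route and query parameters mirror the handler.
package main

import (
	"fmt"
	"io"
	"net/http"
	"net/url"
)

func main() {
	base := "http://localhost:9000" // assumed assist service address
	projectKey := "my-project-key"  // placeholder project key

	q := url.Values{}
	q.Set("key", "userBrowser") // becomes service.Query.Key (interpreted as a sessionmanager.FilterType)
	q.Set("q", "Chr")           // becomes service.Query.Value, the autocomplete prefix

	endpoint := fmt.Sprintf("%s/assist/sockets-list/%s/autocomplete?%s", base, projectKey, q.Encode())
	resp, err := http.Get(endpoint)
	if err != nil {
		panic(err)
	}
	defer resp.Body.Close()

	body, _ := io.ReadAll(resp.Body)
	fmt.Println(resp.Status)
	fmt.Println(string(body)) // the {"data": ...} envelope written by ResponseWithJSON
}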

ee/backend/pkg/assist/builder.go (new file, 48 lines)
@@ -0,0 +1,48 @@
package assist

import (
	"time"

	"openreplay/backend/internal/config/assist"
	assistAPI "openreplay/backend/pkg/assist/api"
	"openreplay/backend/pkg/assist/service"
	"openreplay/backend/pkg/db/postgres/pool"
	"openreplay/backend/pkg/db/redis"
	"openreplay/backend/pkg/logger"
	"openreplay/backend/pkg/metrics/database"
	"openreplay/backend/pkg/metrics/web"
	"openreplay/backend/pkg/projects"
	"openreplay/backend/pkg/server/api"
	"openreplay/backend/pkg/server/limiter"
	"openreplay/backend/pkg/sessionmanager"
)

type ServicesBuilder struct {
	RateLimiter *limiter.UserRateLimiter
	AssistAPI   api.Handlers
	AssistStats service.AssistStats
}

func NewServiceBuilder(log logger.Logger, cfg *assist.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
	projectsManager := projects.New(log, pgconn, redis, dbMetrics)
	sessManager, err := sessionmanager.New(log, cfg, redis.Redis)
	if err != nil {
		return nil, err
	}
	sessManager.Start()
	assistStats, err := service.NewAssistStats(log, pgconn, redis.Redis)
	if err != nil {
		return nil, err
	}
	assistManager := service.NewAssist(log, pgconn, projectsManager, sessManager)
	responser := api.NewResponser(webMetrics)
	handlers, err := assistAPI.NewHandlers(log, cfg, responser, assistManager)
	if err != nil {
		return nil, err
	}
	return &ServicesBuilder{
		RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
		AssistAPI:   handlers,
		AssistStats: assistStats,
	}, nil
}

ee/backend/pkg/assist/service/assist.go (new file, 119 lines)
@@ -0,0 +1,119 @@
package service

import (
	"fmt"
	"strconv"

	"openreplay/backend/pkg/db/postgres/pool"
	"openreplay/backend/pkg/logger"
	"openreplay/backend/pkg/projects"
	"openreplay/backend/pkg/sessionmanager"
)

type assistImpl struct {
	log      logger.Logger
	pgconn   pool.Pool
	projects projects.Projects
	sessions sessionmanager.SessionManager
}

type Assist interface {
	Autocomplete(projectKey string, query *Query) (interface{}, error)
	IsLive(projectKey, sessionID string) (bool, error)
	GetAll(projectKey string, filters *Request) (interface{}, error)
	GetByID(projectKey, sessionID string) (interface{}, error)
}

func NewAssist(log logger.Logger, pgconn pool.Pool, projects projects.Projects, sessions sessionmanager.SessionManager) Assist {
	return &assistImpl{
		log:      log,
		pgconn:   pgconn,
		projects: projects,
		sessions: sessions,
	}
}

func (a *assistImpl) Autocomplete(projectKey string, query *Query) (interface{}, error) {
	switch {
	case projectKey == "":
		return nil, fmt.Errorf("project key is required")
	case query == nil:
		return nil, fmt.Errorf("query is required")
	case query.Key == "":
		return nil, fmt.Errorf("query key is required")
	case query.Value == "":
		return nil, fmt.Errorf("query value is required")
	}
	project, err := a.projects.GetProjectByKey(projectKey)
	if err != nil {
		return nil, fmt.Errorf("failed to get project by key: %s", err)
	}
	return a.sessions.Autocomplete(strconv.Itoa(int(project.ProjectID)), sessionmanager.FilterType(query.Key), query.Value)
}

func (a *assistImpl) IsLive(projectKey, sessionID string) (bool, error) {
	switch {
	case projectKey == "":
		return false, fmt.Errorf("project key is required")
	case sessionID == "":
		return false, fmt.Errorf("session ID is required")
	}
	project, err := a.projects.GetProjectByKey(projectKey)
	if err != nil {
		return false, fmt.Errorf("failed to get project by key: %s", err)
	}
	sess, err := a.sessions.GetByID(strconv.Itoa(int(project.ProjectID)), sessionID)
	if err != nil {
		return false, fmt.Errorf("failed to get session by ID: %s", err)
	}
	return sess != nil, nil
}

func (a *assistImpl) GetAll(projectKey string, request *Request) (interface{}, error) {
	switch {
	case projectKey == "":
		return nil, fmt.Errorf("project key is required")
	case request == nil:
		return nil, fmt.Errorf("filters are required")
	}
	project, err := a.projects.GetProjectByKey(projectKey)
	if err != nil {
		return nil, fmt.Errorf("failed to get project by key: %s", err)
	}
	order := sessionmanager.Asc
	if request.Sort.Order == "DESC" {
		order = sessionmanager.Desc
	}
	filters := make([]*sessionmanager.Filter, 0, len(request.Filters))
	for name, f := range request.Filters {
		filters = append(filters, &sessionmanager.Filter{
			Type:     sessionmanager.FilterType(name),
			Value:    f.Value,
			Operator: f.Operator == "is",
		})
	}
	sessions, total, counter, err := a.sessions.GetAll(strconv.Itoa(int(project.ProjectID)), filters, order, request.Pagination.Page, request.Pagination.Limit)
	if err != nil {
		return nil, fmt.Errorf("failed to get sessions: %s", err)
	}
	resp := map[string]interface{}{
		"total":    total,
		"counter":  counter,
		"sessions": sessions,
	}
	return resp, nil
}

func (a *assistImpl) GetByID(projectKey, sessionID string) (interface{}, error) {
	switch {
	case projectKey == "":
		return nil, fmt.Errorf("project key is required")
	case sessionID == "":
		return nil, fmt.Errorf("session ID is required")
	}
	project, err := a.projects.GetProjectByKey(projectKey)
	if err != nil {
		return nil, fmt.Errorf("failed to get project by key: %s", err)
	}
	return a.sessions.GetByID(strconv.Itoa(int(project.ProjectID)), sessionID)
}

ee/backend/pkg/assist/service/model.go (new file, 27 lines)
@@ -0,0 +1,27 @@
package service

type Query struct {
	Key   string
	Value string
}

type Filter struct {
	Value    []string `json:"values"`
	Operator string   `json:"operator"` // is|contains
}

type Pagination struct {
	Limit int `json:"limit"`
	Page  int `json:"page"`
}

type Sort struct {
	Key   string `json:"key"`   // currently unused
	Order string `json:"order"` // [ASC|DESC]
}

type Request struct {
	Filters    map[string]Filter `json:"filter"`
	Pagination Pagination        `json:"pagination"`
	Sort       Sort              `json:"sort"`
}
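
For reference, a sketch of the JSON body that the sockets-live POST route unmarshals into Request, derived from the struct tags above. The "userBrowser" filter name and the concrete values are illustrative assumptions, and the snippet only builds inside the backend module.

// Illustrative payload for POST /assist[/{AssistKey}]/sockets-live/{projectKey}.
// Field and tag names come from model.go above; the filter name and values are assumptions.
package main

import (
	"encoding/json"
	"fmt"

	"openreplay/backend/pkg/assist/service"
)

func main() {
	req := service.Request{
		Filters: map[string]service.Filter{
			"userBrowser": {Value: []string{"Chrome"}, Operator: "is"}, // operator: is|contains
		},
		Pagination: service.Pagination{Limit: 10, Page: 1},
		Sort:       service.Sort{Key: "timestamp", Order: "DESC"},
	}
	body, _ := json.MarshalIndent(req, "", "  ")
	fmt.Println(string(body))
	// Output (shape):
	// {
	//   "filter": {"userBrowser": {"values": ["Chrome"], "operator": "is"}},
	//   "pagination": {"limit": 10, "page": 1},
	//   "sort": {"key": "timestamp", "order": "DESC"}
	// }
}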

ee/backend/pkg/assist/service/stats.go (new file, 126 lines)
@@ -0,0 +1,126 @@
package service

import (
	"context"
	"encoding/json"
	"errors"
	"time"

	"github.com/redis/go-redis/v9"

	"openreplay/backend/pkg/db/postgres/pool"
	"openreplay/backend/pkg/logger"
)

type assistStatsImpl struct {
	log         logger.Logger
	pgClient    pool.Pool
	redisClient *redis.Client
	ticker      *time.Ticker
	stopChan    chan struct{}
}

type AssistStats interface {
	Stop()
}

func NewAssistStats(log logger.Logger, pgClient pool.Pool, redisClient *redis.Client) (AssistStats, error) {
	switch {
	case log == nil:
		return nil, errors.New("logger is empty")
	case pgClient == nil:
		return nil, errors.New("pg client is empty")
	case redisClient == nil:
		return nil, errors.New("redis client is empty")
	}
	stats := &assistStatsImpl{
		log:         log,
		pgClient:    pgClient,
		redisClient: redisClient,
		ticker:      time.NewTicker(time.Minute),
		stopChan:    make(chan struct{}),
	}
	stats.init()
	return stats, nil
}

// init launches the background goroutine that flushes queued events on every
// ticker tick until Stop is called.
func (as *assistStatsImpl) init() {
	as.log.Debug(context.Background(), "Starting assist stats")

	go func() {
		for {
			select {
			case <-as.ticker.C:
				as.loadData()
			case <-as.stopChan:
				as.log.Debug(context.Background(), "Stopping assist stats")
				return
			}
		}
	}()
}

type AssistStatsEvent struct {
	ProjectID  uint32 `json:"project_id"`
	SessionID  string `json:"session_id"`
	AgentID    string `json:"agent_id"`
	EventID    string `json:"event_id"`
	EventType  string `json:"event_type"`
	EventState string `json:"event_state"`
	Timestamp  int64  `json:"timestamp"`
}

// loadData pops up to 1000 queued events from the "assist:stats" Redis list
// and writes them to Postgres.
func (as *assistStatsImpl) loadData() {
	ctx := context.Background()

	events, err := as.redisClient.LPopCount(ctx, "assist:stats", 1000).Result()
	if err != nil {
		if errors.Is(err, redis.Nil) {
			as.log.Debug(ctx, "No data to load from redis")
		} else {
			as.log.Error(ctx, "Failed to load data from redis: %s", err)
		}
		return
	}
	if len(events) == 0 {
		as.log.Debug(ctx, "No data to load from redis")
		return
	}
	as.log.Debug(ctx, "Loaded %d events from redis", len(events))

	for _, event := range events {
		e := &AssistStatsEvent{}
		err := json.Unmarshal([]byte(event), e)
		if err != nil {
			as.log.Error(ctx, "Failed to unmarshal event: %s", err)
			continue
		}
		switch e.EventType {
		case "start":
			err = as.insertEvent(e)
		case "end":
			err = as.updateEvent(e)
		default:
			as.log.Warn(ctx, "Unknown event type: %s", e.EventType)
		}
		if err != nil {
			as.log.Error(ctx, "Failed to process event: %s", err)
			continue
		}
	}
}

func (as *assistStatsImpl) insertEvent(event *AssistStatsEvent) error {
	insertQuery := `INSERT INTO assist_events (event_id, project_id, session_id, agent_id, event_type, timestamp) VALUES ($1, $2, $3, $4, $5, $6) ON CONFLICT (event_id) DO NOTHING`
	return as.pgClient.Exec(insertQuery, event.EventID, event.ProjectID, event.SessionID, event.AgentID, event.EventType, event.Timestamp)
}

func (as *assistStatsImpl) updateEvent(event *AssistStatsEvent) error {
	updateQuery := `UPDATE assist_events SET duration = $1 - timestamp WHERE event_id = $2`
	return as.pgClient.Exec(updateQuery, event.Timestamp, event.EventID)
}

func (as *assistStatsImpl) Stop() {
	close(as.stopChan)
	as.ticker.Stop()
}
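
The producer side of the "assist:stats" queue is not part of this diff. Below is a minimal sketch of what pushing an event could look like, assuming events are appended to the same list that loadData drains with LPopCount; the Redis address and the event values are placeholders.

// Hypothetical producer for the "assist:stats" list consumed above.
// Redis address and event values are placeholders.
package main

import (
	"context"
	"encoding/json"
	"time"

	"github.com/redis/go-redis/v9"
)

type AssistStatsEvent struct {
	ProjectID  uint32 `json:"project_id"`
	SessionID  string `json:"session_id"`
	AgentID    string `json:"agent_id"`
	EventID    string `json:"event_id"`
	EventType  string `json:"event_type"` // "start" inserts a row, "end" sets its duration
	EventState string `json:"event_state"`
	Timestamp  int64  `json:"timestamp"`
}

func main() {
	ctx := context.Background()
	rdb := redis.NewClient(&redis.Options{Addr: "localhost:6379"}) // assumed address

	event := AssistStatsEvent{
		ProjectID: 1,
		SessionID: "7391847190237",
		AgentID:   "42",
		EventID:   "call-7391847190237-42",
		EventType: "start",
		Timestamp: time.Now().UnixMilli(),
	}
	payload, err := json.Marshal(event)
	if err != nil {
		panic(err)
	}
	// RPush so the consumer's LPopCount drains events in FIFO order.
	if err := rdb.RPush(ctx, "assist:stats", payload).Err(); err != nil {
		panic(err)
	}
}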
Some files were not shown because too many files have changed in this diff.