Compare commits: main ... fix_locali (8 commits)

| Author | SHA1 | Date |
|---|---|---|
|  | 25d0b09987 |  |
|  | 781e0054b9 |  |
|  | 386fcea138 |  |
|  | 5b55352d25 |  |
|  | e95ee51066 |  |
|  | 006f87f388 |  |
|  | e342e04616 |  |
|  | f16e59ec9b |  |

896 changed files with 17,165 additions and 13,169 deletions
.github/workflows/assist-server-ee.yaml (vendored, 122 lines)
@@ -1,122 +0,0 @@ file removed on fix_locali; its contents on main were:

# This action will push the assist changes to aws
on:
  workflow_dispatch:
    inputs:
      skip_security_checks:
        description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
        required: false
        default: "false"
  push:
    branches:
      - dev
    paths:
      - "ee/assist-server/**"

name: Build and Deploy Assist-Server EE

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - uses: ./.github/composite-actions/update-keys
        with:
          assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
          assist_key: ${{ secrets.ASSIST_KEY }}
          domain_name: ${{ secrets.EE_DOMAIN_NAME }}
          jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
          jwt_secret: ${{ secrets.EE_JWT_SECRET }}
          jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
          jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
          license_key: ${{ secrets.EE_LICENSE_KEY }}
          minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
          minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
          pg_password: ${{ secrets.EE_PG_PASSWORD }}
          registry_url: ${{ secrets.OSS_REGISTRY_URL }}
        name: Update Keys

      - name: Docker login
        run: |
          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing Assist-Server image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd assist-server
          PUSH_IMAGE=0 bash -x ./build.sh ee
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("assist-server")
            for image in ${images[*]};do
              ./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("assist-server")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done
      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
            tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              # We've to strip off the -ee, as helm will append it.
              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
          EOF
          done
      - name: Deploy to kubernetes
        run: |
          pwd
          cd scripts/helmcharts/

          # Update changed image tag
          sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mkdir -p /tmp/charts
          mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
          rm -rf openreplay/charts/*
          mv /tmp/charts/* openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          # We're not passing -ee flag, because helm will add that.
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
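The "Creating old image input" step above rewrites the image tags currently running in the cluster into a Helm values override file. A minimal Python sketch of that transformation, for reference only (the workflow does this with kubectl, tr, cut and a heredoc; the registry and tag names below are hypothetical):

    # Illustrative sketch of building /tmp/image_override.yaml from running images.
    images = [
        "registry.example.com/foss/chalice:v1.22.0-ee",    # hypothetical image references
        "registry.example.com/foss/frontend:v1.22.3-ee",
    ]

    override_lines = []
    for ref in images:
        name_and_tag = ref.rsplit("/", 1)[-1]       # e.g. "chalice:v1.22.0-ee"
        name, tag = name_and_tag.split(":", 1)
        tag = tag.split("-", 1)[0]                  # strip the "-ee" suffix; helm appends it again
        override_lines += [f"{name}:", "  image:", f"    tag: {tag}"]

    print("\n".join(override_lines))
    # chalice:
    #   image:
    #     tag: v1.22.0
    # frontend:
    #   image:
    #     tag: v1.22.3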
.github/workflows/patch-build-old.yaml (vendored, 189 lines)
@@ -1,189 +0,0 @@ file removed on fix_locali; its contents on main were:

# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions

on:
  workflow_dispatch:
    inputs:
      services:
        description: 'Comma separated names of services to build(in small letters).'
        required: true
        default: 'chalice,frontend'
      tag:
        description: 'Tag to update.'
        required: true
        type: string
      branch:
        description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
        required: true
        type: string

name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag

jobs:
  deploy:
    name: Build Patch from old tag
    runs-on: ubuntu-latest
    env:
      DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
      DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          fetch-depth: 4
          ref: ${{ github.event.inputs.tag }}

      - name: Set Remote with GITHUB_TOKEN
        run: |
          git config --unset http.https://github.com/.extraheader
          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git

      - name: Create backup tag with timestamp
        run: |
          set -e # Exit immediately if a command exits with a non-zero status
          TIMESTAMP=$(date +%Y%m%d%H%M%S)
          BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
          echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
          echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
          git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
          git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
          echo "Created backup tag: $BACKUP_TAG"

          # Get the oldest commit date from the last 3 commits in raw format
          OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
          echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
          # Add 1 second to the timestamp
          NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
          echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV


      - name: Setup yq
        uses: mikefarah/yq@master

      # Configure AWS credentials for the first registry
      - name: Configure AWS credentials for RELEASE_ARM_REGISTRY
        uses: aws-actions/configure-aws-credentials@v1
        with:
          aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
          aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
          aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}

      - name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
        id: login-ecr-arm
        run: |
          aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
          aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

      - uses: depot/setup-action@v1
      - name: Get HEAD Commit ID
        run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
      - name: Define Branch Name
        run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV

      - name: Build
        id: build-image
        env:
          DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
          DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
          MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
          MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
          MSAAS_REPO_FOLDER: /tmp/msaas
        run: |
          set -exo pipefail
          git config --local user.email "action@github.com"
          git config --local user.name "GitHub Action"
          git checkout -b $BRANCH_NAME
          working_dir=$(pwd)
          function image_version(){
            local service=$1
            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
            current_version=$(yq eval '.AppVersion' $chart_path)
            new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
            echo $new_version
            # yq eval ".AppVersion = \"$new_version\"" -i $chart_path
          }
          function clone_msaas() {
            [ -d $MSAAS_REPO_FOLDER ] || {
              git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
              cd $MSAAS_REPO_FOLDER
              cd openreplay && git fetch origin && git checkout $INPUT_TAG
              git log -1
              cd $MSAAS_REPO_FOLDER
              bash git-init.sh
              git checkout
            }
          }
          function build_managed() {
            local service=$1
            local version=$2
            echo building managed
            clone_msaas
            if [[ $service == 'chalice' ]]; then
              cd $MSAAS_REPO_FOLDER/openreplay/api
            else
              cd $MSAAS_REPO_FOLDER/openreplay/$service
            fi
            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
          }
          # Checking for backend images
          ls backend/cmd >> /tmp/backend.txt
          echo Services: "${{ github.event.inputs.services }}"
          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
          BUILD_SCRIPT_NAME="build.sh"
          # Build FOSS
          for SERVICE in "${SERVICES[@]}"; do
            # Check if service is backend
            if grep -q $SERVICE /tmp/backend.txt; then
              cd backend
              foss_build_args="nil $SERVICE"
              ee_build_args="ee $SERVICE"
            else
              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
              ee_build_args="ee"
            fi
            version=$(image_version $SERVICE)
            echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
            if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
              IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
              echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
            else
              build_managed $SERVICE $version
            fi
            cd $working_dir
            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
            yq eval ".AppVersion = \"$version\"" -i $chart_path
            git add $chart_path
            git commit -m "Increment $SERVICE chart version"
          done

      - name: Change commit timestamp
        run: |
          # Convert the timestamp to a date format git can understand
          NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
          echo "Setting commit date to: $NEW_DATE"

          # Amend the commit with the new date
          GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"

          # Verify the change
          git log -1 --pretty=format:"Commit now dated: %cD"

          # git tag and push
          git tag $INPUT_TAG -f
          git push origin $INPUT_TAG -f


      # - name: Debug Job
      #   if: ${{ failure() }}
      #   uses: mxschmitt/action-tmate@v3
      #   env:
      #     DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
      #     DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
      #     MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
      #     MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
      #     MSAAS_REPO_FOLDER: /tmp/msaas
      #   with:
      #     limit-access-to-actor: true
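The image_version helper above bumps the chart's patch version with awk ('{$NF += 1 ; print $1"."$2"."$3}'). A small Python equivalent of that arithmetic, for reference only (it assumes a plain MAJOR.MINOR.PATCH string):

    def bump_patch(version: str) -> str:
        # Mirrors the awk expression used in image_version: increment the last
        # dot-separated field and reassemble MAJOR.MINOR.PATCH.
        major, minor, patch = version.split(".")
        return f"{major}.{minor}.{int(patch) + 1}"

    assert bump_patch("1.22.0") == "1.22.1"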
.github/workflows/patch-build.yaml (vendored, 246 changed lines)

@@ -2,6 +2,7 @@

 on:
   workflow_dispatch:
+    description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
     inputs:
       services:
         description: 'Comma separated names of services to build(in small letters).'

@@ -19,20 +20,12 @@ jobs:
       DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
     steps:
       - name: Checkout
-        uses: actions/checkout@v4
+        uses: actions/checkout@v2
         with:
-          fetch-depth: 0
-          token: ${{ secrets.GITHUB_TOKEN }}
+          fetch-depth: 1
       - name: Rebase with main branch, to make sure the code has latest main changes
-        if: github.ref != 'refs/heads/main'
         run: |
-          git remote -v
-          git config --global user.email "action@github.com"
-          git config --global user.name "GitHub Action"
-          git config --global rebase.autoStash true
-          git fetch origin main:main
-          git rebase main
-          git log -3
+          git pull --rebase origin main

       - name: Downloading yq
         run: |

@@ -55,8 +48,6 @@ jobs:
           aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}

       - uses: depot/setup-action@v1
-        env:
-          DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
       - name: Get HEAD Commit ID
         run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
       - name: Define Branch Name

@@ -74,168 +65,78 @@ jobs:
           MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
           MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
           MSAAS_REPO_FOLDER: /tmp/msaas
-          SERVICES_INPUT: ${{ github.event.inputs.services }}
         run: |
-          #!/bin/bash
-          set -euo pipefail
-
-          # Configuration
-          readonly WORKING_DIR=$(pwd)
-          readonly BUILD_SCRIPT_NAME="build.sh"
-          readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
-
-          # Initialize git configuration
-          setup_git() {
-              git config --local user.email "action@github.com"
-              git config --local user.name "GitHub Action"
-              git checkout -b "$BRANCH_NAME"
-          }
-
-          # Get and increment image version
-          image_version() {
-              local service=$1
-              local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-              local current_version new_version
-
-              current_version=$(yq eval '.AppVersion' "$chart_path")
-              new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
-              echo "$new_version"
-          }
-
-          # Clone MSAAS repository if not exists
-          clone_msaas() {
-              if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
-                  git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
-                  cd "$MSAAS_REPO_FOLDER"
-                  cd openreplay && git fetch origin && git checkout main
-                  git log -1
-                  cd "$MSAAS_REPO_FOLDER"
-                  bash git-init.sh
-                  git checkout
-              fi
-          }
-
-          # Build managed services
-          build_managed() {
-              local service=$1
-              local version=$2
-
-              echo "Building managed service: $service"
-              clone_msaas
-
-              if [[ $service == 'chalice' ]]; then
-                  cd "$MSAAS_REPO_FOLDER/openreplay/api"
-              else
-                  cd "$MSAAS_REPO_FOLDER/openreplay/$service"
-              fi
-
-              local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
-
-              echo "Executing: $build_cmd"
-              if ! eval "$build_cmd" 2>&1; then
-                  echo "Build failed for $service"
-                  exit 1
-              fi
-          }
-
-          # Build service with given arguments
-          build_service() {
-              local service=$1
-              local version=$2
-              local build_args=$3
-              local build_script=${4:-$BUILD_SCRIPT_NAME}
-
-              local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
-              echo "Executing: $command"
-              eval "$command"
-          }
-
-          # Update chart version and commit changes
-          update_chart_version() {
-              local service=$1
-              local version=$2
-              local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-
-              # Ensure we're in the original working directory/repository
-              cd "$WORKING_DIR"
-              yq eval ".AppVersion = \"$version\"" -i "$chart_path"
-              git add "$chart_path"
-              git commit -m "Increment $service chart version to $version"
-              git push --set-upstream origin "$BRANCH_NAME"
-              cd -
-          }
-
-          # Main execution
-          main() {
-              setup_git
-
-              # Get backend services list
-              ls backend/cmd >"$BACKEND_SERVICES_FILE"
-
-              # Parse services input (fix for GitHub Actions syntax)
-              echo "Services: ${SERVICES_INPUT:-$1}"
-              IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
-
-              # Process each service
-              for service in "${services[@]}"; do
-                  echo "Processing service: $service"
-                  cd "$WORKING_DIR"
-
-                  local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
-
-                  # Determine build configuration based on service type
-                  if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
-                      # Backend service
-                      cd backend
-                      foss_build_args="nil $service"
-                      ee_build_args="ee $service"
-                  else
-                      # Non-backend service
-                      case "$service" in
-                      chalice | alerts | crons)
-                          cd "$WORKING_DIR/api"
-                          ;;
-                      *)
-                          cd "$service"
-                          ;;
-                      esac
-
-                      # Special build scripts for alerts/crons
-                      if [[ $service == 'alerts' || $service == 'crons' ]]; then
-                          build_script="build_${service}.sh"
-                      fi
-
-                      ee_build_args="ee"
-                  fi
-
-                  # Get version and build
-                  local version
-                  version=$(image_version "$service")
-
-                  # Build FOSS and EE versions
-                  build_service "$service" "$version" "$foss_build_args"
-                  build_service "$service" "${version}-ee" "$ee_build_args"
-
-                  # Build managed version for specific services
-                  if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
-                      echo "Nothing to build in managed for service $service"
-                  else
-                      build_managed "$service" "$version"
-                  fi
-
-                  # Update chart and commit
-                  update_chart_version "$service" "$version"
-              done
-              cd "$WORKING_DIR"
-
-              # Cleanup
-              rm -f "$BACKEND_SERVICES_FILE"
-          }
-
-          echo "Working directory: $WORKING_DIR"
-          # Run main function with all arguments
-          main "$SERVICES_INPUT"
+          set -exo pipefail
+          git config --local user.email "action@github.com"
+          git config --local user.name "GitHub Action"
+          git checkout -b $BRANCH_NAME
+          working_dir=$(pwd)
+          function image_version(){
+            local service=$1
+            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
+            current_version=$(yq eval '.AppVersion' $chart_path)
+            new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
+            echo $new_version
+            # yq eval ".AppVersion = \"$new_version\"" -i $chart_path
+          }
+          function clone_msaas() {
+            [ -d $MSAAS_REPO_FOLDER ] || {
+              git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
+              cd $MSAAS_REPO_FOLDER
+              cd openreplay && git fetch origin && git checkout main # This have to be changed to specific tag
+              git log -1
+              cd $MSAAS_REPO_FOLDER
+              bash git-init.sh
+              git checkout
+            }
+          }
+          function build_managed() {
+            local service=$1
+            local version=$2
+            echo building managed
+            clone_msaas
+            if [[ $service == 'chalice' ]]; then
+              cd $MSAAS_REPO_FOLDER/openreplay/api
+            else
+              cd $MSAAS_REPO_FOLDER/openreplay/$service
+            fi
+            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
+          }
+          # Checking for backend images
+          ls backend/cmd >> /tmp/backend.txt
+          echo Services: "${{ github.event.inputs.services }}"
+          IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
+          BUILD_SCRIPT_NAME="build.sh"
+          # Build FOSS
+          for SERVICE in "${SERVICES[@]}"; do
+            # Check if service is backend
+            if grep -q $SERVICE /tmp/backend.txt; then
+              cd backend
+              foss_build_args="nil $SERVICE"
+              ee_build_args="ee $SERVICE"
+            else
+              [[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
+              [[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
+              ee_build_args="ee"
+            fi
+            version=$(image_version $SERVICE)
+            echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+            IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+            echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
+            IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
+            if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
+              IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+              echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+            else
+              build_managed $SERVICE $version
+            fi
+            cd $working_dir
+            chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
+            yq eval ".AppVersion = \"$version\"" -i $chart_path
+            git add $chart_path
+            git commit -m "Increment $SERVICE chart version"
+            git push --set-upstream origin $BRANCH_NAME
+          done

       - name: Create Pull Request
         uses: repo-sync/pull-request@v2

@@ -246,7 +147,8 @@ jobs:
           pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
           pr_body: |
             This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
-            Once this PR is merged, tag update job will run automatically.
+            Once this PR is merged, To update the latest tag, run the following workflow.
+            https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml

       # - name: Debug Job
       #   if: ${{ failure() }}
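For orientation, the per-service loop on the fix_locali side of the hunk above always runs an amd64 FOSS build and an amd64 EE build, then either an arm64 FOSS build or the "managed" (MSAAS) build depending on the service. A compact Python sketch of that branching, purely illustrative (service names are examples, not an exhaustive list):

    def planned_builds(service: str) -> list[str]:
        # Mirrors the shell loop: chalice/frontend go through the managed arm64
        # build, every other service gets a plain arm64 FOSS build.
        builds = [f"{service}: amd64 FOSS", f"{service}: amd64 EE"]
        if service in ("chalice", "frontend"):
            builds.append(f"{service}: arm64 managed")
        else:
            builds.append(f"{service}: arm64 FOSS")
        return builds

    print(planned_builds("chalice"))   # ['chalice: amd64 FOSS', 'chalice: amd64 EE', 'chalice: arm64 managed']
    print(planned_builds("http"))      # hypothetical backend service name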
.github/workflows/update-tag.yaml (vendored, 47 changed lines)

@@ -1,42 +1,35 @@
 on:
-  pull_request:
-    types: [closed]
-    branches:
-      - main
-name: Release tag update --force
+  workflow_dispatch:
+    description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
+    inputs:
+      services:
+        description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
+        required: true
+        default: "false"

+name: Force Push tag with main branch HEAD

 jobs:
   deploy:
     name: Build Patch from main
     runs-on: ubuntu-latest
-    if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
+    env:
+      DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
+      DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
     steps:
       - name: Checkout
         uses: actions/checkout@v2

-      - name: Get latest release tag using GitHub API
-        id: get-latest-tag
-        run: |
-          LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-            "https://api.github.com/repos/${{ github.repository }}/releases/latest" \
-            | jq -r .tag_name)
-
-          # Fallback to git command if API doesn't return a tag
-          if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
-            echo "Not found latest tag"
-            exit 100
-          fi
-
-          echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
-          echo "Latest tag: $LATEST_TAG"
-
       - name: Set Remote with GITHUB_TOKEN
         run: |
           git config --unset http.https://github.com/.extraheader
-          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}
+          git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git

       - name: Push main branch to tag
         run: |
+          git fetch --tags
           git checkout main
-          echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
-          git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
+          git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
+
+      # - name: Debug Job
+      #   if: ${{ failure() }}
+      #   uses: mxschmitt/action-tmate@v3
+      #   with:
+      #     limit-access-to-actor: true
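The removed "Get latest release tag using GitHub API" step resolves the most recent release via the REST endpoint /repos/{owner}/{repo}/releases/latest and fails the job if nothing comes back. A minimal Python sketch of the same lookup, for reference only (standard library only; the repository name is just an example, and an Authorization header would be needed for higher rate limits or private repos):

    import json
    import sys
    import urllib.request

    REPO = "openreplay/openreplay"  # example repository
    req = urllib.request.Request(
        f"https://api.github.com/repos/{REPO}/releases/latest",
        headers={"Accept": "application/vnd.github+json"},
    )
    with urllib.request.urlopen(req) as resp:
        tag = json.load(resp).get("tag_name")

    if not tag:
        print("Not found latest tag")
        sys.exit(100)  # same exit code the workflow step used
    print(f"LATEST_TAG={tag}")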
Dockerfile:

@@ -1,17 +1,10 @@
-FROM python:3.12-alpine AS builder
-LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
-LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
-
-RUN apk add --no-cache build-base
-WORKDIR /work
-COPY requirements.txt ./requirements.txt
-RUN pip install --no-cache-dir --upgrade uv && \
-    export UV_SYSTEM_PYTHON=true && \
-    uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
-    uv pip install --no-cache-dir --upgrade -r requirements.txt
-
 FROM python:3.12-alpine
+LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
+LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
 ARG GIT_SHA
+LABEL GIT_SHA=$GIT_SHA
+
+RUN apk add --no-cache build-base tini
 ARG envarg
 # Add Tini
 # Startup daemon

@@ -21,11 +14,19 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
     PRIVATE_ENDPOINTS=false \
     ENTERPRISE_BUILD=${envarg} \
     GIT_SHA=$GIT_SHA
-COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
-COPY --from=builder /usr/local/bin /usr/local/bin
 WORKDIR /work
+COPY requirements.txt ./requirements.txt
+RUN pip install --no-cache-dir --upgrade uv
+RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
+RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system
+
 COPY . .
-RUN apk add --no-cache tini && mv env.default .env
+RUN mv env.default .env
+
+RUN adduser -u 1001 openreplay -D
+USER 1001
+
 ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["./entrypoint.sh"]
+CMD ./entrypoint.sh
@@ -4,8 +4,7 @@ from pydantic_core._pydantic_core import ValidationError

 import schemas
 from chalicelib.core.alerts import alerts, alerts_listener
-from chalicelib.core.alerts.modules import alert_helpers
-from chalicelib.core.sessions import sessions_pg as sessions
+from chalicelib.core.alerts.modules import sessions, alert_helpers
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC

@@ -132,7 +131,6 @@ def Build(a):


 def process():
-    logger.info("> processing alerts on PG")
     notifications = []
     all_alerts = alerts_listener.get_all_alerts()
     with pg_client.PostgresClient() as cur:

@@ -5,9 +5,8 @@ from pydantic_core._pydantic_core import ValidationError
 import schemas
 from chalicelib.utils import pg_client, ch_client, exp_ch_helper
 from chalicelib.utils.TimeUTC import TimeUTC
-from chalicelib.core.alerts import alerts, alerts_listener
-from chalicelib.core.alerts.modules import alert_helpers
-from chalicelib.core.sessions import sessions_ch as sessions
+from . import alerts, alerts_listener
+from .modules import sessions, alert_helpers

 logger = logging.getLogger(__name__)

@@ -156,7 +155,6 @@ def Build(a):


 def process():
-    logger.info("> processing alerts on CH")
     notifications = []
     all_alerts = alerts_listener.get_all_alerts()
     with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:

@@ -1,3 +1,9 @@
+from decouple import config
+
 TENANT_ID = "-1"
+if config("EXP_ALERTS", cast=bool, default=False):
+    from chalicelib.core.sessions import sessions_ch as sessions
+else:
+    from chalicelib.core.sessions import sessions
+
 from . import helpers as alert_helpers

@@ -85,8 +85,7 @@ def __generic_query(typename, value_length=None):
                     ORDER BY value"""

     if value_length is None or value_length > 2:
-        return f"""SELECT DISTINCT ON(value,type) value, type
-                   ((SELECT DISTINCT value, type
+        return f"""(SELECT DISTINCT value, type
                     FROM {TABLE}
                     WHERE
                         project_id = %(project_id)s

@@ -102,7 +101,7 @@ def __generic_query(typename, value_length=None):
                         AND type='{typename.upper()}'
                         AND value ILIKE %(value)s
                     ORDER BY value
-                    LIMIT 5)) AS raw;"""
+                    LIMIT 5);"""
     return f"""SELECT DISTINCT value, type
                 FROM {TABLE}
                 WHERE

@@ -327,7 +326,7 @@ def __search_metadata(project_id, value, key=None, source=None):
                                  AND {colname} ILIKE %(svalue)s LIMIT 5)""")
     with pg_client.PostgresClient() as cur:
         cur.execute(cur.mogrify(f"""\
-                SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
+                SELECT key, value, 'METADATA' AS TYPE
                 FROM({" UNION ALL ".join(sub_from)}) AS all_metas
                 LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
                               "svalue": helper.string_to_sql_like("^" + value)}))
@@ -13,18 +13,15 @@ def get_state(tenant_id):

         if len(pids) > 0:
             cur.execute(
-                cur.mogrify(
-                    """SELECT EXISTS(( SELECT 1
+                cur.mogrify("""SELECT EXISTS(( SELECT 1
                                        FROM public.sessions AS s
                                        WHERE s.project_id IN %(ids)s)) AS exists;""",
-                    {"ids": tuple(pids)},
-                )
+                            {"ids": tuple(pids)})
             )
             recorded = cur.fetchone()["exists"]
         meta = False
         if recorded:
-            query = cur.mogrify(
-                f"""SELECT EXISTS((SELECT 1
+            query = cur.mogrify(f"""SELECT EXISTS((SELECT 1
                                    FROM public.projects AS p
                                         LEFT JOIN LATERAL ( SELECT 1
                                                             FROM public.sessions

@@ -39,35 +36,26 @@ def get_state(tenant_id):
                             OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                             OR p.metadata_10 IS NOT NULL )
                 )) AS exists;""",
-                {"tenant_id": tenant_id},
-            )
+                                {"tenant_id": tenant_id})
             cur.execute(query)

             meta = cur.fetchone()["exists"]

     return [
-        {
-            "task": "Install OpenReplay",
-            "done": recorded,
-            "URL": "https://docs.openreplay.com/getting-started/quick-start",
-        },
-        {
-            "task": "Identify Users",
-            "done": meta,
-            "URL": "https://docs.openreplay.com/data-privacy-security/metadata",
-        },
-        {
-            "task": "Invite Team Members",
-            "done": len(users.get_members(tenant_id=tenant_id)) > 1,
-            "URL": "https://app.openreplay.com/client/manage-users",
-        },
-        {
-            "task": "Integrations",
-            "done": len(datadog.get_all(tenant_id=tenant_id)) > 0
-            or len(sentry.get_all(tenant_id=tenant_id)) > 0
-            or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
-            "URL": "https://docs.openreplay.com/integrations",
-        },
+        {"task": "Install OpenReplay",
+         "done": recorded,
+         "URL": "https://docs.openreplay.com/getting-started/quick-start"},
+        {"task": "Identify Users",
+         "done": meta,
+         "URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
+        {"task": "Invite Team Members",
+         "done": len(users.get_members(tenant_id=tenant_id)) > 1,
+         "URL": "https://app.openreplay.com/client/manage-users"},
+        {"task": "Integrations",
+         "done": len(datadog.get_all(tenant_id=tenant_id)) > 0 \
+                 or len(sentry.get_all(tenant_id=tenant_id)) > 0 \
+                 or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
+         "URL": "https://docs.openreplay.com/integrations"}
     ]

@@ -78,26 +66,21 @@ def get_state_installing(tenant_id):

         if len(pids) > 0:
             cur.execute(
-                cur.mogrify(
-                    """SELECT EXISTS(( SELECT 1
+                cur.mogrify("""SELECT EXISTS(( SELECT 1
                                        FROM public.sessions AS s
                                        WHERE s.project_id IN %(ids)s)) AS exists;""",
-                    {"ids": tuple(pids)},
-                )
+                            {"ids": tuple(pids)})
             )
             recorded = cur.fetchone()["exists"]

-    return {
-        "task": "Install OpenReplay",
-        "done": recorded,
-        "URL": "https://docs.openreplay.com/getting-started/quick-start",
-    }
+    return {"task": "Install OpenReplay",
+            "done": recorded,
+            "URL": "https://docs.openreplay.com/getting-started/quick-start"}


 def get_state_identify_users(tenant_id):
     with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(
-            f"""SELECT EXISTS((SELECT 1
+        query = cur.mogrify(f"""SELECT EXISTS((SELECT 1
                                FROM public.projects AS p
                                     LEFT JOIN LATERAL ( SELECT 1
                                                         FROM public.sessions

@@ -112,32 +95,25 @@ def get_state_identify_users(tenant_id):
                             OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
                             OR p.metadata_10 IS NOT NULL )
                 )) AS exists;""",
-            {"tenant_id": tenant_id},
-        )
+                            {"tenant_id": tenant_id})
         cur.execute(query)

         meta = cur.fetchone()["exists"]

-    return {
-        "task": "Identify Users",
-        "done": meta,
-        "URL": "https://docs.openreplay.com/data-privacy-security/metadata",
-    }
+    return {"task": "Identify Users",
+            "done": meta,
+            "URL": "https://docs.openreplay.com/data-privacy-security/metadata"}


 def get_state_manage_users(tenant_id):
-    return {
-        "task": "Invite Team Members",
-        "done": len(users.get_members(tenant_id=tenant_id)) > 1,
-        "URL": "https://app.openreplay.com/client/manage-users",
-    }
+    return {"task": "Invite Team Members",
+            "done": len(users.get_members(tenant_id=tenant_id)) > 1,
+            "URL": "https://app.openreplay.com/client/manage-users"}


 def get_state_integrations(tenant_id):
-    return {
-        "task": "Integrations",
-        "done": len(datadog.get_all(tenant_id=tenant_id)) > 0
-        or len(sentry.get_all(tenant_id=tenant_id)) > 0
-        or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
-        "URL": "https://docs.openreplay.com/integrations",
-    }
+    return {"task": "Integrations",
+            "done": len(datadog.get_all(tenant_id=tenant_id)) > 0 \
+                    or len(sentry.get_all(tenant_id=tenant_id)) > 0 \
+                    or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
+            "URL": "https://docs.openreplay.com/integrations"}
@@ -4,10 +4,10 @@ from decouple import config

 logger = logging.getLogger(__name__)

-from . import errors_pg as errors_legacy
+from . import errors as errors_legacy

 if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
     logger.info(">>> Using experimental error search")
     from . import errors_ch as errors
 else:
-    from . import errors_pg as errors
+    from . import errors

@@ -1,8 +1,7 @@
 import json
-from typing import List
+from typing import Optional, List

 import schemas
-from chalicelib.core.errors.modules import errors_helper
 from chalicelib.core.sessions import sessions_search
 from chalicelib.core.sourcemaps import sourcemaps
 from chalicelib.utils import pg_client, helper
@@ -52,6 +51,27 @@ def get_batch(error_ids):
     return helper.list_to_camel_case(errors)


+def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
+                            startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
+                            chart: bool = False, step_size_name: str = "step_size",
+                            project_key: Optional[str] = "project_id"):
+    if project_key is None:
+        ch_sub_query = []
+    else:
+        ch_sub_query = [f"{project_key} =%(project_id)s"]
+    if time_constraint:
+        ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
+                         f"timestamp < %({endTime_arg_name})s"]
+    if chart:
+        ch_sub_query += [f"timestamp >= generated_timestamp",
+                         f"timestamp < generated_timestamp + %({step_size_name})s"]
+    if platform == schemas.PlatformType.MOBILE:
+        ch_sub_query.append("user_device_type = 'mobile'")
+    elif platform == schemas.PlatformType.DESKTOP:
+        ch_sub_query.append("user_device_type = 'desktop'")
+    return ch_sub_query
+
+
 def __get_sort_key(key):
     return {
         schemas.ErrorSort.OCCURRENCE: "max_datetime",
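The helper reintroduced above only assembles a list of SQL predicate strings; the surrounding query builders then combine them into WHERE clauses, with the %(...)s placeholders bound later. An illustrative call, mirroring the defaults shown in the hunk (not part of the diff itself):

    # Example of what the reintroduced helper returns.
    constraints = __get_basic_constraints(platform=schemas.PlatformType.MOBILE,
                                          project_key="sessions.project_id")
    # constraints == ["sessions.project_id =%(project_id)s",
    #                 "timestamp >= %(startDate)s",
    #                 "timestamp < %(endDate)s",
    #                 "user_device_type = 'mobile'"]
    where_clause = " AND ".join(constraints)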
@@ -70,13 +90,12 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
     for f in data.filters:
         if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
             platform = f.value[0]
-    pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
+    pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
     pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
                      "pe.project_id=%(project_id)s"]
     # To ignore Script error
     pg_sub_query.append("pe.message!='Script error.'")
-    pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
-                                                               project_key=None)
+    pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
     if platform:
         pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
     pg_sub_query_chart.append("errors.error_id =details.error_id")

@@ -1,11 +1,10 @@
 import schemas
 from chalicelib.core import metadata
-from chalicelib.core.errors import errors_legacy
-from chalicelib.core.errors.modules import errors_helper
 from chalicelib.core.errors.modules import sessions
 from chalicelib.utils import ch_client, exp_ch_helper
 from chalicelib.utils import helper, metrics_helper
 from chalicelib.utils.TimeUTC import TimeUTC
+from . import errors as errors_legacy


 def _multiple_values(values, value_key="value"):

@@ -62,6 +61,25 @@ def get_batch(error_ids):
     return errors_legacy.get_batch(error_ids=error_ids)


+def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
+                            endTime_arg_name="endDate", type_condition=True, project_key="project_id", table_name=None):
+    ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
+    if table_name is not None:
+        table_name = table_name + "."
+    else:
+        table_name = ""
+    if type_condition:
+        ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
+    if time_constraint:
+        ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
+                         f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
+    if platform == schemas.PlatformType.MOBILE:
+        ch_sub_query.append("user_device_type = 'mobile'")
+    elif platform == schemas.PlatformType.DESKTOP:
+        ch_sub_query.append("user_device_type = 'desktop'")
+    return ch_sub_query
+
+
 def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
                                    endTime_arg_name="endDate", type_condition=True, project_key="project_id",
                                    table_name=None):

@@ -98,7 +116,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
     for f in data.filters:
         if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
             platform = f.value[0]
-    ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
+    ch_sessions_sub_query = __get_basic_constraints(platform, type_condition=False)
     # ignore platform for errors table
     ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
     ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")

@@ -130,8 +148,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
     if len(data.events) > errors_condition_count:
         subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
                                                                            errors_only=True,
-                                                                           project_id=project.project_id,
-                                                                           user_id=user_id,
+                                                                           project_id=project.project_id, user_id=user_id,
                                                                            issue=None,
                                                                            favorite_only=False)
         subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"

@@ -338,14 +355,14 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
                SELECT details.error_id as error_id,
                       name, message, users, total,
                       sessions, last_occurrence, first_occurrence, chart
-               FROM (SELECT error_id,
+               FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
                             JSONExtractString(toString(`$properties`), 'name') AS name,
                             JSONExtractString(toString(`$properties`), 'message') AS message,
                             COUNT(DISTINCT user_id) AS users,
                             COUNT(DISTINCT events.session_id) AS sessions,
                             MAX(created_at) AS max_datetime,
                             MIN(created_at) AS min_datetime,
-                            COUNT(DISTINCT error_id)
+                            COUNT(DISTINCT JSONExtractString(toString(`$properties`), 'error_id'))
                                   OVER() AS total
                      FROM {MAIN_EVENTS_TABLE} AS events
                           INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id

@@ -357,7 +374,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
                      GROUP BY error_id, name, message
                      ORDER BY {sort} {order}
                      LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
-                    INNER JOIN (SELECT error_id,
+                    INNER JOIN (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
                                        toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
                                        toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
                                 FROM {MAIN_EVENTS_TABLE}

@@ -366,7 +383,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
                                 GROUP BY error_id) AS time_details
                                ON details.error_id=time_details.error_id
                     INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
-                                FROM (SELECT error_id,
+                                FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
                                              gs.generate_series AS timestamp,
                                              COUNT(DISTINCT session_id) AS count
                                       FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs

@@ -1,5 +1,5 @@
-from chalicelib.core.errors.modules import errors_helper
+from chalicelib.core.errors import errors_legacy as errors
+from chalicelib.utils import errors_helper
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
 from chalicelib.utils.metrics_helper import get_step_size
@@ -40,29 +40,26 @@ def __process_tags(row):


 def get_details(project_id, error_id, user_id, **data):
-    pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
-                                                           step_size_name="step_size24")
+    pg_sub_query24 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
     pg_sub_query24.append("error_id = %(error_id)s")
-    pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
-                                                                   startTime_arg_name="startDate30",
-                                                                   endTime_arg_name="endDate30",
-                                                                   project_key="sessions.project_id")
+    pg_sub_query30_session = errors.__get_basic_constraints(time_constraint=True, chart=False,
+                                                            startTime_arg_name="startDate30",
+                                                            endTime_arg_name="endDate30",
+                                                            project_key="sessions.project_id")
     pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
     pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
     pg_sub_query30_session.append("error_id = %(error_id)s")
-    pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
-                                                               startTime_arg_name="startDate30",
-                                                               endTime_arg_name="endDate30",
-                                                               project_key="errors.project_id")
+    pg_sub_query30_err = errors.__get_basic_constraints(time_constraint=True, chart=False,
+                                                        startTime_arg_name="startDate30",
+                                                        endTime_arg_name="endDate30", project_key="errors.project_id")
     pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
     pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
     pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
     pg_sub_query30_err.append("error_id = %(error_id)s")
     pg_sub_query30_err.append("source ='js_exception'")
-    pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
-                                                           step_size_name="step_size30")
+    pg_sub_query30 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
     pg_sub_query30.append("error_id = %(error_id)s")
-    pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
+    pg_basic_query = errors.__get_basic_constraints(time_constraint=False)
     pg_basic_query.append("error_id = %(error_id)s")
     with pg_client.PostgresClient() as cur:
         data["startDate24"] = TimeUTC.now(-1)

@@ -98,7 +95,8 @@ def get_details(project_id, error_id, user_id, **data):
                       device_partition,
                       country_partition,
                       chart24,
-                      chart30
+                      chart30,
+                      custom_tags
               FROM (SELECT error_id,
                            name,
                            message,

@@ -113,8 +111,15 @@ def get_details(project_id, error_id, user_id, **data):
                                  MIN(timestamp) AS first_occurrence
                           FROM events.errors
                           WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
              INNER JOIN (SELECT session_id AS last_session_id
|
INNER JOIN (SELECT session_id AS last_session_id,
|
||||||
|
coalesce(custom_tags, '[]')::jsonb AS custom_tags
|
||||||
FROM events.errors
|
FROM events.errors
|
||||||
|
LEFT JOIN LATERAL (
|
||||||
|
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
|
||||||
|
FROM errors_tags
|
||||||
|
WHERE errors_tags.error_id = %(error_id)s
|
||||||
|
AND errors_tags.session_id = errors.session_id
|
||||||
|
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
|
||||||
WHERE error_id = %(error_id)s
|
WHERE error_id = %(error_id)s
|
||||||
ORDER BY errors.timestamp DESC
|
ORDER BY errors.timestamp DESC
|
||||||
LIMIT 1) AS last_session_details ON (TRUE)
|
LIMIT 1) AS last_session_details ON (TRUE)
|
||||||
|
|
|
||||||
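The custom_tags part of the hunk above relies on a LEFT JOIN LATERAL plus jsonb_agg so that an error with no tags still produces a row. A simplified sketch of that shape (the message_id condition is dropped here, and the query is only a string, not executed against any database):

# Simplified version of the custom_tags lookup added above (illustration only).
TAGS_QUERY = """
SELECT errors.session_id,
       COALESCE(tags.custom_tags, '[]')::jsonb AS custom_tags
FROM events.errors
         LEFT JOIN LATERAL (
    SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
    FROM errors_tags
    WHERE errors_tags.error_id = %(error_id)s
      AND errors_tags.session_id = errors.session_id
    ) AS tags ON (TRUE)
WHERE errors.error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1;
"""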
@@ -3,9 +3,8 @@ import logging
from decouple import config

logger = logging.getLogger(__name__)
-from . import helper as errors_helper

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
-import chalicelib.core.sessions.sessions_ch as sessions
+from chalicelib.core.sessions import sessions_ch as sessions
else:
-import chalicelib.core.sessions.sessions_pg as sessions
+from chalicelib.core.sessions import sessions

@@ -1,58 +0,0 @@
-from typing import Optional
-
-import schemas
-from chalicelib.core.sourcemaps import sourcemaps
-
-
-def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
-startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
-chart: bool = False, step_size_name: str = "step_size",
-project_key: Optional[str] = "project_id"):
-if project_key is None:
-ch_sub_query = []
-else:
-ch_sub_query = [f"{project_key} =%(project_id)s"]
-if time_constraint:
-ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
-f"timestamp < %({endTime_arg_name})s"]
-if chart:
-ch_sub_query += [f"timestamp >= generated_timestamp",
-f"timestamp < generated_timestamp + %({step_size_name})s"]
-if platform == schemas.PlatformType.MOBILE:
-ch_sub_query.append("user_device_type = 'mobile'")
-elif platform == schemas.PlatformType.DESKTOP:
-ch_sub_query.append("user_device_type = 'desktop'")
-return ch_sub_query
-
-
-def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
-endTime_arg_name="endDate", type_condition=True, project_key="project_id",
-table_name=None):
-ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
-if table_name is not None:
-table_name = table_name + "."
-else:
-table_name = ""
-if type_condition:
-ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
-if time_constraint:
-ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
-f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
-if platform == schemas.PlatformType.MOBILE:
-ch_sub_query.append("user_device_type = 'mobile'")
-elif platform == schemas.PlatformType.DESKTOP:
-ch_sub_query.append("user_device_type = 'desktop'")
-return ch_sub_query
-
-
-def format_first_stack_frame(error):
-error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
-for s in error["stack"]:
-for c in s.get("context", []):
-for sci, sc in enumerate(c):
-if isinstance(sc, str) and len(sc) > 1000:
-c[sci] = sc[:1000]
-# convert bytes to string:
-if isinstance(s["filename"], bytes):
-s["filename"] = s["filename"].decode("utf-8")
-return error
@@ -1,9 +1,8 @@
-from functools import cache
from typing import Optional

import schemas
-from chalicelib.core import issues
from chalicelib.core.autocomplete import autocomplete
+from chalicelib.core import issues
from chalicelib.core.sessions import sessions_metas
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC

@@ -138,57 +137,52 @@ class EventType:
column=None) # column=None because errors are searched by name or message

-@cache
-def supported_types():
-return {
-EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
-query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
-EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
-query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
-EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
+SUPPORTED_TYPES = {
+EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
+query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
+EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
+query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
+EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
+query=autocomplete.__generic_query(
+typename=EventType.LOCATION.ui_type)),
+EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
+query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
+EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
+query=autocomplete.__generic_query(
+typename=EventType.REQUEST.ui_type)),
+EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
+query=autocomplete.__generic_query(
+typename=EventType.GRAPHQL.ui_type)),
+EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
+query=autocomplete.__generic_query(
+typename=EventType.STATEACTION.ui_type)),
+EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
+EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
+query=None),
+EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
+query=None),
+# MOBILE
+EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
query=autocomplete.__generic_query(
-typename=EventType.LOCATION.ui_type)),
-EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
+typename=EventType.CLICK_MOBILE.ui_type)),
+EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
query=autocomplete.__generic_query(
-typename=EventType.CUSTOM.ui_type)),
-EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
+typename=EventType.SWIPE_MOBILE.ui_type)),
+EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
+query=autocomplete.__generic_query(
+typename=EventType.INPUT_MOBILE.ui_type)),
+EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
query=autocomplete.__generic_query(
-typename=EventType.REQUEST.ui_type)),
-EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
+typename=EventType.VIEW_MOBILE.ui_type)),
+EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
query=autocomplete.__generic_query(
-typename=EventType.GRAPHQL.ui_type)),
-EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
+typename=EventType.CUSTOM_MOBILE.ui_type)),
+EventType.REQUEST_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
query=autocomplete.__generic_query(
-typename=EventType.STATEACTION.ui_type)),
-EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
-EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
-query=None),
-EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
+typename=EventType.REQUEST_MOBILE.ui_type)),
+EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
query=None),
-# MOBILE
-EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
-query=autocomplete.__generic_query(
-typename=EventType.CLICK_MOBILE.ui_type)),
-EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
-query=autocomplete.__generic_query(
-typename=EventType.SWIPE_MOBILE.ui_type)),
-EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
-query=autocomplete.__generic_query(
-typename=EventType.INPUT_MOBILE.ui_type)),
-EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
-query=autocomplete.__generic_query(
-typename=EventType.VIEW_MOBILE.ui_type)),
-EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(
-get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
-query=autocomplete.__generic_query(
-typename=EventType.CUSTOM_MOBILE.ui_type)),
-EventType.REQUEST_MOBILE.ui_type: SupportedFilter(
-get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
-query=autocomplete.__generic_query(
-typename=EventType.REQUEST_MOBILE.ui_type)),
-EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
-query=None),
-}
+}

def get_errors_by_session_id(session_id, project_id):

@@ -208,17 +202,17 @@ def search(text, event_type, project_id, source, key):
if not event_type:
return {"data": autocomplete.__get_autocomplete_table(text, project_id)}

-if event_type in supported_types().keys():
+if event_type in SUPPORTED_TYPES.keys():
-rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
+rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
-elif event_type + "_MOBILE" in supported_types().keys():
+elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
-rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
+rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
-elif event_type in sessions_metas.supported_types().keys():
+elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
return sessions_metas.search(text, event_type, project_id)
elif event_type.endswith("_IOS") \
-and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
+and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
return sessions_metas.search(text, event_type, project_id)
elif event_type.endswith("_MOBILE") \
-and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
+and event_type[:-len("_MOBILE")] in sessions_metas.SUPPORTED_TYPES.keys():
return sessions_metas.search(text, event_type, project_id)
else:
return {"errors": ["unsupported event"]}
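The change above swaps a cached supported_types() factory for a module-level SUPPORTED_TYPES dict; both are variants of the same lookup-table dispatch. A minimal, self-contained sketch of that pattern (the names here are illustrative, not OpenReplay APIs):

from dataclasses import dataclass
from typing import Callable, Optional

@dataclass
class SupportedFilter:  # mirrors the shape used above: a getter plus an optional query builder
    get: Callable[..., list]
    query: Optional[Callable[..., str]] = None

def _click_autocomplete(value: str, **_) -> list:  # placeholder handler, not an OpenReplay function
    return [{"type": "CLICK", "value": value}]

SUPPORTED_TYPES = {
    "CLICK": SupportedFilter(get=_click_autocomplete),  # one entry per supported event type
}

def search(text: str, event_type: str):
    handler = SUPPORTED_TYPES.get(event_type)
    if handler is None:
        return {"errors": ["unsupported event"]}  # same fallback as the patched search()
    return {"data": handler.get(value=text)}

print(search("buy", "CLICK"))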
@@ -27,6 +27,7 @@ HEALTH_ENDPOINTS = {
"http": app_connection_string("http-openreplay", 8888, "metrics"),
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
+"peers": app_connection_string("peers-openreplay", 8888, "health"),
"sink": app_connection_string("sink-openreplay", 8888, "metrics"),
"sourcemapreader": app_connection_string(
"sourcemapreader-openreplay", 8888, "health"

@@ -38,7 +39,9 @@ HEALTH_ENDPOINTS = {
def __check_database_pg(*_):
fail_response = {
"health": False,
-"details": {"errors": ["Postgres health-check failed"]},
+"details": {
+"errors": ["Postgres health-check failed"]
+}
}
with pg_client.PostgresClient() as cur:
try:

@@ -60,26 +63,29 @@ def __check_database_pg(*_):
"details": {
# "version": server_version["server_version"],
# "schema": schema_version["version"]
-},
+}
}

def __always_healthy(*_):
-return {"health": True, "details": {}}
+return {
+"health": True,
+"details": {}
+}

def __check_be_service(service_name):
def fn(*_):
fail_response = {
"health": False,
-"details": {"errors": ["server health-check failed"]},
+"details": {
+"errors": ["server health-check failed"]
+}
}
try:
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
if results.status_code != 200:
-logger.error(
-f"!! issue with the {service_name}-health code:{results.status_code}"
-)
+logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
logger.error(results.text)
# fail_response["details"]["errors"].append(results.text)
return fail_response

@@ -97,7 +103,10 @@ def __check_be_service(service_name):
logger.error("couldn't get response")
# fail_response["details"]["errors"].append(str(e))
return fail_response
-return {"health": True, "details": {}}
+return {
+"health": True,
+"details": {}
+}

return fn

@@ -105,7 +114,7 @@ def __check_be_service(service_name):
def __check_redis(*_):
fail_response = {
"health": False,
-"details": {"errors": ["server health-check failed"]},
+"details": {"errors": ["server health-check failed"]}
}
if config("REDIS_STRING", default=None) is None:
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")

@@ -124,14 +133,16 @@ def __check_redis(*_):
"health": True,
"details": {
# "version": r.execute_command('INFO')['redis_version']
-},
+}
}

def __check_SSL(*_):
fail_response = {
"health": False,
-"details": {"errors": ["SSL Certificate health-check failed"]},
+"details": {
+"errors": ["SSL Certificate health-check failed"]
+}
}
try:
requests.get(config("SITE_URL"), verify=True, allow_redirects=True)

@@ -139,28 +150,36 @@ def __check_SSL(*_):
logger.error("!! health failed: SSL Certificate")
logger.exception(e)
return fail_response
-return {"health": True, "details": {}}
+return {
+"health": True,
+"details": {}
+}

def __get_sessions_stats(*_):
with pg_client.PostgresClient() as cur:
constraints = ["projects.deleted_at IS NULL"]
-query = cur.mogrify(
-f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
+query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
COALESCE(SUM(events_count),0) AS e_c
FROM public.projects_stats
INNER JOIN public.projects USING(project_id)
-WHERE {" AND ".join(constraints)};"""
-)
+WHERE {" AND ".join(constraints)};""")
cur.execute(query)
row = cur.fetchone()
-return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}
+return {
+"numberOfSessionsCaptured": row["s_c"],
+"numberOfEventCaptured": row["e_c"]
+}

def get_health(tenant_id=None):
health_map = {
-"databases": {"postgres": __check_database_pg},
-"ingestionPipeline": {"redis": __check_redis},
+"databases": {
+"postgres": __check_database_pg
+},
+"ingestionPipeline": {
+"redis": __check_redis
+},
"backendServices": {
"alerts": __check_be_service("alerts"),
"assets": __check_be_service("assets"),

@@ -173,12 +192,13 @@ def get_health(tenant_id=None):
"http": __check_be_service("http"),
"ingress-nginx": __always_healthy,
"integrations": __check_be_service("integrations"),
+"peers": __check_be_service("peers"),
"sink": __check_be_service("sink"),
"sourcemapreader": __check_be_service("sourcemapreader"),
-"storage": __check_be_service("storage"),
+"storage": __check_be_service("storage")
},
"details": __get_sessions_stats,
-"ssl": __check_SSL,
+"ssl": __check_SSL
}
return __process_health(health_map=health_map)

@@ -190,16 +210,10 @@ def __process_health(health_map):
response.pop(parent_key)
elif isinstance(health_map[parent_key], dict):
for element_key in health_map[parent_key]:
-if config(
-f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
-cast=bool,
-default=False,
-):
+if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
response[parent_key].pop(element_key)
else:
-response[parent_key][element_key] = health_map[parent_key][
-element_key
-]()
+response[parent_key][element_key] = health_map[parent_key][element_key]()
else:
response[parent_key] = health_map[parent_key]()
return response

@@ -207,8 +221,7 @@ def __process_health(health_map):

def cron():
with pg_client.PostgresClient() as cur:
-query = cur.mogrify(
-"""SELECT projects.project_id,
+query = cur.mogrify("""SELECT projects.project_id,
projects.created_at,
projects.sessions_last_check_at,
projects.first_recorded_session_at,

@@ -216,8 +229,7 @@ def cron():
FROM public.projects
LEFT JOIN public.projects_stats USING (project_id)
WHERE projects.deleted_at IS NULL
-ORDER BY project_id;"""
-)
+ORDER BY project_id;""")
cur.execute(query)
rows = cur.fetchall()
for r in rows:

@@ -238,24 +250,20 @@ def cron():
count_start_from = r["last_update_at"]

count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
-params = {
-"project_id": r["project_id"],
-"start_ts": count_start_from,
-"end_ts": TimeUTC.now(),
-"sessions_count": 0,
-"events_count": 0,
-}
+params = {"project_id": r["project_id"],
+"start_ts": count_start_from,
+"end_ts": TimeUTC.now(),
+"sessions_count": 0,
+"events_count": 0}

-query = cur.mogrify(
-"""SELECT COUNT(1) AS sessions_count,
+query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
COALESCE(SUM(events_count),0) AS events_count
FROM public.sessions
WHERE project_id=%(project_id)s
AND start_ts>=%(start_ts)s
AND start_ts<=%(end_ts)s
AND duration IS NOT NULL;""",
-params,
-)
+params)
cur.execute(query)
row = cur.fetchone()
if row is not None:

@@ -263,68 +271,56 @@ def cron():
params["events_count"] = row["events_count"]

if insert:
-query = cur.mogrify(
-"""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
+query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
-params,
-)
+params)
else:
-query = cur.mogrify(
-"""UPDATE public.projects_stats
+query = cur.mogrify("""UPDATE public.projects_stats
SET sessions_count=sessions_count+%(sessions_count)s,
events_count=events_count+%(events_count)s,
last_update_at=(now() AT TIME ZONE 'utc'::text)
WHERE project_id=%(project_id)s;""",
-params,
-)
+params)
cur.execute(query)

# this cron is used to correct the sessions&events count every week
def weekly_cron():
with pg_client.PostgresClient(long_query=True) as cur:
-query = cur.mogrify(
-"""SELECT project_id,
+query = cur.mogrify("""SELECT project_id,
projects_stats.last_update_at
FROM public.projects
LEFT JOIN public.projects_stats USING (project_id)
WHERE projects.deleted_at IS NULL
-ORDER BY project_id;"""
-)
+ORDER BY project_id;""")
cur.execute(query)
rows = cur.fetchall()
for r in rows:
if r["last_update_at"] is None:
continue

-params = {
-"project_id": r["project_id"],
-"end_ts": TimeUTC.now(),
-"sessions_count": 0,
-"events_count": 0,
-}
+params = {"project_id": r["project_id"],
+"end_ts": TimeUTC.now(),
+"sessions_count": 0,
+"events_count": 0}

-query = cur.mogrify(
-"""SELECT COUNT(1) AS sessions_count,
+query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
COALESCE(SUM(events_count),0) AS events_count
FROM public.sessions
WHERE project_id=%(project_id)s
AND start_ts<=%(end_ts)s
AND duration IS NOT NULL;""",
-params,
-)
+params)
cur.execute(query)
row = cur.fetchone()
if row is not None:
params["sessions_count"] = row["sessions_count"]
params["events_count"] = row["events_count"]

-query = cur.mogrify(
-"""UPDATE public.projects_stats
+query = cur.mogrify("""UPDATE public.projects_stats
SET sessions_count=%(sessions_count)s,
events_count=%(events_count)s,
last_update_at=(now() AT TIME ZONE 'utc'::text)
WHERE project_id=%(project_id)s;""",
-params,
-)
+params)
cur.execute(query)
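The get_health/__process_health pair above builds a nested map of check callables and lets individual checks be skipped through SKIP_H_<GROUP>_<NAME> flags. A rough, dependency-free sketch of that traversal (os.environ stands in for decouple.config, and the checks are dummies):

import os

def _always_healthy(*_):
    return {"health": True, "details": {}}

HEALTH_MAP = {
    "databases": {"postgres": _always_healthy},  # the real code calls __check_database_pg here
    "ssl": _always_healthy,
}

def process_health(health_map):
    # copy nested dicts so popping skipped checks does not mutate the map itself
    response = {k: (v.copy() if isinstance(v, dict) else v) for k, v in health_map.items()}
    for parent_key, value in health_map.items():
        if isinstance(value, dict):
            for element_key, check in value.items():
                flag = f"SKIP_H_{parent_key.upper()}_{element_key.upper()}"
                if os.environ.get(flag, "false").lower() == "true":
                    response[parent_key].pop(element_key)  # check explicitly disabled
                else:
                    response[parent_key][element_key] = check()
        else:
            response[parent_key] = value()
    return response

print(process_health(HEALTH_MAP))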
@@ -50,8 +50,8 @@ class JIRAIntegration(base.BaseIntegration):
cur.execute(
cur.mogrify(
"""SELECT username, token, url
FROM public.jira_cloud
-WHERE user_id = %(user_id)s;""",
+WHERE user_id=%(user_id)s;""",
{"user_id": self._user_id})
)
data = helper.dict_to_camel_case(cur.fetchone())

@@ -95,9 +95,10 @@ class JIRAIntegration(base.BaseIntegration):
def add(self, username, token, url, obfuscate=False):
with pg_client.PostgresClient() as cur:
cur.execute(
-cur.mogrify(""" \
-INSERT INTO public.jira_cloud(username, token, user_id, url)
-VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
+cur.mogrify("""\
+INSERT INTO public.jira_cloud(username, token, user_id,url)
+VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
+RETURNING username, token, url;""",
{"user_id": self._user_id, "username": username,
"token": token, "url": url})
)

@@ -111,10 +112,9 @@ class JIRAIntegration(base.BaseIntegration):
def delete(self):
with pg_client.PostgresClient() as cur:
cur.execute(
-cur.mogrify(""" \
-DELETE
-FROM public.jira_cloud
-WHERE user_id = %(user_id)s;""",
+cur.mogrify("""\
+DELETE FROM public.jira_cloud
+WHERE user_id=%(user_id)s;""",
{"user_id": self._user_id})
)
return {"state": "success"}

@@ -125,7 +125,7 @@ class JIRAIntegration(base.BaseIntegration):
changes={
"username": data.username,
"token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
-else self.integration["token"],
+else self.integration.token,
"url": str(data.url)
},
obfuscate=True
@@ -378,6 +378,21 @@ def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, includ
)

with pg_client.PostgresClient() as cur:
+count_query = cur.mogrify(
+f"""SELECT COUNT(*)
+FROM metrics
+LEFT JOIN LATERAL (
+SELECT email AS owner_email, name AS owner_name
+FROM users
+WHERE deleted_at ISNULL
+AND users.user_id = metrics.user_id
+) AS owner ON (TRUE)
+WHERE {" AND ".join(constraints)};""",
+params
+)
+cur.execute(count_query)
+total = cur.fetchone()["count"]

sub_join = ""
if include_series:
sub_join = """LEFT JOIN LATERAL (

@@ -387,8 +402,7 @@ def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, includ
AND metric_series.deleted_at ISNULL
) AS metric_series ON (TRUE)"""

-sort_column = data.sort.field if data.sort.field is not None and len(data.sort.field) > 0 \
-else "created_at"
+sort_column = data.sort.field if data.sort.field is not None else "created_at"
# change ascend to asc and descend to desc
sort_order = data.sort.order.value if hasattr(data.sort.order, "value") else data.sort.order
if sort_order == "ascend":

@@ -397,7 +411,7 @@ def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, includ
sort_order = "desc"

query = cur.mogrify(
-f"""SELECT count(1) OVER () AS total,metric_id, project_id, user_id, name, is_public, created_at, edited_at,
+f"""SELECT metric_id, project_id, user_id, name, is_public, created_at, edited_at,
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
dashboards, owner_email, owner_name, default_config AS config, thumbnail
FROM metrics

@@ -427,21 +441,15 @@ def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, includ
)
cur.execute(query)
rows = cur.fetchall()
-if len(rows) > 0:
-total = rows[0]["total"]
-if include_series:
-for r in rows:
-r.pop("total")
-for s in r.get("series", []):
-s["filter"] = helper.old_search_payload_to_flat(s["filter"])
-else:
-for r in rows:
-r.pop("total")
-r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
-r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
-rows = helper.list_to_camel_case(rows)
+if include_series:
+for r in rows:
+for s in r.get("series", []):
+s["filter"] = helper.old_search_payload_to_flat(s["filter"])
else:
-total = 0
+for r in rows:
+r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
+r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
+rows = helper.list_to_camel_case(rows)

return {"total": total, "list": rows}
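The hunk above replaces the count(1) OVER () window column with a separate COUNT(*) query, so the total no longer has to be read from (and stripped out of) every returned row. A sketch of the two approaches side by side; the table, filter and parameter names are placeholders, and the strings are not executed here:

# Option kept by the patch: a dedicated count query, then the page query.
COUNT_QUERY = "SELECT COUNT(*) AS count FROM metrics WHERE project_id = %(project_id)s;"
PAGE_QUERY = """SELECT metric_id, name
                FROM metrics
                WHERE project_id = %(project_id)s
                ORDER BY created_at DESC
                LIMIT %(limit)s OFFSET %(offset)s;"""

# Option removed by the patch: the total carried on every row via a window function.
WINDOW_QUERY = """SELECT COUNT(1) OVER () AS total, metric_id, name
                  FROM metrics
                  WHERE project_id = %(project_id)s
                  LIMIT %(limit)s OFFSET %(offset)s;"""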
@@ -5,8 +5,8 @@ from decouple import config
logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
-import chalicelib.core.sessions.sessions_ch as sessions
+from chalicelib.core.sessions import sessions_ch as sessions
else:
-import chalicelib.core.sessions.sessions_pg as sessions
+from chalicelib.core.sessions import sessions

from chalicelib.core.sessions import sessions_mobs

@@ -85,9 +85,6 @@ def __complete_missing_steps(start_time, end_time, density, neutral, rows, time_
# compute avg_time_from_previous at the same level as sessions_count (this was removed in v1.22)
# if start-point is selected, the selected event is ranked n°1
def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
-if not data.hide_excess:
-data.hide_excess = True
-data.rows = 50
sub_events = []
start_points_conditions = []
step_0_conditions = []

@@ -3,11 +3,9 @@ import logging
from decouple import config

logger = logging.getLogger(__name__)
-from . import sessions_pg
-from . import sessions_pg as sessions_legacy
-from . import sessions_ch
+from . import sessions as sessions_legacy

if config("EXP_METRICS", cast=bool, default=False):
from . import sessions_ch as sessions
else:
-from . import sessions_pg as sessions
+from . import sessions
@@ -148,7 +148,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
"isEvent": True,
"value": [],
"operator": e.operator,
-"filters": e.filters
+"filters": []
})
for v in e.value:
if v not in extra_conditions[e.operator].value:

@@ -165,7 +165,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
"isEvent": True,
"value": [],
"operator": e.operator,
-"filters": e.filters
+"filters": []
})
for v in e.value:
if v not in extra_conditions[e.operator].value:

@@ -989,7 +989,7 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
sh.multi_conditions(f"ev.{events.EventType.LOCATION.column} {op} %({e_k})s",
c.value, value_key=e_k))
else:
-logger.warning(f"unsupported extra_event type: {c.type}")
+logger.warning(f"unsupported extra_event type:${c.type}")
if len(_extra_or_condition) > 0:
extra_constraints.append("(" + " OR ".join(_extra_or_condition) + ")")
query_part = f"""\
@@ -3,7 +3,7 @@ from typing import List, Union

import schemas
from chalicelib.core import events, metadata
-from . import performance_event, sessions_legacy
+from . import performance_event, sessions as sessions_legacy
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
from chalicelib.utils import sql_helper as sh

@@ -153,7 +153,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
"isEvent": True,
"value": [],
"operator": e.operator,
-"filters": e.filters
+"filters": []
})
for v in e.value:
if v not in extra_conditions[e.operator].value:

@@ -178,7 +178,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
"isEvent": True,
"value": [],
"operator": e.operator,
-"filters": e.filters
+"filters": []
})
for v in e.value:
if v not in extra_conditions[e.operator].value:

@@ -870,12 +870,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
events_conditions[-1]["condition"] = []
if not is_any and event.value not in [None, "*", ""]:
event_where.append(
-sh.multi_conditions(f"(toString(main1.`$properties`.message) {op} %({e_k})s OR toString(main1.`$properties`.name) {op} %({e_k})s)",
+sh.multi_conditions(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)",
event.value, value_key=e_k))
events_conditions[-1]["condition"].append(event_where[-1])
events_extra_join += f" AND {event_where[-1]}"
if len(event.source) > 0 and event.source[0] not in [None, "*", ""]:
-event_where.append(sh.multi_conditions(f"toString(main1.`$properties`.source) = %({s_k})s", event.source, value_key=s_k))
+event_where.append(sh.multi_conditions(f"main1.source = %({s_k})s", event.source, value_key=s_k))
events_conditions[-1]["condition"].append(event_where[-1])
events_extra_join += f" AND {event_where[-1]}"

@@ -1108,12 +1108,8 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
is_any = sh.isAny_opreator(f.operator)
if is_any or len(f.value) == 0:
continue
-is_negative_operator = sh.is_negation_operator(f.operator)
f.value = helper.values_for_operator(value=f.value, op=f.operator)
op = sh.get_sql_operator(f.operator)
-r_op = ""
-if is_negative_operator:
-r_op = sh.reverse_sql_operator(op)
e_k_f = e_k + f"_fetch{j}"
full_args = {**full_args, **sh.multi_values(f.value, value_key=e_k_f)}
if f.type == schemas.FetchFilterType.FETCH_URL:

@@ -1122,12 +1118,6 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
))
events_conditions[-1]["condition"].append(event_where[-1])
apply = True
-if is_negative_operator:
-events_conditions_not.append(
-{
-"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
-events_conditions_not[-1]["condition"] = sh.multi_conditions(
-f"sub.`$properties`.url_path {r_op} %({e_k_f})s", f.value, value_key=e_k_f)
elif f.type == schemas.FetchFilterType.FETCH_STATUS_CODE:
event_where.append(json_condition(
"main", "$properties", 'status', op, f.value, e_k_f, True, True

@@ -1140,13 +1130,6 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
))
events_conditions[-1]["condition"].append(event_where[-1])
apply = True
-if is_negative_operator:
-events_conditions_not.append(
-{
-"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
-events_conditions_not[-1]["condition"] = sh.multi_conditions(
-f"sub.`$properties`.method {r_op} %({e_k_f})s", f.value,
-value_key=e_k_f)
elif f.type == schemas.FetchFilterType.FETCH_DURATION:
event_where.append(
sh.multi_conditions(f"main.`$duration_s` {f.operator} %({e_k_f})s/1000", f.value,

@@ -1159,26 +1142,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
))
events_conditions[-1]["condition"].append(event_where[-1])
apply = True
-if is_negative_operator:
-events_conditions_not.append(
-{
-"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
-events_conditions_not[-1]["condition"] = sh.multi_conditions(
-f"sub.`$properties`.request_body {r_op} %({e_k_f})s", f.value,
-value_key=e_k_f)
elif f.type == schemas.FetchFilterType.FETCH_RESPONSE_BODY:
event_where.append(json_condition(
"main", "$properties", 'response_body', op, f.value, e_k_f
))
events_conditions[-1]["condition"].append(event_where[-1])
apply = True
-if is_negative_operator:
-events_conditions_not.append(
-{
-"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
-events_conditions_not[-1]["condition"] = sh.multi_conditions(
-f"sub.`$properties`.response_body {r_op} %({e_k_f})s", f.value,
-value_key=e_k_f)
else:
logging.warning(f"undefined FETCH filter: {f.type}")
if not apply:

@@ -1426,30 +1395,17 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
if extra_conditions and len(extra_conditions) > 0:
_extra_or_condition = []
for i, c in enumerate(extra_conditions):
-if sh.isAny_opreator(c.operator) and c.type != schemas.EventType.REQUEST_DETAILS.value:
+if sh.isAny_opreator(c.operator):
continue
e_k = f"ec_value{i}"
op = sh.get_sql_operator(c.operator)
c.value = helper.values_for_operator(value=c.value, op=c.operator)
full_args = {**full_args,
**sh.multi_values(c.value, value_key=e_k)}
-if c.type in (schemas.EventType.LOCATION.value, schemas.EventType.REQUEST.value):
+if c.type == events.EventType.LOCATION.ui_type:
_extra_or_condition.append(
sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
c.value, value_key=e_k))
-elif c.type == schemas.EventType.REQUEST_DETAILS.value:
-for j, c_f in enumerate(c.filters):
-if sh.isAny_opreator(c_f.operator) or len(c_f.value) == 0:
-continue
-e_k += f"_{j}"
-op = sh.get_sql_operator(c_f.operator)
-c_f.value = helper.values_for_operator(value=c_f.value, op=c_f.operator)
-full_args = {**full_args,
-**sh.multi_values(c_f.value, value_key=e_k)}
-if c_f.type == schemas.FetchFilterType.FETCH_URL.value:
-_extra_or_condition.append(
-sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
-c_f.value, value_key=e_k))
else:
logging.warning(f"unsupported extra_event type:${c.type}")
if len(_extra_or_condition) > 0:

@@ -1460,10 +1416,9 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
query_part = f"""{f"({events_query_part}) AS f" if len(events_query_part) > 0 else ""}"""
else:
if len(events_query_part) > 0:
-extra_join += f"""INNER JOIN (SELECT DISTINCT ON (session_id) *
+extra_join += f"""INNER JOIN (SELECT *
FROM {MAIN_SESSIONS_TABLE} AS s {extra_event}
-WHERE {" AND ".join(extra_constraints)}
-ORDER BY _timestamp DESC) AS s ON(s.session_id=f.session_id)"""
+WHERE {" AND ".join(extra_constraints)}) AS s ON(s.session_id=f.session_id)"""
else:
deduplication_keys = ["session_id"] + extra_deduplication
extra_join = f"""(SELECT *
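Many of the hunks above funnel user-supplied filter values through helpers such as sh.multi_values and sh.multi_conditions before they reach the SQL text. The snippet below is a standalone illustration of that general pattern (expanding one condition template into an OR group with numbered placeholders); it is an assumption about the idea, not the actual chalicelib.utils.sql_helper implementation.

# Hypothetical stand-in for the multi-value condition helpers used above.
def multi_values(values, value_key):
    # one numbered parameter per value, e.g. {"ec_value0_0": "...", "ec_value0_1": "..."}
    return {f"{value_key}_{i}": v for i, v in enumerate(values)}

def multi_conditions(condition_template, values, value_key):
    # repeat the template once per value and OR the copies together
    parts = [condition_template.replace(f"%({value_key})s", f"%({value_key}_{i})s")
             for i in range(len(values))]
    return "(" + " OR ".join(parts) + ")"

params = multi_values(["/checkout", "/cart"], value_key="ec_value0")
clause = multi_conditions("extra_event.url_path = %(ec_value0)s", ["/checkout", "/cart"], "ec_value0")
print(clause)  # (extra_event.url_path = %(ec_value0_0)s OR extra_event.url_path = %(ec_value0_1)s)
print(params)  # {'ec_value0_0': '/checkout', 'ec_value0_1': '/cart'}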
@ -1,81 +1,76 @@
-from functools import cache
-
 import schemas
 from chalicelib.core.autocomplete import autocomplete
 from chalicelib.utils.event_filter_definition import SupportedFilter


-@cache
-def supported_types():
-    return {
+SUPPORTED_TYPES = {
     schemas.FilterType.USER_OS: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
     schemas.FilterType.USER_BROWSER: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
     schemas.FilterType.USER_DEVICE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
     schemas.FilterType.USER_COUNTRY: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
     schemas.FilterType.USER_CITY: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
     schemas.FilterType.USER_STATE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
     schemas.FilterType.USER_ID: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
     schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
     schemas.FilterType.REV_ID: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
     schemas.FilterType.REFERRER: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
     schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
     schemas.FilterType.UTM_MEDIUM: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
     schemas.FilterType.UTM_SOURCE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
     # Mobile
     schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
     schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(
             typename=schemas.FilterType.USER_DEVICE_MOBILE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
     schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
     schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
     schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
     schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
         get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
         query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
 }


 def search(text: str, meta_type: schemas.FilterType, project_id: int):
     rows = []
-    if meta_type not in list(supported_types().keys()):
+    if meta_type not in list(SUPPORTED_TYPES.keys()):
         return {"errors": ["unsupported type"]}
-    rows += supported_types()[meta_type].get(project_id=project_id, text=text)
+    rows += SUPPORTED_TYPES[meta_type].get(project_id=project_id, text=text)
     # for IOS events autocomplete
     # if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()):
     #     rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text)
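Editor's note: the hunk above swaps a `@cache`-decorated factory for a module-level constant. A minimal sketch with toy names (not the project's real entries) of the two patterns; for a read-only mapping they are equivalent, the constant simply avoids the extra call and cache layer by building the dict once at import time.

```python
# Toy illustration (hypothetical names) of the refactor above: cached factory vs. module constant.
from functools import cache


@cache
def supported_types_cached() -> dict:
    # Built on first call, then memoized for the life of the process.
    return {"user_os": "autocomplete_user_os", "user_browser": "autocomplete_user_browser"}


# Equivalent read-only mapping, built once at import time; no call, no cache layer.
SUPPORTED_TYPES = {"user_os": "autocomplete_user_os", "user_browser": "autocomplete_user_browser"}

assert supported_types_cached()["user_os"] == SUPPORTED_TYPES["user_os"]
```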
@ -2,7 +2,7 @@ import schemas
 from chalicelib.core import events, metadata, events_mobile, \
     issues, assist, canvas, user_testing
 from . import sessions_mobs, sessions_devtool
-from chalicelib.core.errors.modules import errors_helper
+from chalicelib.utils import errors_helper
 from chalicelib.utils import pg_client, helper
 from chalicelib.core.modules import MOB_KEY, get_file_key


@ -2,7 +2,7 @@ import logging

 import schemas
 from chalicelib.core import metadata, projects
-from . import sessions_favorite, sessions_legacy
+from chalicelib.core.sessions import sessions_favorite, sessions_legacy
 from chalicelib.utils import pg_client, helper

 logger = logging.getLogger(__name__)

@ -43,13 +43,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
                     count_only=False, issue=None, ids_only=False, platform="web"):
     if data.bookmarked:
         data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project.project_id, user_id)
-        if data.startTimestamp is None:
-            logger.debug(f"No vault sessions found for project:{project.project_id}")
-            return {
-                'total': 0,
-                'sessions': [],
-                'src': 1
-            }
     full_args, query_part = sessions_legacy.search_query_parts(data=data, error_status=error_status,
                                                                errors_only=errors_only,
                                                                favorite_only=data.bookmarked, issue=issue,

@ -122,10 +116,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
             sort = 'session_id'
             if data.sort is not None and data.sort != "session_id":
                 # sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
-                if data.sort == 'datetime':
-                    sort = 'start_ts'
-                else:
-                    sort = helper.key_to_snake_case(data.sort)
+                sort = helper.key_to_snake_case(data.sort)

         meta_keys = metadata.get(project_id=project.project_id)
         main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,

@ -175,8 +166,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
     #                   reverse=data.order.upper() == "DESC")
     return {
         'total': total,
-        'sessions': helper.list_to_camel_case(sessions),
-        'src': 1
+        'sessions': helper.list_to_camel_case(sessions)
     }
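Editor's note: the sort hunk above drops the explicit `'datetime' -> 'start_ts'` special case and routes every non-default sort key through `helper.key_to_snake_case`. As a rough illustration only, a camelCase-to-snake_case converter of that kind usually behaves like the sketch below; the project's actual helper may differ.

```python
import re


# Hypothetical stand-in for helper.key_to_snake_case (the real helper may differ).
def key_to_snake_case(key: str) -> str:
    # Insert an underscore before each capital letter, then lowercase: "startTs" -> "start_ts".
    return re.sub(r"(?<!^)(?=[A-Z])", "_", key).lower()


print(key_to_snake_case("startTs"))    # start_ts
print(key_to_snake_case("sessionId"))  # session_id
```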
@ -1,7 +1,7 @@
 import logging

 from chalicelib.core import assist
-from . import sessions
+from chalicelib.core.sessions import sessions

 logger = logging.getLogger(__name__)


@ -34,10 +34,7 @@ if config("CH_COMPRESSION", cast=bool, default=True):
     def transform_result(self, original_function):
         @wraps(original_function)
         def wrapper(*args, **kwargs):
-            if kwargs.get("parameters"):
-                logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
-            elif len(args) > 0:
-                logger.debug(str.encode(args[0]))
+            logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
             result = original_function(*args, **kwargs)
             if isinstance(result, clickhouse_connect.driver.query.QueryResult):
                 column_names = result.column_names

@ -149,11 +146,13 @@ class ClickHouseClient:
     def __enter__(self):
         return self.__client

-    def format(self, query, parameters=None):
-        if parameters:
-            ctx = QueryContext(query=query, parameters=parameters)
-            return ctx.final_query
-        return query
+    def format(self, query, *, parameters=None):
+        if parameters is None:
+            return query
+        return query % {
+            key: f"'{value}'" if isinstance(value, str) else value
+            for key, value in parameters.items()
+        }

     def __exit__(self, *args):
         if config('CH_POOL', cast=bool, default=True):
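Editor's note: the new `format()` above builds the logged query with plain `%`-interpolation and quotes string values, instead of going through `QueryContext`. A standalone, simplified sketch of that interpolation is shown below; it is for log readability only and does not escape quotes or handle dates/lists the way clickhouse-connect's own parameter binding does.

```python
# Simplified, standalone version of the new format() logic above (illustration only).
def format_query(query: str, parameters: dict | None = None) -> str:
    if parameters is None:
        return query
    return query % {
        key: f"'{value}'" if isinstance(value, str) else value
        for key, value in parameters.items()
    }


print(format_query("SELECT * FROM sessions WHERE project_id = %(pid)s AND user_id = %(uid)s",
                   {"pid": 65, "uid": "jane"}))
# SELECT * FROM sessions WHERE project_id = 65 AND user_id = 'jane'
```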
14 api/chalicelib/utils/errors_helper.py Normal file

@ -0,0 +1,14 @@
from chalicelib.core.sourcemaps import sourcemaps


def format_first_stack_frame(error):
    error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
    for s in error["stack"]:
        for c in s.get("context", []):
            for sci, sc in enumerate(c):
                if isinstance(sc, str) and len(sc) > 1000:
                    c[sci] = sc[:1000]
        # convert bytes to string:
        if isinstance(s["filename"], bytes):
            s["filename"] = s["filename"].decode("utf-8")
    return error
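Editor's note: the loop in the new helper above caps each context string at 1000 characters and decodes bytes filenames. A toy, self-contained illustration of just that post-processing (the stack dict below is made up; the real stack comes from `sourcemaps.format_payload`):

```python
# Toy illustration of the post-processing loop in format_first_stack_frame above.
stack = [{
    "filename": b"app.js",
    "context": [["const x = 1;", "y" * 5000]],
}]

for s in stack:
    for c in s.get("context", []):
        for sci, sc in enumerate(c):
            if isinstance(sc, str) and len(sc) > 1000:
                c[sci] = sc[:1000]  # truncate oversized context strings
    if isinstance(s["filename"], bytes):
        s["filename"] = s["filename"].decode("utf-8")  # normalize bytes filenames

print(stack[0]["filename"], len(stack[0]["context"][0][1]))  # app.js 1000
```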
@ -19,16 +19,6 @@ PG_CONFIG = dict(_PG_CONFIG)
 if config("PG_TIMEOUT", cast=int, default=0) > 0:
     PG_CONFIG["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int) * 1000}"

-if config('PG_POOL', cast=bool, default=True):
-    PG_CONFIG = {
-        **PG_CONFIG,
-        # Keepalive settings
-        "keepalives": 1,  # Enable keepalives
-        "keepalives_idle": 300,  # Seconds before sending keepalive
-        "keepalives_interval": 10,  # Seconds between keepalives
-        "keepalives_count": 3  # Number of keepalives before giving up
-    }
-

 class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
     def __init__(self, minconn, maxconn, *args, **kwargs):

@ -65,7 +55,6 @@ RETRY = 0

 def make_pool():
     if not config('PG_POOL', cast=bool, default=True):
-        logger.info("PG_POOL is disabled, not creating a new one")
         return
     global postgreSQL_pool
     global RETRY

@ -187,7 +176,8 @@ class PostgresClient:

 async def init():
     logger.info(f">use PG_POOL:{config('PG_POOL', default=True)}")
-    make_pool()
+    if config('PG_POOL', cast=bool, default=True):
+        make_pool()


 async def terminate():
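Editor's note: after the hunks above, pool creation is gated on `PG_POOL` both inside `make_pool()` and at the `init()` call site. A minimal sketch of that gating pattern, using `os.environ` instead of the project's `decouple.config` (an assumption made only so the sketch runs standalone):

```python
import os


# Minimal sketch of the PG_POOL gating above; os.environ stands in for decouple's config().
def pg_pool_enabled() -> bool:
    return os.environ.get("PG_POOL", "true").lower() in ("1", "true", "yes")


def make_pool():
    if not pg_pool_enabled():
        return  # pooling disabled: callers fall back to one connection per client
    print("creating ThreadedConnectionPool...")


async def init():
    # init() only builds the pool when pooling is enabled, mirroring the diff above.
    if pg_pool_enabled():
        make_pool()
```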
@ -4,41 +4,37 @@ import schemas


 def get_sql_operator(op: Union[schemas.SearchEventOperator, schemas.ClickEventExtraOperator, schemas.MathOperator]):
-    if isinstance(op, Enum):
-        op = op.value
     return {
-        schemas.SearchEventOperator.IS.value: "=",
-        schemas.SearchEventOperator.ON.value: "=",
-        schemas.SearchEventOperator.ON_ANY.value: "IN",
-        schemas.SearchEventOperator.IS_NOT.value: "!=",
-        schemas.SearchEventOperator.NOT_ON.value: "!=",
-        schemas.SearchEventOperator.CONTAINS.value: "ILIKE",
-        schemas.SearchEventOperator.NOT_CONTAINS.value: "NOT ILIKE",
-        schemas.SearchEventOperator.STARTS_WITH.value: "ILIKE",
-        schemas.SearchEventOperator.ENDS_WITH.value: "ILIKE",
+        schemas.SearchEventOperator.IS: "=",
+        schemas.SearchEventOperator.ON: "=",
+        schemas.SearchEventOperator.ON_ANY: "IN",
+        schemas.SearchEventOperator.IS_NOT: "!=",
+        schemas.SearchEventOperator.NOT_ON: "!=",
+        schemas.SearchEventOperator.CONTAINS: "ILIKE",
+        schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
+        schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
+        schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
         # Selector operators:
-        schemas.ClickEventExtraOperator.IS.value: "=",
-        schemas.ClickEventExtraOperator.IS_NOT.value: "!=",
-        schemas.ClickEventExtraOperator.CONTAINS.value: "ILIKE",
-        schemas.ClickEventExtraOperator.NOT_CONTAINS.value: "NOT ILIKE",
-        schemas.ClickEventExtraOperator.STARTS_WITH.value: "ILIKE",
-        schemas.ClickEventExtraOperator.ENDS_WITH.value: "ILIKE",
+        schemas.ClickEventExtraOperator.IS: "=",
+        schemas.ClickEventExtraOperator.IS_NOT: "!=",
+        schemas.ClickEventExtraOperator.CONTAINS: "ILIKE",
+        schemas.ClickEventExtraOperator.NOT_CONTAINS: "NOT ILIKE",
+        schemas.ClickEventExtraOperator.STARTS_WITH: "ILIKE",
+        schemas.ClickEventExtraOperator.ENDS_WITH: "ILIKE",

-        schemas.MathOperator.GREATER.value: ">",
-        schemas.MathOperator.GREATER_EQ.value: ">=",
-        schemas.MathOperator.LESS.value: "<",
-        schemas.MathOperator.LESS_EQ.value: "<=",
+        schemas.MathOperator.GREATER: ">",
+        schemas.MathOperator.GREATER_EQ: ">=",
+        schemas.MathOperator.LESS: "<",
+        schemas.MathOperator.LESS_EQ: "<=",
     }.get(op, "=")


 def is_negation_operator(op: schemas.SearchEventOperator):
-    if isinstance(op, Enum):
-        op = op.value
-    return op in [schemas.SearchEventOperator.IS_NOT.value,
-                  schemas.SearchEventOperator.NOT_ON.value,
-                  schemas.SearchEventOperator.NOT_CONTAINS.value,
-                  schemas.ClickEventExtraOperator.IS_NOT.value,
-                  schemas.ClickEventExtraOperator.NOT_CONTAINS.value]
+    return op in [schemas.SearchEventOperator.IS_NOT,
+                  schemas.SearchEventOperator.NOT_ON,
+                  schemas.SearchEventOperator.NOT_CONTAINS,
+                  schemas.ClickEventExtraOperator.IS_NOT,
+                  schemas.ClickEventExtraOperator.NOT_CONTAINS]


 def reverse_sql_operator(op):
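Editor's note: the hunk above keys the operator mapping on the enum members themselves and drops the `.value` normalization. Assuming the schema operators are str-based enums (as pydantic-style enums commonly are), a member and its string value hash and compare equal, so lookups keep working with either form. A toy sketch of that property:

```python
from enum import Enum


# Toy str-based enum standing in for schemas.SearchEventOperator (assumption for illustration).
class SearchEventOperator(str, Enum):
    IS = "is"
    IS_NOT = "isNot"
    CONTAINS = "contains"


SQL_OPERATORS = {
    SearchEventOperator.IS: "=",
    SearchEventOperator.IS_NOT: "!=",
    SearchEventOperator.CONTAINS: "ILIKE",
}

# A str Enum member hashes/compares equal to its value, so both lookups succeed:
assert SQL_OPERATORS[SearchEventOperator.IS_NOT] == "!="
assert SQL_OPERATORS["isNot"] == "!="
```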
591 api/product_analytics_working_query.sql Normal file

@ -0,0 +1,591 @@
-- -- Original Q3
-- WITH ranked_events AS (SELECT *
--                        FROM ranked_events_1736344377403),
--      n1 AS (SELECT event_number_in_session,
--                    event_type,
--                    e_value,
--                    next_type,
--                    next_value,
--                    COUNT(1) AS sessions_count
--             FROM ranked_events
--             WHERE event_number_in_session = 1
--               AND isNotNull(next_value)
--             GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
--             ORDER BY sessions_count DESC
--             LIMIT 8),
--      n2 AS (SELECT *
--             FROM (SELECT re.event_number_in_session AS event_number_in_session,
--                          re.event_type AS event_type,
--                          re.e_value AS e_value,
--                          re.next_type AS next_type,
--                          re.next_value AS next_value,
--                          COUNT(1) AS sessions_count
--                   FROM n1
--                        INNER JOIN ranked_events AS re
--                                   ON (n1.next_value = re.e_value AND n1.next_type = re.event_type)
--                   WHERE re.event_number_in_session = 2
--                   GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
--                            re.next_value) AS sub_level
--             ORDER BY sessions_count DESC
--             LIMIT 8),
--      n3 AS (SELECT *
--             FROM (SELECT re.event_number_in_session AS event_number_in_session,
--                          re.event_type AS event_type,
--                          re.e_value AS e_value,
--                          re.next_type AS next_type,
--                          re.next_value AS next_value,
--                          COUNT(1) AS sessions_count
--                   FROM n2
--                        INNER JOIN ranked_events AS re
--                                   ON (n2.next_value = re.e_value AND n2.next_type = re.event_type)
--                   WHERE re.event_number_in_session = 3
--                   GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
--                            re.next_value) AS sub_level
--             ORDER BY sessions_count DESC
--             LIMIT 8),
--      n4 AS (SELECT *
--             FROM (SELECT re.event_number_in_session AS event_number_in_session,
--                          re.event_type AS event_type,
--                          re.e_value AS e_value,
--                          re.next_type AS next_type,
--                          re.next_value AS next_value,
--                          COUNT(1) AS sessions_count
--                   FROM n3
--                        INNER JOIN ranked_events AS re
--                                   ON (n3.next_value = re.e_value AND n3.next_type = re.event_type)
--                   WHERE re.event_number_in_session = 4
--                   GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
--                            re.next_value) AS sub_level
--             ORDER BY sessions_count DESC
--             LIMIT 8),
--      n5 AS (SELECT *
--             FROM (SELECT re.event_number_in_session AS event_number_in_session,
--                          re.event_type AS event_type,
--                          re.e_value AS e_value,
--                          re.next_type AS next_type,
--                          re.next_value AS next_value,
--                          COUNT(1) AS sessions_count
--                   FROM n4
--                        INNER JOIN ranked_events AS re
--                                   ON (n4.next_value = re.e_value AND n4.next_type = re.event_type)
--                   WHERE re.event_number_in_session = 5
--                   GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
--                            re.next_value) AS sub_level
--             ORDER BY sessions_count DESC
--             LIMIT 8)
-- SELECT *
-- FROM (SELECT event_number_in_session,
--              event_type,
--              e_value,
--              next_type,
--              next_value,
--              sessions_count
--       FROM n1
--       UNION ALL
--       SELECT event_number_in_session,
--              event_type,
--              e_value,
--              next_type,
--              next_value,
--              sessions_count
--       FROM n2
--       UNION ALL
--       SELECT event_number_in_session,
--              event_type,
--              e_value,
--              next_type,
--              next_value,
--              sessions_count
--       FROM n3
--       UNION ALL
--       SELECT event_number_in_session,
--              event_type,
--              e_value,
--              next_type,
--              next_value,
--              sessions_count
--       FROM n4
--       UNION ALL
--       SELECT event_number_in_session,
--              event_type,
--              e_value,
--              next_type,
--              next_value,
--              sessions_count
--       FROM n5) AS chart_steps
-- ORDER BY event_number_in_session;

-- Q1
-- CREATE TEMPORARY TABLE pre_ranked_events_1736344377403 AS
CREATE TABLE pre_ranked_events_1736344377403 ENGINE = Memory AS
(WITH initial_event AS (SELECT events.session_id, MIN(datetime) AS start_event_timestamp
                        FROM experimental.events AS events
                        WHERE ((event_type = 'LOCATION' AND (url_path = '/en/deployment/')))
                          AND events.project_id = toUInt16(65)
                          AND events.datetime >= toDateTime(1735599600000 / 1000)
                          AND events.datetime < toDateTime(1736290799999 / 1000)
                        GROUP BY 1),
      pre_ranked_events AS (SELECT *
                            FROM (SELECT session_id,
                                         event_type,
                                         datetime,
                                         url_path AS e_value,
                                         row_number() OVER (PARTITION BY session_id
                                             ORDER BY datetime,
                                                 message_id) AS event_number_in_session
                                  FROM experimental.events AS events
                                       INNER JOIN initial_event ON (events.session_id = initial_event.session_id)
                                  WHERE events.project_id = toUInt16(65)
                                    AND events.datetime >= toDateTime(1735599600000 / 1000)
                                    AND events.datetime < toDateTime(1736290799999 / 1000)
                                    AND (events.event_type = 'LOCATION')
                                    AND events.datetime >= initial_event.start_event_timestamp
                                  ) AS full_ranked_events
                            WHERE event_number_in_session <= 5)
 SELECT *
 FROM pre_ranked_events);
;

SELECT *
FROM pre_ranked_events_1736344377403
WHERE event_number_in_session < 3;


-- ---------Q2-----------
-- CREATE TEMPORARY TABLE ranked_events_1736344377403 AS
DROP TABLE ranked_events_1736344377403;
CREATE TABLE ranked_events_1736344377403 ENGINE = Memory AS
(WITH pre_ranked_events AS (SELECT *
                            FROM pre_ranked_events_1736344377403),
      start_points AS (SELECT DISTINCT session_id
                       FROM pre_ranked_events
                       WHERE ((event_type = 'LOCATION' AND (e_value = '/en/deployment/')))
                         AND pre_ranked_events.event_number_in_session = 1),
      ranked_events AS (SELECT pre_ranked_events.*,
                               leadInFrame(e_value)
                                   OVER (PARTITION BY session_id ORDER BY datetime
                                       ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value,
                               leadInFrame(toNullable(event_type))
                                   OVER (PARTITION BY session_id ORDER BY datetime
                                       ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type
                        FROM start_points
                             INNER JOIN pre_ranked_events USING (session_id))
 SELECT *
 FROM ranked_events);


-- ranked events
SELECT event_number_in_session,
       event_type,
       e_value,
       next_type,
       next_value,
       COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 2
--   AND e_value='/en/deployment/deploy-docker/'
--   AND next_value NOT IN ('/en/deployment/','/en/plugins/','/en/using-or/')
--   AND e_value NOT IN ('/en/deployment/deploy-docker/','/en/getting-started/','/en/deployment/deploy-ubuntu/')
  AND isNotNull(next_value)
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY event_number_in_session, sessions_count DESC;



SELECT event_number_in_session,
       event_type,
       e_value,
       COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 1
GROUP BY event_number_in_session, event_type, e_value
ORDER BY event_number_in_session, sessions_count DESC;

SELECT COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 2
  AND isNull(next_value)
;

-- ---------Q3 MORE -----------
WITH ranked_events AS (SELECT *
                       FROM ranked_events_1736344377403),
     n1 AS (SELECT event_number_in_session,
                   event_type,
                   e_value,
                   next_type,
                   next_value,
                   COUNT(1) AS sessions_count
            FROM ranked_events
            WHERE event_number_in_session = 1
            GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
            ORDER BY sessions_count DESC),
     n2 AS (SELECT event_number_in_session,
                   event_type,
                   e_value,
                   next_type,
                   next_value,
                   COUNT(1) AS sessions_count
            FROM ranked_events
            WHERE event_number_in_session = 2
            GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
            ORDER BY sessions_count DESC),
     n3 AS (SELECT event_number_in_session,
                   event_type,
                   e_value,
                   next_type,
                   next_value,
                   COUNT(1) AS sessions_count
            FROM ranked_events
            WHERE event_number_in_session = 3
            GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
            ORDER BY sessions_count DESC),
     drop_n AS (-- STEP 1
                SELECT event_number_in_session,
                       event_type,
                       e_value,
                       'DROP' AS next_type,
                       NULL AS next_value,
                       sessions_count
                FROM n1
                WHERE isNull(n1.next_type)
                UNION ALL
                -- STEP 2
                SELECT event_number_in_session,
                       event_type,
                       e_value,
                       'DROP' AS next_type,
                       NULL AS next_value,
                       sessions_count
                FROM n2
                WHERE isNull(n2.next_type)),
     -- TODO: make this as top_steps, where every step will go to next as top/others
     top_n1 AS (-- STEP 1
                SELECT event_number_in_session,
                       event_type,
                       e_value,
                       next_type,
                       next_value,
                       sessions_count
                FROM n1
                WHERE isNotNull(next_type)
                ORDER BY sessions_count DESC
                LIMIT 3),
     top_n2 AS (-- STEP 2
                SELECT event_number_in_session,
                       event_type,
                       e_value,
                       next_type,
                       next_value,
                       sessions_count
                FROM n2
                WHERE (event_type, e_value) IN (SELECT event_type,
                                                       e_value
                                                FROM n2
                                                WHERE isNotNull(next_type)
                                                GROUP BY event_type, e_value
                                                ORDER BY SUM(sessions_count) DESC
                                                LIMIT 3)
                ORDER BY sessions_count DESC),
     top_n AS (SELECT *
               FROM top_n1
               UNION ALL
               SELECT *
               FROM top_n2),
     u_top_n AS (SELECT DISTINCT event_number_in_session,
                                 event_type,
                                 e_value
                 FROM top_n),
     others_n AS (
         -- STEP 1
         SELECT event_number_in_session,
                event_type,
                e_value,
                next_type,
                next_value,
                sessions_count
         FROM n1
         WHERE isNotNull(next_type)
         ORDER BY sessions_count DESC
         LIMIT 1000000 OFFSET 3
         UNION ALL
         -- STEP 2
         SELECT event_number_in_session,
                event_type,
                e_value,
                next_type,
                next_value,
                sessions_count
         FROM n2
         WHERE isNotNull(next_type)
         -- GROUP BY event_number_in_session, event_type, e_value
         ORDER BY sessions_count DESC
         LIMIT 1000000 OFFSET 3)
SELECT *
FROM (
     -- Top
     SELECT *
     FROM top_n
     -- UNION ALL
     -- -- Others
     -- SELECT event_number_in_session,
     --        event_type,
     --        e_value,
     --        'OTHER' AS next_type,
     --        NULL AS next_value,
     --        SUM(sessions_count)
     -- FROM others_n
     -- GROUP BY event_number_in_session, event_type, e_value
     -- UNION ALL
     -- -- Top go to Drop
     -- SELECT drop_n.event_number_in_session,
     --        drop_n.event_type,
     --        drop_n.e_value,
     --        drop_n.next_type,
     --        drop_n.next_value,
     --        drop_n.sessions_count
     -- FROM drop_n
     --      INNER JOIN u_top_n ON (drop_n.event_number_in_session = u_top_n.event_number_in_session
     --          AND drop_n.event_type = u_top_n.event_type
     --          AND drop_n.e_value = u_top_n.e_value)
     -- ORDER BY drop_n.event_number_in_session
     -- -- -- UNION ALL
     -- -- -- Top go to Others
     -- SELECT top_n.event_number_in_session,
     --        top_n.event_type,
     --        top_n.e_value,
     --        'OTHER' AS next_type,
     --        NULL AS next_value,
     --        SUM(top_n.sessions_count) AS sessions_count
     -- FROM top_n
     --      LEFT JOIN others_n ON (others_n.event_number_in_session = (top_n.event_number_in_session + 1)
     --          AND top_n.next_type = others_n.event_type
     --          AND top_n.next_value = others_n.e_value)
     -- WHERE others_n.event_number_in_session IS NULL
     --   AND top_n.next_type IS NOT NULL
     -- GROUP BY event_number_in_session, event_type, e_value
     -- UNION ALL
     -- -- Others got to Top
     -- SELECT others_n.event_number_in_session,
     --        'OTHER' AS event_type,
     --        NULL AS e_value,
     --        others_n.s_next_type AS next_type,
     --        others_n.s_next_value AS next_value,
     --        SUM(sessions_count) AS sessions_count
     -- FROM others_n
     --      INNER JOIN top_n ON (others_n.event_number_in_session = top_n.event_number_in_session + 1 AND
     --                           others_n.s_next_type = top_n.event_type AND
     --                           others_n.s_next_value = top_n.event_type)
     -- GROUP BY others_n.event_number_in_session, next_type, next_value
     -- UNION ALL
     -- -- TODO: find if this works or not
     -- -- Others got to Others
     -- SELECT others_n.event_number_in_session,
     --        'OTHER' AS event_type,
     --        NULL AS e_value,
     --        'OTHERS' AS next_type,
     --        NULL AS next_value,
     --        SUM(sessions_count) AS sessions_count
     -- FROM others_n
     --      LEFT JOIN u_top_n ON ((others_n.event_number_in_session + 1) = u_top_n.event_number_in_session
     --          AND others_n.s_next_type = u_top_n.event_type
     --          AND others_n.s_next_value = u_top_n.e_value)
     -- WHERE u_top_n.event_number_in_session IS NULL
     -- GROUP BY others_n.event_number_in_session
     )
ORDER BY event_number_in_session;


-- ---------Q3 TOP ON VALUE ONLY -----------
WITH ranked_events AS (SELECT *
                       FROM ranked_events_1736344377403),
     n1 AS (SELECT event_number_in_session,
                   event_type,
                   e_value,
                   next_type,
                   next_value,
                   COUNT(1) AS sessions_count
            FROM ranked_events
            WHERE event_number_in_session = 1
            GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
            ORDER BY sessions_count DESC),
     n2 AS (SELECT event_number_in_session,
                   event_type,
                   e_value,
                   next_type,
                   next_value,
                   COUNT(1) AS sessions_count
            FROM ranked_events
            WHERE event_number_in_session = 2
            GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
            ORDER BY sessions_count DESC),
     n3 AS (SELECT event_number_in_session,
                   event_type,
                   e_value,
                   next_type,
                   next_value,
                   COUNT(1) AS sessions_count
            FROM ranked_events
            WHERE event_number_in_session = 3
            GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
            ORDER BY sessions_count DESC),

     drop_n AS (-- STEP 1
                SELECT event_number_in_session,
                       event_type,
                       e_value,
                       'DROP' AS next_type,
                       NULL AS next_value,
                       sessions_count
                FROM n1
                WHERE isNull(n1.next_type)
                UNION ALL
                -- STEP 2
                SELECT event_number_in_session,
                       event_type,
                       e_value,
                       'DROP' AS next_type,
                       NULL AS next_value,
                       sessions_count
                FROM n2
                WHERE isNull(n2.next_type)),
     top_n AS (SELECT event_number_in_session,
                      event_type,
                      e_value,
                      SUM(sessions_count) AS sessions_count
               FROM n1
               GROUP BY event_number_in_session, event_type, e_value
               LIMIT 1
               UNION ALL
               -- STEP 2
               SELECT event_number_in_session,
                      event_type,
                      e_value,
                      SUM(sessions_count) AS sessions_count
               FROM n2
               GROUP BY event_number_in_session, event_type, e_value
               ORDER BY sessions_count DESC
               LIMIT 3
               UNION ALL
               -- STEP 3
               SELECT event_number_in_session,
                      event_type,
                      e_value,
                      SUM(sessions_count) AS sessions_count
               FROM n3
               GROUP BY event_number_in_session, event_type, e_value
               ORDER BY sessions_count DESC
               LIMIT 3),
     top_n_with_next AS (SELECT n1.*
                         FROM n1
                         UNION ALL
                         SELECT n2.*
                         FROM n2
                              INNER JOIN top_n ON (n2.event_number_in_session = top_n.event_number_in_session
                                  AND n2.event_type = top_n.event_type
                                  AND n2.e_value = top_n.e_value)),
     others_n AS (
         -- STEP 2
         SELECT n2.*
         FROM n2
         WHERE (n2.event_number_in_session, n2.event_type, n2.e_value) NOT IN
               (SELECT event_number_in_session, event_type, e_value
                FROM top_n
                WHERE top_n.event_number_in_session = 2)
         UNION ALL
         -- STEP 3
         SELECT n3.*
         FROM n3
         WHERE (n3.event_number_in_session, n3.event_type, n3.e_value) NOT IN
               (SELECT event_number_in_session, event_type, e_value
                FROM top_n
                WHERE top_n.event_number_in_session = 3))
SELECT *
FROM (
     -- SELECT sum(top_n_with_next.sessions_count)
     -- FROM top_n_with_next
     -- WHERE event_number_in_session = 1
     -- --   AND isNotNull(next_value)
     --   AND (next_type, next_value) IN
     --       (SELECT others_n.event_type, others_n.e_value FROM others_n WHERE others_n.event_number_in_session = 2)
     -- -- SELECT * FROM others_n
     -- -- SELECT * FROM n2
     -- SELECT *
     -- FROM top_n
     -- );
     -- Top to Top: valid
     SELECT top_n_with_next.*
     FROM top_n_with_next
          INNER JOIN top_n
                     ON (top_n_with_next.event_number_in_session + 1 = top_n.event_number_in_session
                         AND top_n_with_next.next_type = top_n.event_type
                         AND top_n_with_next.next_value = top_n.e_value)
     UNION ALL
     -- Top to Others: valid
     SELECT top_n_with_next.event_number_in_session,
            top_n_with_next.event_type,
            top_n_with_next.e_value,
            'OTHER' AS next_type,
            NULL AS next_value,
            SUM(top_n_with_next.sessions_count) AS sessions_count
     FROM top_n_with_next
     WHERE (top_n_with_next.event_number_in_session + 1, top_n_with_next.next_type, top_n_with_next.next_value) IN
           (SELECT others_n.event_number_in_session, others_n.event_type, others_n.e_value FROM others_n)
     GROUP BY top_n_with_next.event_number_in_session, top_n_with_next.event_type, top_n_with_next.e_value
     UNION ALL
     -- Top go to Drop: valid
     SELECT drop_n.event_number_in_session,
            drop_n.event_type,
            drop_n.e_value,
            drop_n.next_type,
            drop_n.next_value,
            drop_n.sessions_count
     FROM drop_n
          INNER JOIN top_n ON (drop_n.event_number_in_session = top_n.event_number_in_session
              AND drop_n.event_type = top_n.event_type
              AND drop_n.e_value = top_n.e_value)
     ORDER BY drop_n.event_number_in_session
     UNION ALL
     -- Others got to Drop: valid
     SELECT others_n.event_number_in_session,
            'OTHER' AS event_type,
            NULL AS e_value,
            'DROP' AS next_type,
            NULL AS next_value,
            SUM(others_n.sessions_count) AS sessions_count
     FROM others_n
     WHERE isNull(others_n.next_type)
       AND others_n.event_number_in_session < 3
     GROUP BY others_n.event_number_in_session, next_type, next_value
     UNION ALL
     -- Others got to Top:valid
     SELECT others_n.event_number_in_session,
            'OTHER' AS event_type,
            NULL AS e_value,
            others_n.next_type,
            others_n.next_value,
            SUM(others_n.sessions_count) AS sessions_count
     FROM others_n
     WHERE isNotNull(others_n.next_type)
       AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) IN
           (SELECT top_n.event_number_in_session, top_n.event_type, top_n.e_value FROM top_n)
     GROUP BY others_n.event_number_in_session, others_n.next_type, others_n.next_value
     UNION ALL
     -- Others got to Others
     SELECT others_n.event_number_in_session,
            'OTHER' AS event_type,
            NULL AS e_value,
            'OTHERS' AS next_type,
            NULL AS next_value,
            SUM(sessions_count) AS sessions_count
     FROM others_n
     WHERE isNotNull(others_n.next_type)
       AND others_n.event_number_in_session < 3
       AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) NOT IN
           (SELECT event_number_in_session, event_type, e_value FROM top_n)
     GROUP BY others_n.event_number_in_session)
ORDER BY event_number_in_session, sessions_count
    DESC;
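Editor's note: the scratch file above ranks each session's LOCATION events, pairs each event with the one that follows it (`leadInFrame`), and counts per-step transitions, treating a missing next event as a drop. A small Python sketch of the same idea on made-up data, purely to illustrate what those queries compute:

```python
from collections import Counter, defaultdict

# Toy illustration of the scratch queries above: rank each session's events,
# attach the next event (what leadInFrame does), and count (step, value, next_value) transitions.
events = [  # (session_id, timestamp, url_path) -- made-up data
    (1, 10, "/en/deployment/"), (1, 20, "/en/deployment/deploy-docker/"),
    (2, 11, "/en/deployment/"), (2, 25, "/en/getting-started/"),
    (3, 12, "/en/deployment/"),
]

by_session = defaultdict(list)
for sid, ts, url in sorted(events, key=lambda e: (e[0], e[1])):
    by_session[sid].append(url)

transitions = Counter()
for urls in by_session.values():
    for step, url in enumerate(urls, start=1):
        next_url = urls[step] if step < len(urls) else None  # None plays the role of a DROP
        transitions[(step, url, next_url)] += 1

for (step, url, nxt), count in sorted(transitions.items(), key=lambda kv: kv[0][0]):
    print(step, url, "->", nxt, count)
```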
@ -10,22 +10,31 @@ public_app, app, app_apikey = get_routers()


 @app.post("/{projectId}/dashboards", tags=["dashboard"])
-def create_dashboards(projectId: int, data: schemas.CreateDashboardSchema = Body(...),
-                      context: schemas.CurrentContext = Depends(OR_context)):
+def create_dashboards(
+    projectId: int,
+    data: schemas.CreateDashboardSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return dashboards.create_dashboard(
         project_id=projectId, user_id=context.user_id, data=data
     )


 @app.get("/{projectId}/dashboards", tags=["dashboard"])
-def get_dashboards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
+def get_dashboards(
+    projectId: int, context: schemas.CurrentContext = Depends(OR_context)
+):
     return {
         "data": dashboards.get_dashboards(project_id=projectId, user_id=context.user_id)
     }


 @app.get("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
-def get_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
+def get_dashboard(
+    projectId: int,
+    dashboardId: int,
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     data = dashboards.get_dashboard(
         project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
     )
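Editor's note: the hunks that follow apply the same mechanical reformatting as the one above: one parameter per line, a trailing comma, and the closing `):` on its own line, with no behavioral change. A tiny self-contained sketch of the two layouts on a hypothetical route (FastAPI assumed installed); the routes are functionally identical, the multi-line form just keeps future diffs to one line per parameter:

```python
from fastapi import Body, FastAPI

app = FastAPI()


# Same hypothetical route in the layout adopted by this diff:
# one parameter per line, trailing comma, closing "):" alone.
@app.post("/{project_id}/example")
def example_route(
    project_id: int,
    payload: dict = Body(...),
):
    return {"project_id": project_id, "payload": payload}
```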
@ -35,8 +44,12 @@ def get_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentCont


 @app.put("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
-def update_dashboard(projectId: int, dashboardId: int, data: schemas.EditDashboardSchema = Body(...),
-                     context: schemas.CurrentContext = Depends(OR_context)):
+def update_dashboard(
+    projectId: int,
+    dashboardId: int,
+    data: schemas.EditDashboardSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return {
         "data": dashboards.update_dashboard(
             project_id=projectId,

@ -48,15 +61,23 @@ def update_dashboard(projectId: int, dashboardId: int, data: schemas.EditDashboa


 @app.delete("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
-def delete_dashboard(projectId: int, dashboardId: int, _=Body(None),
-                     context: schemas.CurrentContext = Depends(OR_context)):
+def delete_dashboard(
+    projectId: int,
+    dashboardId: int,
+    _=Body(None),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return dashboards.delete_dashboard(
         project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
     )


 @app.get("/{projectId}/dashboards/{dashboardId}/pin", tags=["dashboard"])
-def pin_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
+def pin_dashboard(
+    projectId: int,
+    dashboardId: int,
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return {
         "data": dashboards.pin_dashboard(
             project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId

@ -65,8 +86,12 @@ def pin_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentCont


 @app.post("/{projectId}/dashboards/{dashboardId}/cards", tags=["cards"])
-def add_card_to_dashboard(projectId: int, dashboardId: int, data: schemas.AddWidgetToDashboardPayloadSchema = Body(...),
-                          context: schemas.CurrentContext = Depends(OR_context)):
+def add_card_to_dashboard(
+    projectId: int,
+    dashboardId: int,
+    data: schemas.AddWidgetToDashboardPayloadSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return {
         "data": dashboards.add_widget(
             project_id=projectId,

@ -79,8 +104,12 @@ def add_card_to_dashboard(projectId: int, dashboardId: int, data: schemas.AddWid

 @app.post("/{projectId}/dashboards/{dashboardId}/metrics", tags=["dashboard"])
 # @app.put('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
-def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int, data: schemas.CardSchema = Body(...),
-                                       context: schemas.CurrentContext = Depends(OR_context)):
+def create_metric_and_add_to_dashboard(
+    projectId: int,
+    dashboardId: int,
+    data: schemas.CardSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return {
         "data": dashboards.create_metric_add_widget(
             project=context.project,

@ -92,9 +121,13 @@ def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int, data: s


 @app.put("/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"])
-def update_widget_in_dashboard(projectId: int, dashboardId: int, widgetId: int,
-                               data: schemas.UpdateWidgetPayloadSchema = Body(...),
-                               context: schemas.CurrentContext = Depends(OR_context)):
+def update_widget_in_dashboard(
+    projectId: int,
+    dashboardId: int,
+    widgetId: int,
+    data: schemas.UpdateWidgetPayloadSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return dashboards.update_widget(
         project_id=projectId,
         user_id=context.user_id,

@ -104,9 +137,16 @@ def update_widget_in_dashboard(projectId: int, dashboardId: int, widgetId: int,
     )


-@app.delete("/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"])
-def remove_widget_from_dashboard(projectId: int, dashboardId: int, widgetId: int, _=Body(None),
-                                 context: schemas.CurrentContext = Depends(OR_context)):
+@app.delete(
+    "/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"]
+)
+def remove_widget_from_dashboard(
+    projectId: int,
+    dashboardId: int,
+    widgetId: int,
+    _=Body(None),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return dashboards.remove_widget(
         project_id=projectId,
         user_id=context.user_id,

@ -116,8 +156,11 @@ def remove_widget_from_dashboard(projectId: int, dashboardId: int, widgetId: int


 @app.post("/{projectId}/cards/try", tags=["cards"])
-def try_card(projectId: int, data: schemas.CardSchema = Body(...),
-             context: schemas.CurrentContext = Depends(OR_context)):
+def try_card(
+    projectId: int,
+    data: schemas.CardSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return {
         "data": custom_metrics.get_chart(
             project=context.project, data=data, user_id=context.user_id

@ -126,8 +169,11 @@ def try_card(projectId: int, data: schemas.CardSchema = Body(...),


 @app.post("/{projectId}/cards/try/sessions", tags=["cards"])
-def try_card_sessions(projectId: int, data: schemas.CardSessionsSchema = Body(...),
-                      context: schemas.CurrentContext = Depends(OR_context)):
+def try_card_sessions(
+    projectId: int,
+    data: schemas.CardSessionsSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     data = custom_metrics.get_sessions(
         project=context.project, user_id=context.user_id, data=data
     )

@ -135,8 +181,11 @@ def try_card_sessions(projectId: int, data: schemas.CardSessionsSchema = Body(..


 @app.post("/{projectId}/cards/try/issues", tags=["cards"])
-def try_card_issues(projectId: int, data: schemas.CardSchema = Body(...),
-                    context: schemas.CurrentContext = Depends(OR_context)):
+def try_card_issues(
+    projectId: int,
+    data: schemas.CardSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return {
         "data": custom_metrics.get_issues(
             project=context.project, user_id=context.user_id, data=data

@ -152,16 +201,22 @@ def get_cards(projectId: int, context: schemas.CurrentContext = Depends(OR_conte


 @app.post("/{projectId}/cards", tags=["cards"])
-def create_card(projectId: int, data: schemas.CardSchema = Body(...),
-                context: schemas.CurrentContext = Depends(OR_context)):
+def create_card(
+    projectId: int,
+    data: schemas.CardSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return custom_metrics.create_card(
         project=context.project, user_id=context.user_id, data=data
     )


 @app.post("/{projectId}/cards/search", tags=["cards"])
-def search_cards(projectId: int, data: schemas.MetricSearchSchema = Body(...),
-                 context: schemas.CurrentContext = Depends(OR_context)):
+def search_cards(
+    projectId: int,
+    data: schemas.MetricSearchSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     return {
         "data": custom_metrics.search_metrics(
             project_id=projectId, user_id=context.user_id, data=data

@ -170,7 +225,11 @@ def search_cards(projectId: int, data: schemas.MetricSearchSchema = Body(...),


 @app.get("/{projectId}/cards/{metric_id}", tags=["cards"])
-def get_card(projectId: int, metric_id: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)):
+def get_card(
+    projectId: int,
+    metric_id: Union[int, str],
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     if metric_id.isnumeric():
         metric_id = int(metric_id)
     else:

@ -184,8 +243,12 @@ def get_card(projectId: int, metric_id: Union[int, str], context: schemas.Curren


 @app.post("/{projectId}/cards/{metric_id}/sessions", tags=["cards"])
-def get_card_sessions(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...),
-                      context: schemas.CurrentContext = Depends(OR_context)):
+def get_card_sessions(
+    projectId: int,
+    metric_id: int,
+    data: schemas.CardSessionsSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     data = custom_metrics.get_sessions_by_card_id(
         project=context.project, user_id=context.user_id, metric_id=metric_id, data=data
     )

@ -194,10 +257,16 @@ def get_card_sessions(projectId: int, metric_id: int, data: schemas.CardSessions
     return {"data": data}


-@app.post("/{projectId}/cards/{metric_id}/issues/{issueId}/sessions", tags=["dashboard"])
-def get_metric_funnel_issue_sessions(projectId: int, metric_id: int, issueId: str,
-                                     data: schemas.CardSessionsSchema = Body(...),
-                                     context: schemas.CurrentContext = Depends(OR_context)):
+@app.post(
+    "/{projectId}/cards/{metric_id}/issues/{issueId}/sessions", tags=["dashboard"]
+)
+def get_metric_funnel_issue_sessions(
+    projectId: int,
+    metric_id: int,
+    issueId: str,
+    data: schemas.CardSessionsSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     data = custom_metrics.get_funnel_sessions_by_issue(
         project_id=projectId,
         user_id=context.user_id,

@ -211,8 +280,12 @@ def get_metric_funnel_issue_sessions(projectId: int, metric_id: int, issueId: st


 @app.post("/{projectId}/cards/{metric_id}/chart", tags=["card"])
-def get_card_chart(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...),
-                   context: schemas.CurrentContext = Depends(OR_context)):
+def get_card_chart(
+    projectId: int,
+    metric_id: int,
+    data: schemas.CardSessionsSchema = Body(...),
+    context: schemas.CurrentContext = Depends(OR_context),
+):
     data = custom_metrics.make_chart_from_card(
         project=context.project, user_id=context.user_id, metric_id=metric_id, data=data
|
project=context.project, user_id=context.user_id, metric_id=metric_id, data=data
|
||||||
)
|
)
|
||||||
|
|
@ -220,8 +293,12 @@ def get_card_chart(projectId: int, metric_id: int, data: schemas.CardSessionsSch
|
||||||
|
|
||||||
|
|
||||||
@app.post("/{projectId}/cards/{metric_id}", tags=["dashboard"])
|
@app.post("/{projectId}/cards/{metric_id}", tags=["dashboard"])
|
||||||
def update_card(projectId: int, metric_id: int, data: schemas.CardSchema = Body(...),
|
def update_card(
|
||||||
context: schemas.CurrentContext = Depends(OR_context)):
|
projectId: int,
|
||||||
|
metric_id: int,
|
||||||
|
data: schemas.CardSchema = Body(...),
|
||||||
|
context: schemas.CurrentContext = Depends(OR_context),
|
||||||
|
):
|
||||||
data = custom_metrics.update_card(
|
data = custom_metrics.update_card(
|
||||||
project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data
|
project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data
|
||||||
)
|
)
|
||||||
|
|
@ -231,8 +308,12 @@ def update_card(projectId: int, metric_id: int, data: schemas.CardSchema = Body(
|
||||||
|
|
||||||
|
|
||||||
@app.post("/{projectId}/cards/{metric_id}/status", tags=["dashboard"])
|
@app.post("/{projectId}/cards/{metric_id}/status", tags=["dashboard"])
|
||||||
def update_card_state(projectId: int, metric_id: int, data: schemas.UpdateCardStatusSchema = Body(...),
|
def update_card_state(
|
||||||
context: schemas.CurrentContext = Depends(OR_context)):
|
projectId: int,
|
||||||
|
metric_id: int,
|
||||||
|
data: schemas.UpdateCardStatusSchema = Body(...),
|
||||||
|
context: schemas.CurrentContext = Depends(OR_context),
|
||||||
|
):
|
||||||
return {
|
return {
|
||||||
"data": custom_metrics.change_state(
|
"data": custom_metrics.change_state(
|
||||||
project_id=projectId,
|
project_id=projectId,
|
||||||
|
|
@ -244,7 +325,12 @@ def update_card_state(projectId: int, metric_id: int, data: schemas.UpdateCardSt
|
||||||
|
|
||||||
|
|
||||||
@app.delete("/{projectId}/cards/{metric_id}", tags=["dashboard"])
|
@app.delete("/{projectId}/cards/{metric_id}", tags=["dashboard"])
|
||||||
def delete_card(projectId: int, metric_id: int, _=Body(None), context: schemas.CurrentContext = Depends(OR_context)):
|
def delete_card(
|
||||||
|
projectId: int,
|
||||||
|
metric_id: int,
|
||||||
|
_=Body(None),
|
||||||
|
context: schemas.CurrentContext = Depends(OR_context),
|
||||||
|
):
|
||||||
return {
|
return {
|
||||||
"data": custom_metrics.delete_card(
|
"data": custom_metrics.delete_card(
|
||||||
project_id=projectId, user_id=context.user_id, metric_id=metric_id
|
project_id=projectId, user_id=context.user_id, metric_id=metric_id
|
||||||
|
|
|
||||||
|
|
@@ -960,6 +960,36 @@ class CardSessionsSchema(_TimedSchema, _PaginatedSchema):

         return self

+    # We don't need this as the UI is expecting filters to override the full series' filters
+    # @model_validator(mode="after")
+    # def __merge_out_filters_with_series(self):
+    #     for f in self.filters:
+    #         for s in self.series:
+    #             found = False
+    #
+    #             if f.is_event:
+    #                 sub = s.filter.events
+    #             else:
+    #                 sub = s.filter.filters
+    #
+    #             for e in sub:
+    #                 if f.type == e.type and f.operator == e.operator:
+    #                     found = True
+    #                     if f.is_event:
+    #                         # If extra event: append value
+    #                         for v in f.value:
+    #                             if v not in e.value:
+    #                                 e.value.append(v)
+    #                     else:
+    #                         # If extra filter: override value
+    #                         e.value = f.value
+    #             if not found:
+    #                 sub.append(f)
+    #
+    #     self.filters = []
+    #
+    #     return self
+
     # UI is expecting filters to override the full series' filters
     @model_validator(mode="after")
     def __override_series_filters_with_outer_filters(self):

@@ -1030,16 +1060,6 @@ class CardTable(__CardSchema):
             values["metricValue"] = []
         return values

-    @model_validator(mode="after")
-    def __enforce_AND_operator(self):
-        self.metric_of = MetricOfTable(self.metric_of)
-        if self.metric_of in (MetricOfTable.VISITED_URL, MetricOfTable.FETCH, \
-                              MetricOfTable.VISITED_URL.value, MetricOfTable.FETCH.value):
-            for s in self.series:
-                if s.filter is not None:
-                    s.filter.events_order = SearchEventOrder.AND
-        return self
-
     @model_validator(mode="after")
     def __transform(self):
         self.metric_of = MetricOfTable(self.metric_of)

@@ -1115,7 +1135,7 @@ class CardPathAnalysis(__CardSchema):
     view_type: MetricOtherViewType = Field(...)
     metric_value: List[ProductAnalyticsSelectedEventType] = Field(default_factory=list)
     density: int = Field(default=4, ge=2, le=10)
-    rows: int = Field(default=5, ge=1, le=10)
+    rows: int = Field(default=3, ge=1, le=10)

     start_type: Literal["start", "end"] = Field(default="start")
     start_point: List[PathAnalysisSubFilterSchema] = Field(default_factory=list)
@@ -19,16 +19,14 @@ const EVENTS_DEFINITION = {
     }
 };
 EVENTS_DEFINITION.emit = {
     NEW_AGENT: "NEW_AGENT",
     NO_AGENTS: "NO_AGENT",
     AGENT_DISCONNECT: "AGENT_DISCONNECTED",
     AGENTS_CONNECTED: "AGENTS_CONNECTED",
-    AGENTS_INFO_CONNECTED: "AGENTS_INFO_CONNECTED",
     NO_SESSIONS: "SESSION_DISCONNECTED",
     SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED",
     SESSION_RECONNECTED: "SESSION_RECONNECTED",
-    UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT,
-    WEBRTC_CONFIG: "WEBRTC_CONFIG",
+    UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT
 };

 const BASE_sessionInfo = {
@@ -27,14 +27,9 @@ const respond = function (req, res, data) {
         res.setHeader('Content-Type', 'application/json');
         res.end(JSON.stringify(result));
     } else {
-        if (!res.aborted) {
-            res.cork(() => {
-                res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result));
-            });
-        } else {
-            logger.debug("response aborted");
-            return;
-        }
+        res.cork(() => {
+            res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result));
+        });
     }
     const duration = performance.now() - req.startTs;
     IncreaseTotalRequests();

@@ -42,7 +42,7 @@ const findSessionSocketId = async (io, roomId, tabId) => {
 };

 async function getRoomData(io, roomID) {
-    let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [], config = null, agentInfos = [];
+    let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [];
     const connected_sockets = await io.in(roomID).fetchSockets();
     if (connected_sockets.length > 0) {
         for (let socket of connected_sockets) {

@@ -52,19 +52,13 @@ async function getRoomData(io, roomID) {
             } else {
                 agentsCount++;
                 agentIDs.push(socket.id);
-                agentInfos.push({ ...socket.handshake.query.agentInfo, socketId: socket.id });
-                if (socket.handshake.query.config !== undefined) {
-                    config = socket.handshake.query.config;
-                }
             }
         }
     } else {
         tabsCount = -1;
         agentsCount = -1;
-        agentInfos = [];
-        agentIDs = [];
     }
-    return {tabsCount, agentsCount, tabIDs, agentIDs, config, agentInfos};
+    return {tabsCount, agentsCount, tabIDs, agentIDs};
 }

 function processNewSocket(socket) {

@@ -84,7 +78,7 @@ async function onConnect(socket) {
     IncreaseOnlineConnections(socket.handshake.query.identity);

    const io = getServer();
-    const {tabsCount, agentsCount, tabIDs, agentInfos, agentIDs, config} = await getRoomData(io, socket.handshake.query.roomId);
+    const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(io, socket.handshake.query.roomId);

    if (socket.handshake.query.identity === IDENTITIES.session) {
        // Check if session with the same tabID already connected, if so, refuse new connexion

@@ -106,9 +100,7 @@ async function onConnect(socket) {
            // Inform all connected agents about reconnected session
            if (agentsCount > 0) {
                logger.debug(`notifying new session about agent-existence`);
-                io.to(socket.id).emit(EVENTS_DEFINITION.emit.WEBRTC_CONFIG, config);
                io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs);
-                io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_INFO_CONNECTED, agentInfos);
                socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
            }
        } else if (tabsCount <= 0) {

@@ -126,8 +118,7 @@ async function onConnect(socket) {
            // Stats
            startAssist(socket, socket.handshake.query.agentID);
        }
-        io.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.WEBRTC_CONFIG, socket.handshake.query.config);
-        socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, { ...socket.handshake.query.agentInfo });
+        socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo);
    }

    // Set disconnect handler
@@ -8,7 +8,8 @@ import (
     "openreplay/backend/pkg/db/postgres/pool"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/metrics"
-    "openreplay/backend/pkg/metrics/database"
+    analyticsMetrics "openreplay/backend/pkg/metrics/analytics"
+    databaseMetrics "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/server"
     "openreplay/backend/pkg/server/api"

@@ -18,18 +19,16 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := analyticsConfig.New(log)
-    // Observability
     webMetrics := web.New("analytics")
-    dbMetrics := database.New("analytics")
-    metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
+    metrics.New(log, append(webMetrics.List(), append(analyticsMetrics.List(), databaseMetrics.List()...)...))

-    pgConn, err := pool.New(dbMetrics, cfg.Postgres.String())
+    pgConn, err := pool.New(cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
     defer pgConn.Close()

-    builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn)
+    builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, pgConn)
     if err != nil {
         log.Fatal(ctx, "can't init services: %s", err)
     }
@@ -22,15 +22,13 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    // Observability
-    assetMetrics := assetsMetrics.New("assets")
-    metrics.New(log, assetMetrics.List())
+    metrics.New(log, assetsMetrics.List())

     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         log.Fatal(ctx, "can't init object storage: %s", err)
     }
-    cacher, err := cacher.NewCacher(cfg, objStore, assetMetrics)
+    cacher, err := cacher.NewCacher(cfg, objStore)
     if err != nil {
         log.Fatal(ctx, "can't init cacher: %s", err)
     }

@@ -39,7 +37,7 @@ func main() {
         switch m := msg.(type) {
         case *messages.AssetCache:
             cacher.CacheURL(m.SessionID(), m.URL)
-            assetMetrics.IncreaseProcessesSessions()
+            assetsMetrics.IncreaseProcessesSessions()
         case *messages.JSException:
             sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
             if err != nil {
@@ -22,7 +22,6 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    // Observability
     canvasMetrics := canvasesMetrics.New("canvases")
     metrics.New(log, canvasMetrics.List())

@@ -14,7 +14,7 @@ import (
     "openreplay/backend/pkg/memory"
     "openreplay/backend/pkg/messages"
     "openreplay/backend/pkg/metrics"
-    "openreplay/backend/pkg/metrics/database"
+    databaseMetrics "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/projects"
     "openreplay/backend/pkg/queue"
     "openreplay/backend/pkg/sessions"

@@ -26,24 +26,22 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    // Observability
-    dbMetric := database.New("db")
-    metrics.New(log, dbMetric.List())
+    metrics.New(log, databaseMetrics.List())

-    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
+    pgConn, err := pool.New(cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
     defer pgConn.Close()

-    chConn := clickhouse.NewConnector(cfg.Clickhouse, dbMetric)
+    chConn := clickhouse.NewConnector(cfg.Clickhouse)
     if err := chConn.Prepare(); err != nil {
         log.Fatal(ctx, "can't prepare clickhouse: %s", err)
     }
     defer chConn.Stop()

     // Init db proxy module (postgres + clickhouse + batches)
-    dbProxy := postgres.NewConn(log, pgConn, chConn, dbMetric)
+    dbProxy := postgres.NewConn(log, pgConn, chConn)
     defer dbProxy.Close()

     // Init redis connection

@@ -53,8 +51,8 @@ func main() {
     }
     defer redisClient.Close()

-    projManager := projects.New(log, pgConn, redisClient, dbMetric)
-    sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric)
+    projManager := projects.New(log, pgConn, redisClient)
+    sessManager := sessions.New(log, pgConn, projManager, redisClient)
     tagsManager := tags.New(log, pgConn)

     // Init data saver
@@ -19,7 +19,7 @@ import (
     "openreplay/backend/pkg/memory"
     "openreplay/backend/pkg/messages"
     "openreplay/backend/pkg/metrics"
-    "openreplay/backend/pkg/metrics/database"
+    databaseMetrics "openreplay/backend/pkg/metrics/database"
     enderMetrics "openreplay/backend/pkg/metrics/ender"
     "openreplay/backend/pkg/projects"
     "openreplay/backend/pkg/queue"

@@ -31,12 +31,9 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := ender.New(log)
-    // Observability
-    dbMetric := database.New("ender")
-    enderMetric := enderMetrics.New("ender")
-    metrics.New(log, append(enderMetric.List(), dbMetric.List()...))
+    metrics.New(log, append(enderMetrics.List(), databaseMetrics.List()...))

-    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
+    pgConn, err := pool.New(cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }

@@ -48,10 +45,10 @@ func main() {
     }
     defer redisClient.Close()

-    projManager := projects.New(log, pgConn, redisClient, dbMetric)
-    sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric)
+    projManager := projects.New(log, pgConn, redisClient)
+    sessManager := sessions.New(log, pgConn, projManager, redisClient)

-    sessionEndGenerator, err := sessionender.New(enderMetric, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
+    sessionEndGenerator, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
     if err != nil {
         log.Fatal(ctx, "can't init ender service: %s", err)
     }
@@ -23,9 +23,7 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    // Observability
-    heuristicsMetric := heuristicsMetrics.New("heuristics")
-    metrics.New(log, heuristicsMetric.List())
+    metrics.New(log, heuristicsMetrics.List())

     // HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
     handlersFabric := func() []handlers.MessageProcessor {

@@ -64,7 +62,7 @@ func main() {
     }

     // Run service and wait for TERM signal
-    service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager, heuristicsMetric)
+    service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager)
     log.Info(ctx, "Heuristics service started")
     terminator.Wait(log, service)
 }
@@ -9,7 +9,7 @@ import (
     "openreplay/backend/pkg/db/redis"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/metrics"
-    "openreplay/backend/pkg/metrics/database"
+    databaseMetrics "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/queue"
     "openreplay/backend/pkg/server"

@@ -20,15 +20,13 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := http.New(log)
-    // Observability
     webMetrics := web.New("http")
-    dbMetric := database.New("http")
-    metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
+    metrics.New(log, append(webMetrics.List(), databaseMetrics.List()...))

     producer := queue.NewProducer(cfg.MessageSizeLimit, true)
     defer producer.Close(15000)

-    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
+    pgConn, err := pool.New(cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }

@@ -40,7 +38,7 @@ func main() {
     }
     defer redisClient.Close()

-    builder, err := services.New(log, cfg, webMetrics, dbMetric, producer, pgConn, redisClient)
+    builder, err := services.New(log, cfg, webMetrics, producer, pgConn, redisClient)
     if err != nil {
         log.Fatal(ctx, "failed while creating services: %s", err)
     }
@@ -23,7 +23,6 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    // Observability
     imageMetrics := imagesMetrics.New("images")
     metrics.New(log, imageMetrics.List())

@@ -18,18 +18,16 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    // Observability
     webMetrics := web.New("integrations")
-    dbMetric := database.New("integrations")
-    metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
+    metrics.New(log, append(webMetrics.List(), database.List()...))

-    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
+    pgConn, err := pool.New(cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
     defer pgConn.Close()

-    builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, dbMetric, pgConn)
+    builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, pgConn)
     if err != nil {
         log.Fatal(ctx, "can't init services: %s", err)
     }
@@ -9,14 +9,14 @@ import (
     "syscall"
     "time"

-    config "openreplay/backend/internal/config/sink"
+    "openreplay/backend/internal/config/sink"
     "openreplay/backend/internal/sink/assetscache"
     "openreplay/backend/internal/sink/sessionwriter"
     "openreplay/backend/internal/storage"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/messages"
     "openreplay/backend/pkg/metrics"
-    "openreplay/backend/pkg/metrics/sink"
+    sinkMetrics "openreplay/backend/pkg/metrics/sink"
     "openreplay/backend/pkg/queue"
     "openreplay/backend/pkg/url/assets"
 )

@@ -24,9 +24,7 @@ import (
 func main() {
     ctx := context.Background()
     log := logger.New()
-    cfg := config.New(log)
-    // Observability
-    sinkMetrics := sink.New("sink")
+    cfg := sink.New(log)
     metrics.New(log, sinkMetrics.List())

     if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {

@@ -41,7 +39,7 @@ func main() {
     if err != nil {
         log.Fatal(ctx, "can't init rewriter: %s", err)
     }
-    assetMessageHandler := assetscache.New(log, cfg, rewriter, producer, sinkMetrics)
+    assetMessageHandler := assetscache.New(log, cfg, rewriter, producer)
     counter := storage.NewLogCounter()

     var (

@@ -193,7 +191,7 @@ func main() {
             cfg.TopicRawWeb,
             cfg.TopicRawMobile,
         },
-        messages.NewSinkMessageIterator(log, msgHandler, nil, false, sinkMetrics),
+        messages.NewSinkMessageIterator(log, msgHandler, nil, false),
         false,
         cfg.MessageSizeLimit,
     )
@@ -19,20 +19,17 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := spotConfig.New(log)
-    // Observability
     webMetrics := web.New("spot")
-    spotMetric := spotMetrics.New("spot")
-    dbMetric := databaseMetrics.New("spot")
-    metrics.New(log, append(webMetrics.List(), append(spotMetric.List(), dbMetric.List()...)...))
+    metrics.New(log, append(webMetrics.List(), append(spotMetrics.List(), databaseMetrics.List()...)...))

-    pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
+    pgConn, err := pool.New(cfg.Postgres.String())
     if err != nil {
         log.Fatal(ctx, "can't init postgres connection: %s", err)
     }
     defer pgConn.Close()

     prefix := api.NoPrefix
-    builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, spotMetric, dbMetric, pgConn, prefix)
+    builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, pgConn, prefix)
     if err != nil {
         log.Fatal(ctx, "can't init services: %s", err)
     }
@@ -23,15 +23,13 @@ func main() {
     ctx := context.Background()
     log := logger.New()
     cfg := config.New(log)
-    // Observability
-    storageMetric := storageMetrics.New("storage")
-    metrics.New(log, storageMetric.List())
+    metrics.New(log, storageMetrics.List())

     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         log.Fatal(ctx, "can't init object storage: %s", err)
     }
-    srv, err := storage.New(cfg, log, objStore, storageMetric)
+    srv, err := storage.New(cfg, log, objStore)
     if err != nil {
         log.Fatal(ctx, "can't init storage service: %s", err)
     }
@@ -27,7 +27,6 @@ type cacher struct {
     objStorage     objectstorage.ObjectStorage // AWS Docs: "These clients are safe to use concurrently."
     httpClient     *http.Client                // Docs: "Clients are safe for concurrent use by multiple goroutines."
     rewriter       *assets.Rewriter            // Read only
-    metrics        metrics.Assets
     Errors         chan error
     sizeLimit      int
     requestHeaders map[string]string

@@ -38,7 +37,7 @@ func (c *cacher) CanCache() bool {
     return c.workers.CanAddTask()
 }

-func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage, metrics metrics.Assets) (*cacher, error) {
+func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher, error) {
     switch {
     case cfg == nil:
         return nil, errors.New("config is nil")

@@ -94,7 +93,6 @@ func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage, metrics me
         Errors:         make(chan error),
         sizeLimit:      cfg.AssetsSizeLimit,
         requestHeaders: cfg.AssetsRequestHeaders,
-        metrics:        metrics,
     }
     c.workers = NewPool(64, c.CacheFile)
     return c, nil

@@ -117,7 +115,7 @@ func (c *cacher) cacheURL(t *Task) {
         c.Errors <- errors.Wrap(err, t.urlContext)
         return
     }
-    c.metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
+    metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
     defer res.Body.Close()
     if res.StatusCode >= 400 {
         printErr := true

@@ -164,12 +162,12 @@ func (c *cacher) cacheURL(t *Task) {
     start = time.Now()
     err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, contentEncoding, objectstorage.NoCompression)
     if err != nil {
-        c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
+        metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
         c.Errors <- errors.Wrap(err, t.urlContext)
         return
     }
-    c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
-    c.metrics.IncreaseSavedSessions()
+    metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
+    metrics.IncreaseSavedSessions()

     if isCSS {
         if t.depth > 0 {
@@ -2,12 +2,11 @@ package datasaver

 import (
     "context"
-    "encoding/json"
-    "openreplay/backend/pkg/db/types"

     "openreplay/backend/internal/config/db"
     "openreplay/backend/pkg/db/clickhouse"
     "openreplay/backend/pkg/db/postgres"
+    "openreplay/backend/pkg/db/types"
     "openreplay/backend/pkg/logger"
     . "openreplay/backend/pkg/messages"
     queue "openreplay/backend/pkg/queue/types"

@@ -51,6 +50,10 @@ func New(log logger.Logger, cfg *db.Config, pg *postgres.Conn, ch clickhouse.Con
 }

 func (s *saverImpl) Handle(msg Message) {
+    if msg.TypeID() == MsgCustomEvent {
+        defer s.Handle(types.WrapCustomEvent(msg.(*CustomEvent)))
+    }
+
     var (
         sessCtx = context.WithValue(context.Background(), "sessionID", msg.SessionID())
         session *sessions.Session

@@ -66,23 +69,6 @@ func (s *saverImpl) Handle(msg Message) {
         return
     }

-    if msg.TypeID() == MsgCustomEvent {
-        m := msg.(*CustomEvent)
-        // Try to parse custom event payload to JSON and extract or_payload field
-        type CustomEventPayload struct {
-            CustomTimestamp uint64 `json:"or_timestamp"`
-        }
-        customPayload := &CustomEventPayload{}
-        if err := json.Unmarshal([]byte(m.Payload), customPayload); err == nil {
-            if customPayload.CustomTimestamp >= session.Timestamp {
-                s.log.Info(sessCtx, "custom event timestamp received: %v", m.Timestamp)
-                msg.Meta().Timestamp = customPayload.CustomTimestamp
-                s.log.Info(sessCtx, "custom event timestamp updated: %v", m.Timestamp)
-            }
-        }
-        defer s.Handle(types.WrapCustomEvent(m))
-    }
-
     if IsMobileType(msg.TypeID()) {
         if err := s.handleMobileMessage(sessCtx, session, msg); err != nil {
             if !postgres.IsPkeyViolation(err) {
@@ -11,7 +11,7 @@ import (
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/memory"
     "openreplay/backend/pkg/messages"
-    heuristicMetrics "openreplay/backend/pkg/metrics/heuristics"
+    metrics "openreplay/backend/pkg/metrics/heuristics"
     "openreplay/backend/pkg/queue/types"
 )

@@ -23,12 +23,11 @@ type heuristicsImpl struct {
     consumer types.Consumer
     events   builders.EventBuilder
     mm       memory.Manager
-    metrics  heuristicMetrics.Heuristics
     done     chan struct{}
     finished chan struct{}
 }

-func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager, metrics heuristicMetrics.Heuristics) service.Interface {
+func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager) service.Interface {
     s := &heuristicsImpl{
         log: log,
         ctx: context.Background(),

@@ -37,7 +36,6 @@ func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Co
         consumer: c,
         events:   e,
         mm:       mm,
-        metrics:  metrics,
         done:     make(chan struct{}),
         finished: make(chan struct{}),
     }

@@ -53,7 +51,7 @@ func (h *heuristicsImpl) run() {
             if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
                 h.log.Error(h.ctx, "can't send new event to queue: %s", err)
             } else {
-                h.metrics.IncreaseTotalEvents(messageTypeName(evt))
+                metrics.IncreaseTotalEvents(messageTypeName(evt))
             }
         case <-tick:
             h.producer.Flush(h.cfg.ProducerTimeout)
@@ -12,7 +12,6 @@ import (
     featureflagsAPI "openreplay/backend/pkg/featureflags/api"
     "openreplay/backend/pkg/flakeid"
     "openreplay/backend/pkg/logger"
-    "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/objectstorage/store"
     "openreplay/backend/pkg/projects"

@@ -37,8 +36,8 @@ type ServicesBuilder struct {
     UxTestsAPI api.Handlers
 }

-func New(log logger.Logger, cfg *http.Config, webMetrics web.Web, dbMetrics database.Database, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
-    projs := projects.New(log, pgconn, redis, dbMetrics)
+func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
+    projs := projects.New(log, pgconn, redis)
     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         return nil, err

@@ -54,11 +53,11 @@ func New(log logger.Logger, cfg *http.Config, webMetrics web.Web, dbMetrics data
     tokenizer := token.NewTokenizer(cfg.TokenSecret)
     conditions := conditions.New(pgconn)
     flaker := flakeid.NewFlaker(cfg.WorkerID)
-    sessions := sessions.New(log, pgconn, projs, redis, dbMetrics)
+    sessions := sessions.New(log, pgconn, projs, redis)
     featureFlags := featureflags.New(pgconn)
     tags := tags.New(log, pgconn)
     uxTesting := uxtesting.New(pgconn)
-    responser := api.NewResponser(webMetrics)
+    responser := api.NewResponser(metrics)
     builder := &ServicesBuilder{}
     if builder.WebAPI, err = websessions.NewHandlers(cfg, log, responser, producer, projs, sessions, uaModule, geoModule, tokenizer, conditions, flaker); err != nil {
         return nil, err
@@ -21,7 +21,6 @@ type session struct {

 // SessionEnder updates timestamp of last message for each session
 type SessionEnder struct {
-    metrics  ender.Ender
     timeout  int64
     sessions map[uint64]*session // map[sessionID]session
     timeCtrl *timeController

@@ -29,9 +28,8 @@ type SessionEnder struct {
     enabled  bool
 }

-func New(metrics ender.Ender, timeout int64, parts int) (*SessionEnder, error) {
+func New(timeout int64, parts int) (*SessionEnder, error) {
     return &SessionEnder{
-        metrics:  metrics,
         timeout:  timeout,
         sessions: make(map[uint64]*session),
         timeCtrl: NewTimeController(parts),

@@ -58,7 +56,7 @@ func (se *SessionEnder) ActivePartitions(parts []uint64) {
     for sessID, _ := range se.sessions {
         if !activeParts[sessID%se.parts] {
             delete(se.sessions, sessID)
-            se.metrics.DecreaseActiveSessions()
+            ender.DecreaseActiveSessions()
             removedSessions++
         } else {
             activeSessions++

@@ -91,8 +89,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
             isEnded:  false,
             isMobile: messages.IsMobileType(msg.TypeID()),
         }
-        se.metrics.IncreaseActiveSessions()
-        se.metrics.IncreaseTotalSessions()
+        ender.IncreaseActiveSessions()
+        ender.IncreaseTotalSessions()
         return
     }
     // Keep the highest user's timestamp for correct session duration value

@@ -141,8 +139,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
         sess.isEnded = true
         if res, _ := handler(sessID, sess.lastUserTime); res {
             delete(se.sessions, sessID)
-            se.metrics.DecreaseActiveSessions()
-            se.metrics.IncreaseClosedSessions()
+            ender.DecreaseActiveSessions()
+            ender.IncreaseClosedSessions()
             removedSessions++
             if endCase == 2 {
                 brokerTime[1]++
@@ -12,7 +12,7 @@ import (
     "openreplay/backend/internal/config/sink"
     "openreplay/backend/pkg/logger"
     "openreplay/backend/pkg/messages"
-    sinkMetrics "openreplay/backend/pkg/metrics/sink"
+    metrics "openreplay/backend/pkg/metrics/sink"
     "openreplay/backend/pkg/queue/types"
     "openreplay/backend/pkg/url/assets"
 )

@@ -30,10 +30,9 @@ type AssetsCache struct {
     producer  types.Producer
     cache     map[string]*CachedAsset
     blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
-    metrics   sinkMetrics.Sink
 }

-func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics sinkMetrics.Sink) *AssetsCache {
+func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
     assetsCache := &AssetsCache{
         log: log,
         cfg: cfg,

@@ -41,7 +40,6 @@ func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, produce
         producer:  producer,
         cache:     make(map[string]*CachedAsset, 64),
         blackList: make([]string, 0),
-        metrics:   metrics,
     }
     // Parse black list for cache layer
     if len(cfg.CacheBlackList) > 0 {

@@ -78,7 +76,7 @@ func (e *AssetsCache) clearCache() {
         if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
             deleted++
             delete(e.cache, id)
-            e.metrics.DecreaseCachedAssets()
+            metrics.DecreaseCachedAssets()
         }
     }
     e.log.Info(context.Background(), "cache cleaner: deleted %d/%d assets", deleted, cacheSize)

@@ -196,7 +194,7 @@ func parseHost(baseURL string) (string, error) {
 }

 func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
-    e.metrics.IncreaseTotalAssets()
+    metrics.IncreaseTotalAssets()
     // Try to find asset in cache
     h := md5.New()
     // Cut first part of url (scheme + host)

@@ -219,7 +217,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
     e.mutex.RUnlock()
     if ok {
         if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
-            e.metrics.IncreaseSkippedAssets()
+            metrics.IncreaseSkippedAssets()
             return cachedAsset.msg
         }
     }

@@ -231,8 +229,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
     start := time.Now()
     res := e.getRewrittenCSS(sessionID, baseURL, css)
     duration := time.Now().Sub(start).Milliseconds()
-    e.metrics.RecordAssetSize(float64(len(res)))
-    e.metrics.RecordProcessAssetDuration(float64(duration))
+    metrics.RecordAssetSize(float64(len(res)))
+    metrics.RecordProcessAssetDuration(float64(duration))
     // Save asset to cache if we spent more than threshold
     if duration > e.cfg.CacheThreshold {
         e.mutex.Lock()

@@ -241,7 +239,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
             ts:  time.Now(),
         }
         e.mutex.Unlock()
-        e.metrics.IncreaseCachedAssets()
+        metrics.IncreaseCachedAssets()
     }
     // Return rewritten asset
     return res
@@ -18,7 +18,7 @@ import (
 config "openreplay/backend/internal/config/storage"
 "openreplay/backend/pkg/logger"
 "openreplay/backend/pkg/messages"
-storageMetrics "openreplay/backend/pkg/metrics/storage"
+metrics "openreplay/backend/pkg/metrics/storage"
 "openreplay/backend/pkg/objectstorage"
 "openreplay/backend/pkg/pool"
 )
@@ -77,10 +77,9 @@ type Storage struct {
 splitTime uint64
 processorPool pool.WorkerPool
 uploaderPool pool.WorkerPool
-metrics storageMetrics.Storage
 }

-func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, metrics storageMetrics.Storage) (*Storage, error) {
+func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*Storage, error) {
 switch {
 case cfg == nil:
 return nil, fmt.Errorf("config is empty")
@@ -93,7 +92,6 @@ func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectS
 objStorage: objStorage,
 startBytes: make([]byte, cfg.FileSplitSize),
 splitTime: parseSplitTime(cfg.FileSplitTime),
-metrics: metrics,
 }
 s.processorPool = pool.NewPool(1, 1, s.doCompression)
 s.uploaderPool = pool.NewPool(1, 1, s.uploadSession)
@@ -143,7 +141,7 @@ func (s *Storage) Process(ctx context.Context, msg *messages.SessionEnd) (err er
 if err != nil {
 if strings.Contains(err.Error(), "big file") {
 s.log.Warn(ctx, "can't process session: %s", err)
-s.metrics.IncreaseStorageTotalSkippedSessions()
+metrics.IncreaseStorageTotalSkippedSessions()
 return nil
 }
 return err
@@ -161,8 +159,8 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
 return err
 }

-s.metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
-s.metrics.RecordSessionSize(float64(len(mob)), tp.String())
+metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
+metrics.RecordSessionSize(float64(len(mob)), tp.String())

 // Put opened session file into task struct
 task.SetMob(mob, index, tp)
@@ -176,7 +174,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
 // Check file size before download into memory
 info, err := os.Stat(filePath)
 if err == nil && info.Size() > s.cfg.MaxFileSize {
-s.metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
+metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
 return nil, -1, fmt.Errorf("big file, size: %d", info.Size())
 }
 // Read file into memory
@@ -192,7 +190,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
 if err != nil {
 return nil, -1, fmt.Errorf("can't sort session, err: %s", err)
 }
-s.metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
 return mob, index, nil
 }

@@ -236,12 +234,12 @@ func (s *Storage) packSession(task *Task, tp FileType) {
 // Compression
 start := time.Now()
 data := s.compress(task.ctx, mob, task.compression)
-s.metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())

 // Encryption
 start = time.Now()
 result := s.encryptSession(task.ctx, data.Bytes(), task.key)
-s.metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
+metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())

 if tp == DOM {
 task.doms = bytes.NewBuffer(result)
@@ -298,8 +296,8 @@ func (s *Storage) packSession(task *Task, tp FileType) {
 wg.Wait()

 // Record metrics
-s.metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
-s.metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
+metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
+metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
 }

 func (s *Storage) encryptSession(ctx context.Context, data []byte, encryptionKey string) []byte {
@@ -384,7 +382,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 go func() {
 if task.doms != nil {
 // Record compression ratio
-s.metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
+metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
 // Upload session to s3
 start := time.Now()
 if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -397,7 +395,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 go func() {
 if task.dome != nil {
 // Record compression ratio
-s.metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
+metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
 // Upload session to s3
 start := time.Now()
 if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -410,7 +408,7 @@ func (s *Storage) uploadSession(payload interface{}) {
 go func() {
 if task.dev != nil {
 // Record compression ratio
-s.metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
+metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
 // Upload session to s3
 start := time.Now()
 if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@@ -421,9 +419,9 @@ func (s *Storage) uploadSession(payload interface{}) {
 wg.Done()
 }()
 wg.Wait()
-s.metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
-s.metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
-s.metrics.IncreaseStorageTotalSessions()
+metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
+metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
+metrics.IncreaseStorageTotalSessions()
 }

 func (s *Storage) doCompression(payload interface{}) {

@@ -3,7 +3,6 @@ package analytics
 import (
 "github.com/go-playground/validator/v10"
 "openreplay/backend/pkg/analytics/charts"
-"openreplay/backend/pkg/metrics/database"
 "time"

 "openreplay/backend/internal/config/analytics"
@@ -27,9 +26,9 @@ type ServicesBuilder struct {
 ChartsAPI api.Handlers
 }

-func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServicesBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, pgconn pool.Pool) (*ServicesBuilder, error) {
 responser := api.NewResponser(webMetrics)
-audiTrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
+audiTrail, err := tracer.NewTracer(log, pgconn)
 if err != nil {
 return nil, err
 }

@@ -18,14 +18,13 @@ type Bulk interface {
 }

 type bulkImpl struct {
 conn driver.Conn
-metrics database.Database
 table string
 query string
 values [][]interface{}
 }

-func NewBulk(conn driver.Conn, metrics database.Database, table, query string) (Bulk, error) {
+func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
 switch {
 case conn == nil:
 return nil, errors.New("clickhouse connection is empty")
@@ -35,11 +34,10 @@ func NewBulk(conn driver.Conn, metrics database.Database, table, query string) (
 return nil, errors.New("query is empty")
 }
 return &bulkImpl{
 conn: conn,
-metrics: metrics,
 table: table,
 query: query,
 values: make([][]interface{}, 0),
 }, nil
 }

@@ -62,8 +60,8 @@ func (b *bulkImpl) Send() error {
 }
 err = batch.Send()
 // Save bulk metrics
-b.metrics.RecordBulkElements(float64(len(b.values)), "ch", b.table)
-b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
+database.RecordBulkElements(float64(len(b.values)), "ch", b.table)
+database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
 // Prepare values slice for a new data
 b.values = make([][]interface{}, 0)
 return err

@@ -18,7 +18,6 @@ import (
 "openreplay/backend/pkg/db/types"
 "openreplay/backend/pkg/hashid"
 "openreplay/backend/pkg/messages"
-"openreplay/backend/pkg/metrics/database"
 "openreplay/backend/pkg/sessions"
 "openreplay/backend/pkg/url"
 )
@@ -58,14 +57,13 @@ func NewTask() *task {

 type connectorImpl struct {
 conn driver.Conn
-metrics database.Database
 batches map[string]Bulk //driver.Batch
 workerTask chan *task
 done chan struct{}
 finished chan struct{}
 }

-func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector {
+func NewConnector(cfg common.Clickhouse) Connector {
 conn, err := clickhouse.Open(&clickhouse.Options{
 Addr: []string{cfg.GetTrimmedURL()},
 Auth: clickhouse.Auth{
@@ -86,7 +84,6 @@ func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector {

 c := &connectorImpl{
 conn: conn,
-metrics: metrics,
 batches: make(map[string]Bulk, 20),
 workerTask: make(chan *task, 1),
 done: make(chan struct{}),
@@ -97,7 +94,7 @@ func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector {
 }

 func (c *connectorImpl) newBatch(name, query string) error {
-batch, err := NewBulk(c.conn, c.metrics, name, query)
+batch, err := NewBulk(c.conn, name, query)
 if err != nil {
 return fmt.Errorf("can't create new batch: %s", err)
 }
@@ -106,25 +103,25 @@ func (c *connectorImpl) newBatch(name, query string) error {
 }

 var batches = map[string]string{
-"sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?)",
+"sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?)",
 "autocompletes": "INSERT INTO experimental.autocomplete (project_id, type, value) VALUES (?, ?, SUBSTR(?, 1, 8000))",
-"pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", error_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
 "issues": "INSERT INTO experimental.issues (project_id, issue_id, type, context_string) VALUES (?, ?, ?, ?)",
 "mobile_sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?)",
-"mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"mobile_clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"mobile_clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"mobile_swipes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"mobile_swipes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"mobile_inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"mobile_inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"mobile_requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"mobile_requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
-"mobile_crashes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
+"mobile_crashes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
 }

 func (c *connectorImpl) Prepare() error {
@@ -215,7 +212,6 @@ func (c *connectorImpl) InsertWebSession(session *sessions.Session) error {
 session.Metadata8,
 session.Metadata9,
 session.Metadata10,
-"web",
 session.Timezone,
 session.UtmSource,
 session.UtmMedium,
@@ -247,10 +243,8 @@ func (c *connectorImpl) InsertWebInputDuration(session *sessions.Session, msg *m
 return nil
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "label": msg.Label,
 "hesitation_time": nullableUint32(uint32(msg.HesitationTime)),
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal input event: %s", err)
@@ -265,8 +259,6 @@ func (c *connectorImpl) InsertWebInputDuration(session *sessions.Session, msg *m
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 nullableUint16(uint16(msg.InputDuration)),
 jsonString,
 ); err != nil {
@@ -283,14 +275,12 @@ func (c *connectorImpl) InsertMouseThrashing(session *sessions.Session, msg *mes
 return fmt.Errorf("can't extract url parts: %s", err)
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "issue_id": issueID,
 "issue_type": "mouse_thrashing",
 "url": cropString(msg.Url),
 "url_host": host,
 "url_path": path,
 "url_hostpath": hostpath,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal issue event: %s", err)
@@ -305,8 +295,6 @@ func (c *connectorImpl) InsertMouseThrashing(session *sessions.Session, msg *mes
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 "mouse_thrashing",
 issueID,
 jsonString,
@@ -339,14 +327,12 @@ func (c *connectorImpl) InsertIssue(session *sessions.Session, msg *messages.Iss
 return fmt.Errorf("can't extract url parts: %s", err)
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "issue_id": issueID,
 "issue_type": msg.Type,
 "url": cropString(msg.Url),
 "url_host": host,
 "url_path": path,
 "url_hostpath": hostpath,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal issue event: %s", err)
@@ -361,8 +347,6 @@ func (c *connectorImpl) InsertIssue(session *sessions.Session, msg *messages.Iss
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 msg.Type,
 issueID,
 jsonString,
@@ -434,8 +418,6 @@ func (c *connectorImpl) InsertWebPageEvent(session *sessions.Session, msg *messa
 "dom_building_time": domBuildingTime,
 "dom_content_loaded_event_time": domContentLoadedEventTime,
 "load_event_time": loadEventTime,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal page event: %s", err)
@@ -450,8 +432,6 @@ func (c *connectorImpl) InsertWebPageEvent(session *sessions.Session, msg *messa
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 cropString(msg.URL),
 jsonString,
 ); err != nil {
@@ -485,17 +465,15 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
 return fmt.Errorf("can't extract url parts: %s", err)
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "label": msg.Label,
 "hesitation_time": nullableUint32(uint32(msg.HesitationTime)),
 "selector": msg.Selector,
 "normalized_x": nX,
 "normalized_y": nY,
 "url": cropString(msg.Url),
 "url_host": host,
 "url_path": path,
 "url_hostpath": hostpath,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal click event: %s", err)
@@ -510,8 +488,6 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 cropString(msg.Url),
 jsonString,
 ); err != nil {
@@ -522,6 +498,11 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
 }

 func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *types.ErrorEvent) error {
+keys, values := make([]string, 0, len(msg.Tags)), make([]*string, 0, len(msg.Tags))
+for k, v := range msg.Tags {
+keys = append(keys, k)
+values = append(values, v)
+}
 // Check error source before insert to avoid panic from clickhouse lib
 switch msg.Source {
 case "js_exception", "bugsnag", "cloudwatch", "datadog", "elasticsearch", "newrelic", "rollbar", "sentry", "stackdriver", "sumologic":
@@ -530,11 +511,12 @@ func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *type
 }
 msgID, _ := msg.ID(session.ProjectID)
 jsonString, err := json.Marshal(map[string]interface{}{
 "source": msg.Source,
 "name": nullableString(msg.Name),
 "message": msg.Message,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
+"error_id": msgID,
+"error_tags_keys": keys,
+"error_tags_values": values,
 })
 if err != nil {
 return fmt.Errorf("can't marshal error event: %s", err)
@@ -549,9 +531,6 @@ func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *type
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
-msgID,
 jsonString,
 ); err != nil {
 c.checkError("errors", err)
@@ -583,8 +562,6 @@ func (c *connectorImpl) InsertWebPerformanceTrackAggr(session *sessions.Session,
 "min_used_js_heap_size": msg.MinUsedJSHeapSize,
 "avg_used_js_heap_size": msg.AvgUsedJSHeapSize,
 "max_used_js_heap_size": msg.MaxUsedJSHeapSize,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal performance event: %s", err)
@@ -599,8 +576,6 @@ func (c *connectorImpl) InsertWebPerformanceTrackAggr(session *sessions.Session,
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("performance", err)
@@ -624,18 +599,16 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
 return fmt.Errorf("can't extract url parts: %s", err)
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "request_body": request,
 "response_body": response,
 "status": uint16(msg.Status),
 "method": url.EnsureMethod(msg.Method),
 "success": msg.Status < 400,
 "transfer_size": uint32(msg.TransferredBodySize),
 "url": cropString(msg.URL),
 "url_host": host,
 "url_path": path,
 "url_hostpath": hostpath,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal request event: %s", err)
@@ -650,8 +623,6 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 nullableUint16(uint16(msg.Duration)),
 jsonString,
 ); err != nil {
@@ -663,10 +634,8 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N

 func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.CustomEvent) error {
 jsonString, err := json.Marshal(map[string]interface{}{
 "name": msg.Name,
 "payload": msg.Payload,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal custom event: %s", err)
@@ -681,8 +650,6 @@ func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.Cu
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("custom", err)
@@ -693,11 +660,9 @@ func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.Cu

 func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.GraphQL) error {
 jsonString, err := json.Marshal(map[string]interface{}{
 "name": msg.OperationName,
 "request_body": nullableString(msg.Variables),
 "response_body": nullableString(msg.Response),
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal graphql event: %s", err)
@@ -712,8 +677,6 @@ func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.G
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("graphql", err)
@@ -761,7 +724,7 @@ func (c *connectorImpl) InsertMobileSession(session *sessions.Session) error {
 session.Metadata8,
 session.Metadata9,
 session.Metadata10,
-"mobile",
+"ios",
 session.Timezone,
 ); err != nil {
 c.checkError("mobile_sessions", err)
@@ -772,10 +735,8 @@ func (c *connectorImpl) InsertMobileSession(session *sessions.Session) error {

 func (c *connectorImpl) InsertMobileCustom(session *sessions.Session, msg *messages.MobileEvent) error {
 jsonString, err := json.Marshal(map[string]interface{}{
 "name": msg.Name,
 "payload": msg.Payload,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal mobile custom event: %s", err)
@@ -790,8 +751,6 @@ func (c *connectorImpl) InsertMobileCustom(session *sessions.Session, msg *messa
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("mobile_custom", err)
@@ -805,9 +764,7 @@ func (c *connectorImpl) InsertMobileClick(session *sessions.Session, msg *messag
 return nil
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "label": msg.Label,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal mobile clicks event: %s", err)
@@ -822,8 +779,6 @@ func (c *connectorImpl) InsertMobileClick(session *sessions.Session, msg *messag
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("mobile_clicks", err)
@@ -837,10 +792,8 @@ func (c *connectorImpl) InsertMobileSwipe(session *sessions.Session, msg *messag
 return nil
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "label": msg.Label,
 "direction": nullableString(msg.Direction),
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal mobile swipe event: %s", err)
@@ -855,8 +808,6 @@ func (c *connectorImpl) InsertMobileSwipe(session *sessions.Session, msg *messag
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("mobile_swipes", err)
@@ -870,9 +821,7 @@ func (c *connectorImpl) InsertMobileInput(session *sessions.Session, msg *messag
 return nil
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "label": msg.Label,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal mobile input event: %s", err)
@@ -887,8 +836,6 @@ func (c *connectorImpl) InsertMobileInput(session *sessions.Session, msg *messag
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("mobile_inputs", err)
@@ -908,15 +855,13 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess
 response = &msg.Response
 }
 jsonString, err := json.Marshal(map[string]interface{}{
 "url": cropString(msg.URL),
 "request_body": request,
 "response_body": response,
 "status": uint16(msg.Status),
 "method": url.EnsureMethod(msg.Method),
 "duration": uint16(msg.Duration),
 "success": msg.Status < 400,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal mobile request event: %s", err)
@@ -931,8 +876,6 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("mobile_requests", err)
@@ -943,11 +886,9 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess

 func (c *connectorImpl) InsertMobileCrash(session *sessions.Session, msg *messages.MobileCrash) error {
 jsonString, err := json.Marshal(map[string]interface{}{
 "name": msg.Name,
 "reason": msg.Reason,
 "stacktrace": msg.Stacktrace,
-"user_device": session.UserDevice,
-"user_device_type": session.UserDeviceType,
 })
 if err != nil {
 return fmt.Errorf("can't marshal mobile crash event: %s", err)
@@ -962,8 +903,6 @@ func (c *connectorImpl) InsertMobileCrash(session *sessions.Session, msg *messag
 eventTime.Unix(),
 session.UserUUID,
 true,
-session.Platform,
-session.UserOSVersion,
 jsonString,
 ); err != nil {
 c.checkError("mobile_crashes", err)

@@ -52,7 +52,6 @@ func NewBatchesTask(size int) *batchesTask {
 type BatchSet struct {
 log logger.Logger
 c pool.Pool
-metrics database.Database
 ctx context.Context
 batches map[uint64]*SessionBatch
 workerTask chan *batchesTask
@@ -60,11 +59,10 @@ type BatchSet struct {
 finished chan struct{}
 }

-func NewBatchSet(log logger.Logger, c pool.Pool, metrics database.Database) *BatchSet {
+func NewBatchSet(log logger.Logger, c pool.Pool) *BatchSet {
 bs := &BatchSet{
 log: log,
 c: c,
-metrics: metrics,
 ctx: context.Background(),
 batches: make(map[uint64]*SessionBatch),
 workerTask: make(chan *batchesTask, 1),
@@ -106,7 +104,7 @@ func (conn *BatchSet) Stop() {
 func (conn *BatchSet) sendBatches(t *batchesTask) {
 for _, batch := range t.batches {
 // Record batch size
-conn.metrics.RecordBatchElements(float64(batch.Len()))
+database.RecordBatchElements(float64(batch.Len()))

 start := time.Now()

@@ -122,7 +120,7 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
 }
 }
 br.Close() // returns err
-conn.metrics.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
+database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
 }
 }

@@ -24,7 +24,6 @@ type Bulk interface {

 type bulkImpl struct {
 conn pool.Pool
-metrics database.Database
 table string
 columns string
 template string
@@ -76,12 +75,12 @@ func (b *bulkImpl) send() error {
 return fmt.Errorf("send bulk err: %s", err)
 }
 // Save bulk metrics
-b.metrics.RecordBulkElements(float64(size), "pg", b.table)
-b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
+database.RecordBulkElements(float64(size), "pg", b.table)
+database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
 return nil
 }

-func NewBulk(conn pool.Pool, metrics database.Database, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
+func NewBulk(conn pool.Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
 switch {
 case conn == nil:
 return nil, errors.New("db conn is empty")
@@ -98,7 +97,6 @@ func NewBulk(conn pool.Pool, metrics database.Database, table, columns, template
 }
 return &bulkImpl{
 conn: conn,
-metrics: metrics,
 table: table,
 columns: columns,
 template: template,

@@ -2,7 +2,6 @@ package postgres

 import (
 "context"
-"openreplay/backend/pkg/metrics/database"

 "openreplay/backend/pkg/db/postgres/pool"
 "openreplay/backend/pkg/logger"
@@ -22,7 +21,6 @@ type BulkSet struct {
 log logger.Logger
 c pool.Pool
 ctx context.Context
-metrics database.Database
 autocompletes Bulk
 requests Bulk
 customEvents Bulk
@@ -45,11 +43,10 @@ type BulkSet struct {
 finished chan struct{}
 }

-func NewBulkSet(log logger.Logger, c pool.Pool, metrics database.Database) *BulkSet {
+func NewBulkSet(log logger.Logger, c pool.Pool) *BulkSet {
 bs := &BulkSet{
 log: log,
 c: c,
-metrics: metrics,
 ctx: context.Background(),
 workerTask: make(chan *bulksTask, 1),
 done: make(chan struct{}),
@@ -103,7 +100,7 @@ func (conn *BulkSet) Get(name string) Bulk {

 func (conn *BulkSet) initBulks() {
 var err error
-conn.autocompletes, err = NewBulk(conn.c, conn.metrics,
+conn.autocompletes, err = NewBulk(conn.c,
 "autocomplete",
 "(value, type, project_id)",
 "($%d, $%d, $%d)",
@@ -111,7 +108,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create autocomplete bulk: %s", err)
 }
-conn.requests, err = NewBulk(conn.c, conn.metrics,
+conn.requests, err = NewBulk(conn.c,
 "events_common.requests",
 "(session_id, timestamp, seq_index, url, duration, success)",
 "($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
@@ -119,7 +116,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create requests bulk: %s", err)
 }
-conn.customEvents, err = NewBulk(conn.c, conn.metrics,
+conn.customEvents, err = NewBulk(conn.c,
 "events_common.customs",
 "(session_id, timestamp, seq_index, name, payload)",
 "($%d, $%d, $%d, LEFT($%d, 2000), $%d)",
@@ -127,7 +124,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create customEvents bulk: %s", err)
 }
-conn.webPageEvents, err = NewBulk(conn.c, conn.metrics,
+conn.webPageEvents, err = NewBulk(conn.c,
 "events.pages",
 "(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
 "load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
@@ -139,7 +136,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webPageEvents bulk: %s", err)
 }
-conn.webInputDurations, err = NewBulk(conn.c, conn.metrics,
+conn.webInputDurations, err = NewBulk(conn.c,
 "events.inputs",
 "(session_id, message_id, timestamp, label, hesitation, duration)",
 "($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''), $%d, $%d)",
@@ -147,7 +144,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webInputDurations bulk: %s", err)
 }
-conn.webGraphQL, err = NewBulk(conn.c, conn.metrics,
+conn.webGraphQL, err = NewBulk(conn.c,
 "events.graphql",
 "(session_id, timestamp, message_id, name, request_body, response_body)",
 "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@@ -155,7 +152,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webGraphQL bulk: %s", err)
 }
-conn.webErrors, err = NewBulk(conn.c, conn.metrics,
+conn.webErrors, err = NewBulk(conn.c,
 "errors",
 "(error_id, project_id, source, name, message, payload)",
 "($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
@@ -163,7 +160,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webErrors bulk: %s", err)
 }
-conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics,
+conn.webErrorEvents, err = NewBulk(conn.c,
 "events.errors",
 "(session_id, message_id, timestamp, error_id)",
 "($%d, $%d, $%d, $%d)",
@@ -171,7 +168,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webErrorEvents bulk: %s", err)
 }
-conn.webErrorTags, err = NewBulk(conn.c, conn.metrics,
+conn.webErrorTags, err = NewBulk(conn.c,
 "public.errors_tags",
 "(session_id, message_id, error_id, key, value)",
 "($%d, $%d, $%d, $%d, $%d)",
@@ -179,7 +176,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webErrorTags bulk: %s", err)
 }
-conn.webIssues, err = NewBulk(conn.c, conn.metrics,
+conn.webIssues, err = NewBulk(conn.c,
 "issues",
 "(project_id, issue_id, type, context_string)",
 "($%d, $%d, $%d, $%d)",
@@ -187,7 +184,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webIssues bulk: %s", err)
 }
-conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics,
+conn.webIssueEvents, err = NewBulk(conn.c,
 "events_common.issues",
 "(session_id, issue_id, timestamp, seq_index, payload)",
 "($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
@@ -195,7 +192,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webIssueEvents bulk: %s", err)
 }
-conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics,
+conn.webCustomEvents, err = NewBulk(conn.c,
 "events_common.customs",
 "(session_id, seq_index, timestamp, name, payload, level)",
 "($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@@ -203,7 +200,7 @@ func (conn *BulkSet) initBulks() {
 if err != nil {
 conn.log.Fatal(conn.ctx, "can't create webCustomEvents bulk: %s", err)
 }
-conn.webClickEvents, err = NewBulk(conn.c, conn.metrics,
+conn.webClickEvents, err = NewBulk(conn.c,
||||||
"events.clicks",
|
"events.clicks",
|
||||||
"(session_id, message_id, timestamp, label, selector, url, path, hesitation)",
|
"(session_id, message_id, timestamp, label, selector, url, path, hesitation)",
|
||||||
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)",
|
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)",
|
||||||
|
|
@ -211,7 +208,7 @@ func (conn *BulkSet) initBulks() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
|
conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
|
||||||
}
|
}
|
||||||
conn.webClickXYEvents, err = NewBulk(conn.c, conn.metrics,
|
conn.webClickXYEvents, err = NewBulk(conn.c,
|
||||||
"events.clicks",
|
"events.clicks",
|
||||||
"(session_id, message_id, timestamp, label, selector, url, path, hesitation, normalized_x, normalized_y)",
|
"(session_id, message_id, timestamp, label, selector, url, path, hesitation, normalized_x, normalized_y)",
|
||||||
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d, $%d, $%d)",
|
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d, $%d, $%d)",
|
||||||
|
|
@ -219,7 +216,7 @@ func (conn *BulkSet) initBulks() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
|
conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
|
||||||
}
|
}
|
||||||
conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics,
|
conn.webNetworkRequest, err = NewBulk(conn.c,
|
||||||
"events_common.requests",
|
"events_common.requests",
|
||||||
"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success, transfer_size)",
|
"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success, transfer_size)",
|
||||||
"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d, $%d)",
|
"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d, $%d)",
|
||||||
|
|
@ -227,7 +224,7 @@ func (conn *BulkSet) initBulks() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
conn.log.Fatal(conn.ctx, "can't create webNetworkRequest bulk: %s", err)
|
conn.log.Fatal(conn.ctx, "can't create webNetworkRequest bulk: %s", err)
|
||||||
}
|
}
|
||||||
conn.webCanvasNodes, err = NewBulk(conn.c, conn.metrics,
|
conn.webCanvasNodes, err = NewBulk(conn.c,
|
||||||
"events.canvas_recordings",
|
"events.canvas_recordings",
|
||||||
"(session_id, recording_id, timestamp)",
|
"(session_id, recording_id, timestamp)",
|
||||||
"($%d, $%d, $%d)",
|
"($%d, $%d, $%d)",
|
||||||
|
|
@ -235,7 +232,7 @@ func (conn *BulkSet) initBulks() {
|
||||||
if err != nil {
|
if err != nil {
|
||||||
conn.log.Fatal(conn.ctx, "can't create webCanvasNodes bulk: %s", err)
|
conn.log.Fatal(conn.ctx, "can't create webCanvasNodes bulk: %s", err)
|
||||||
}
|
}
|
||||||
conn.webTagTriggers, err = NewBulk(conn.c, conn.metrics,
|
conn.webTagTriggers, err = NewBulk(conn.c,
|
||||||
"events.tags",
|
"events.tags",
|
||||||
"(session_id, timestamp, seq_index, tag_id)",
|
"(session_id, timestamp, seq_index, tag_id)",
|
||||||
"($%d, $%d, $%d, $%d)",
|
"($%d, $%d, $%d, $%d)",
|
||||||
|
|
|
||||||
|
|
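The bulks above all share one idea: a per-row placeholder template such as "($%d, $%d, $%d, $%d)" is expanded with running parameter indices and joined into a single multi-row INSERT. The following is a minimal, self-contained sketch of that expansion step; the function and variable names are illustrative and not the repository's actual NewBulk implementation.

    package main

    import (
        "fmt"
        "strings"
    )

    // buildBulkQuery expands a per-row placeholder template into one
    // multi-row INSERT statement. Table, columns, and template values
    // below are taken from the webTagTriggers bulk shown in the diff;
    // everything else is an assumed stand-in.
    func buildBulkQuery(table, columns, rowTemplate string, rows int) string {
        argsPerRow := strings.Count(rowTemplate, "%d")
        values := make([]string, 0, rows)
        idx := 1
        for r := 0; r < rows; r++ {
            nums := make([]interface{}, argsPerRow)
            for i := 0; i < argsPerRow; i++ {
                nums[i] = idx
                idx++
            }
            values = append(values, fmt.Sprintf(rowTemplate, nums...))
        }
        return fmt.Sprintf("INSERT INTO %s %s VALUES %s", table, columns, strings.Join(values, ", "))
    }

    func main() {
        q := buildBulkQuery("events.tags",
            "(session_id, timestamp, seq_index, tag_id)",
            "($%d, $%d, $%d, $%d)", 2)
        fmt.Println(q)
        // INSERT INTO events.tags (session_id, timestamp, seq_index, tag_id)
        // VALUES ($1, $2, $3, $4), ($5, $6, $7, $8)
    }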
@@ -2,7 +2,6 @@ package postgres

 import (
     "context"
-    "openreplay/backend/pkg/metrics/database"

     "openreplay/backend/pkg/db/postgres/batch"
     "openreplay/backend/pkg/db/postgres/pool"
@@ -23,7 +22,7 @@ type Conn struct {
     chConn CH
 }

-func NewConn(log logger.Logger, pool pool.Pool, ch CH, metrics database.Database) *Conn {
+func NewConn(log logger.Logger, pool pool.Pool, ch CH) *Conn {
     if pool == nil {
         log.Fatal(context.Background(), "pg pool is empty")
     }
@@ -31,8 +30,8 @@ func NewConn(log logger.Logger, pool pool.Pool, ch CH, metrics database.Database
         log:     log,
         Pool:    pool,
         chConn:  ch,
-        bulks:   NewBulkSet(log, pool, metrics),
-        batches: batch.NewBatchSet(log, pool, metrics),
+        bulks:   NewBulkSet(log, pool),
+        batches: batch.NewBatchSet(log, pool),
     }
 }
@@ -181,6 +181,11 @@ func (conn *Conn) InsertWebErrorEvent(sess *sessions.Session, e *types.ErrorEven
     if err := conn.bulks.Get("webErrorEvents").Append(sess.SessionID, truncSqIdx(e.MessageID), e.Timestamp, errorID); err != nil {
         conn.log.Error(sessCtx, "insert web error event err: %s", err)
     }
+    for key, value := range e.Tags {
+        if err := conn.bulks.Get("webErrorTags").Append(sess.SessionID, truncSqIdx(e.MessageID), errorID, key, value); err != nil {
+            conn.log.Error(sessCtx, "insert web error token err: %s", err)
+        }
+    }
     return nil
 }
@@ -23,12 +23,58 @@ type Pool interface {
 }

 type poolImpl struct {
     url  string
     conn *pgxpool.Pool
-    metrics database.Database
 }

-func New(metrics database.Database, url string) (Pool, error) {
+func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
+    start := time.Now()
+    res, err := p.conn.Query(getTimeoutContext(), sql, args...)
+    method, table := methodName(sql)
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    database.IncreaseTotalRequests(method, table)
+    return res, err
+}
+
+func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
+    start := time.Now()
+    res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
+    method, table := methodName(sql)
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    database.IncreaseTotalRequests(method, table)
+    return res
+}
+
+func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
+    start := time.Now()
+    _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
+    method, table := methodName(sql)
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    database.IncreaseTotalRequests(method, table)
+    return err
+}
+
+func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
+    start := time.Now()
+    res := p.conn.SendBatch(getTimeoutContext(), b)
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
+    database.IncreaseTotalRequests("sendBatch", "")
+    return res
+}
+
+func (p *poolImpl) Begin() (*Tx, error) {
+    start := time.Now()
+    tx, err := p.conn.Begin(context.Background())
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
+    database.IncreaseTotalRequests("begin", "")
+    return &Tx{tx}, err
+}
+
+func (p *poolImpl) Close() {
+    p.conn.Close()
+}
+
+func New(url string) (Pool, error) {
     if url == "" {
         return nil, errors.New("pg connection url is empty")
     }
@@ -37,73 +83,24 @@ func New(metrics database.Database, url string) (Pool, error) {
         return nil, fmt.Errorf("pgxpool.Connect error: %v", err)
     }
     res := &poolImpl{
         url:  url,
         conn: conn,
-        metrics: metrics,
     }
     return res, nil
 }

-func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
-    start := time.Now()
-    res, err := p.conn.Query(getTimeoutContext(), sql, args...)
-    method, table := methodName(sql)
-    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    p.metrics.IncreaseTotalRequests(method, table)
-    return res, err
-}
-
-func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
-    start := time.Now()
-    res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
-    method, table := methodName(sql)
-    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    p.metrics.IncreaseTotalRequests(method, table)
-    return res
-}
-
-func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
-    start := time.Now()
-    _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
-    method, table := methodName(sql)
-    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    p.metrics.IncreaseTotalRequests(method, table)
-    return err
-}
-
-func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
-    start := time.Now()
-    res := p.conn.SendBatch(getTimeoutContext(), b)
-    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
-    p.metrics.IncreaseTotalRequests("sendBatch", "")
-    return res
-}
-
-func (p *poolImpl) Begin() (*Tx, error) {
-    start := time.Now()
-    tx, err := p.conn.Begin(context.Background())
-    p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
-    p.metrics.IncreaseTotalRequests("begin", "")
-    return &Tx{tx, p.metrics}, err
-}
-
-func (p *poolImpl) Close() {
-    p.conn.Close()
-}
-
 // TX - start

 type Tx struct {
     pgx.Tx
-    metrics database.Database
 }

 func (tx *Tx) TxExec(sql string, args ...interface{}) error {
     start := time.Now()
     _, err := tx.Exec(context.Background(), sql, args...)
     method, table := methodName(sql)
-    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    tx.metrics.IncreaseTotalRequests(method, table)
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    database.IncreaseTotalRequests(method, table)
     return err
 }
@@ -111,24 +108,24 @@ func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
     start := time.Now()
     res := tx.QueryRow(context.Background(), sql, args...)
     method, table := methodName(sql)
-    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
-    tx.metrics.IncreaseTotalRequests(method, table)
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+    database.IncreaseTotalRequests(method, table)
     return res
 }

 func (tx *Tx) TxRollback() error {
     start := time.Now()
     err := tx.Rollback(context.Background())
-    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
-    tx.metrics.IncreaseTotalRequests("rollback", "")
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
+    database.IncreaseTotalRequests("rollback", "")
     return err
 }

 func (tx *Tx) TxCommit() error {
     start := time.Now()
     err := tx.Commit(context.Background())
-    tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
-    tx.metrics.IncreaseTotalRequests("commit", "")
+    database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
+    database.IncreaseTotalRequests("commit", "")
     return err
 }
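The pool change above swaps an injected database.Database metrics object for package-level functions: every call site measures the duration itself and reports through RecordRequestDuration and IncreaseTotalRequests. A minimal, self-contained sketch of that wrapping pattern follows; the two metric functions here are local stand-ins, not the repository's pkg/metrics/database package.

    package main

    import (
        "fmt"
        "time"
    )

    // Stand-ins for package-level metric hooks; the real ones feed
    // Prometheus histograms and counters (see the metrics diffs below).
    func RecordRequestDuration(durMillis float64, method, table string) {
        fmt.Printf("duration %.2fms method=%s table=%s\n", durMillis, method, table)
    }

    func IncreaseTotalRequests(method, table string) {
        fmt.Printf("count++ method=%s table=%s\n", method, table)
    }

    // timedExec mirrors the shape of poolImpl.Exec after the refactor:
    // measure, run the SQL call, then report via package-level functions
    // instead of a metrics object carried in the struct.
    func timedExec(method, table string, run func() error) error {
        start := time.Now()
        err := run()
        RecordRequestDuration(float64(time.Since(start).Milliseconds()), method, table)
        IncreaseTotalRequests(method, table)
        return err
    }

    func main() {
        _ = timedExec("insert", "events.pages", func() error {
            time.Sleep(5 * time.Millisecond) // stand-in for the real SQL call
            return nil
        })
    }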
@@ -61,6 +61,7 @@ func parseTags(tagsJSON string) (tags map[string]*string, err error) {
 }

 func WrapJSException(m *JSException) (*ErrorEvent, error) {
+    meta, err := parseTags(m.Metadata)
     return &ErrorEvent{
         MessageID: m.Meta().Index,
         Timestamp: m.Meta().Timestamp,
@@ -68,8 +69,9 @@ func WrapJSException(m *JSException) (*ErrorEvent, error) {
         Name:       m.Name,
         Message:    m.Message,
         Payload:    m.Payload,
+        Tags:       meta,
         OriginType: m.TypeID(),
-    }, nil
+    }, err
 }

 func WrapIntegrationEvent(m *IntegrationEvent) *ErrorEvent {
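Only the signature of parseTags is visible in the hunk header above (it returns tags map[string]*string). A rough, self-contained sketch of what such a helper could look like is given below; the real implementation in the repository may differ, and the sample JSON is invented for illustration.

    package main

    import (
        "encoding/json"
        "fmt"
    )

    // parseTags-style helper, assumed behavior: decode a JSON object of
    // string-or-null values into map[string]*string, where null becomes a
    // nil pointer.
    func parseTags(tagsJSON string) (map[string]*string, error) {
        if tagsJSON == "" {
            return nil, nil
        }
        tags := map[string]*string{}
        err := json.Unmarshal([]byte(tagsJSON), &tags)
        return tags, err
    }

    func main() {
        tags, err := parseTags(`{"env":"prod","release":null}`)
        fmt.Println(err, tags["env"] != nil, tags["release"] == nil)
    }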
@@ -2,7 +2,6 @@ package integrations

 import (
     "openreplay/backend/pkg/integrations/service"
-    "openreplay/backend/pkg/metrics/database"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/server/tracer"
     "time"
@@ -24,7 +23,7 @@ type ServiceBuilder struct {
     IntegrationsAPI api.Handlers
 }

-func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServiceBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, pgconn pool.Pool) (*ServiceBuilder, error) {
     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         return nil, err
@@ -38,7 +37,7 @@ func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics w
     if err != nil {
         return nil, err
     }
-    auditrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
+    auditrail, err := tracer.NewTracer(log, pgconn)
     if err != nil {
         return nil, err
     }
@@ -8,13 +8,11 @@ import (
 type sinkIteratorImpl struct {
     coreIterator MessageIterator
     handler      MessageHandler
-    metrics      sink.Sink
 }

-func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool, metrics sink.Sink) MessageIterator {
+func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
     iter := &sinkIteratorImpl{
         handler: messageHandler,
-        metrics: metrics,
     }
     iter.coreIterator = NewMessageIterator(log, iter.handle, messageFilter, autoDecode)
     return iter
@@ -25,8 +23,8 @@ func (i *sinkIteratorImpl) handle(message Message) {
 }

 func (i *sinkIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
-    i.metrics.RecordBatchSize(float64(len(batchData)))
-    i.metrics.IncreaseTotalBatches()
+    sink.RecordBatchSize(float64(len(batchData)))
+    sink.IncreaseTotalBatches()
     // Call core iterator
     i.coreIterator.Iterate(batchData, batchInfo)
     // Send batch end signal
22 backend/pkg/metrics/analytics/analytics.go Normal file

@@ -0,0 +1,22 @@
+package analytics
+
+import (
+    "github.com/prometheus/client_golang/prometheus"
+
+    "openreplay/backend/pkg/metrics/common"
+)
+
+var cardCreated = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "card",
+        Name:      "created",
+        Help:      "Histogram for tracking card creation",
+        Buckets:   common.DefaultBuckets,
+    },
+)
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        cardCreated,
+    }
+}
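The new analytics package follows the same shape as the other metrics packages in this changeset: package-level collectors plus a List() that a service registers once at startup. A self-contained sketch of that shape and of how the collectors could be registered and exposed is below; the HTTP wiring and port are assumptions, not something shown in the diff.

    package main

    import (
        "net/http"

        "github.com/prometheus/client_golang/prometheus"
        "github.com/prometheus/client_golang/prometheus/promhttp"
    )

    // Mirrors the cardCreated histogram added above; prometheus.DefBuckets
    // stands in for the repo's common.DefaultBuckets.
    var cardCreated = prometheus.NewHistogram(prometheus.HistogramOpts{
        Namespace: "card",
        Name:      "created",
        Help:      "Histogram for tracking card creation",
        Buckets:   prometheus.DefBuckets,
    })

    func List() []prometheus.Collector { return []prometheus.Collector{cardCreated} }

    func main() {
        prometheus.MustRegister(List()...) // register everything the package exposes
        cardCreated.Observe(1)             // record one card-creation event
        http.Handle("/metrics", promhttp.Handler())
        _ = http.ListenAndServe(":8080", nil)
    }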
@@ -2,22 +2,71 @@ package assets

 import (
     "github.com/prometheus/client_golang/prometheus"
+    "openreplay/backend/pkg/metrics/common"
+    "strconv"
 )

-type Assets interface {
-    IncreaseProcessesSessions()
-    IncreaseSavedSessions()
-    RecordDownloadDuration(durMillis float64, code int)
-    RecordUploadDuration(durMillis float64, isFailed bool)
-    List() []prometheus.Collector
+var assetsProcessedSessions = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "assets",
+        Name:      "processed_total",
+        Help:      "A counter displaying the total count of processed assets.",
+    },
+)
+
+func IncreaseProcessesSessions() {
+    assetsProcessedSessions.Inc()
 }

-type assetsImpl struct{}
+var assetsSavedSessions = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "assets",
+        Name:      "saved_total",
+        Help:      "A counter displaying the total number of cached assets.",
+    },
+)

-func New(serviceName string) Assets { return &assetsImpl{} }
+func IncreaseSavedSessions() {
+    assetsSavedSessions.Inc()
+}

-func (a *assetsImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (a *assetsImpl) IncreaseProcessesSessions() {}
-func (a *assetsImpl) IncreaseSavedSessions() {}
-func (a *assetsImpl) RecordDownloadDuration(durMillis float64, code int) {}
-func (a *assetsImpl) RecordUploadDuration(durMillis float64, isFailed bool) {}
+var assetsDownloadDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "assets",
+        Name:      "download_duration_seconds",
+        Help:      "A histogram displaying the duration of downloading for each asset in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"response_code"},
+)
+
+func RecordDownloadDuration(durMillis float64, code int) {
+    assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
+}
+
+var assetsUploadDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "assets",
+        Name:      "upload_s3_duration_seconds",
+        Help:      "A histogram displaying the duration of uploading to s3 for each asset in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"failed"},
+)
+
+func RecordUploadDuration(durMillis float64, isFailed bool) {
+    failed := "false"
+    if isFailed {
+        failed = "true"
+    }
+    assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        assetsProcessedSessions,
+        assetsSavedSessions,
+        assetsDownloadDuration,
+        assetsUploadDuration,
+    }
+}
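The new assets functions take durations in milliseconds at the call site and divide by 1000 before observing, labelling the histogram by HTTP status code. A self-contained sketch of the caller side follows; the metric here is a local stand-in for assetsDownloadDuration, and the asset URL is hypothetical.

    package main

    import (
        "fmt"
        "net/http"
        "strconv"
        "time"

        "github.com/prometheus/client_golang/prometheus"
    )

    // Local stand-in with the same shape as assetsDownloadDuration above
    // (default buckets instead of common.DefaultDurationBuckets).
    var downloadDuration = prometheus.NewHistogramVec(
        prometheus.HistogramOpts{
            Namespace: "assets",
            Name:      "download_duration_seconds",
            Help:      "Duration of downloading each asset in seconds.",
        },
        []string{"response_code"},
    )

    // Same contract as the package's RecordDownloadDuration: milliseconds in,
    // seconds observed, status code as a label.
    func recordDownloadDuration(durMillis float64, code int) {
        downloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
    }

    func main() {
        prometheus.MustRegister(downloadDuration)
        start := time.Now()
        resp, err := http.Get("https://example.com/asset.css") // hypothetical asset URL
        if err != nil {
            fmt.Println("download failed:", err)
            return
        }
        defer resp.Body.Close()
        recordDownloadDuration(float64(time.Since(start).Milliseconds()), resp.StatusCode)
    }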
@@ -2,6 +2,7 @@ package canvas

 import (
     "github.com/prometheus/client_golang/prometheus"
+    "openreplay/backend/pkg/metrics/common"
 )

 type Canvas interface {
@@ -17,17 +18,175 @@ type Canvas interface {
     List() []prometheus.Collector
 }

-type canvasImpl struct{}
+type canvasImpl struct {
+    canvasesImageSize            prometheus.Histogram
+    canvasesTotalSavedImages     prometheus.Counter
+    canvasesImagesPerCanvas      prometheus.Histogram
+    canvasesCanvasesPerSession   prometheus.Histogram
+    canvasesPreparingDuration    prometheus.Histogram
+    canvasesTotalCreatedArchives prometheus.Counter
+    canvasesArchivingDuration    prometheus.Histogram
+    canvasesArchiveSize          prometheus.Histogram
+    canvasesUploadingDuration    prometheus.Histogram
+}

-func New(serviceName string) Canvas { return &canvasImpl{} }
+func New(serviceName string) Canvas {
+    return &canvasImpl{
+        canvasesImageSize:            newImageSizeMetric(serviceName),
+        canvasesTotalSavedImages:     newTotalSavedImages(serviceName),
+        canvasesImagesPerCanvas:      newImagesPerCanvas(serviceName),
+        canvasesCanvasesPerSession:   newCanvasesPerSession(serviceName),
+        canvasesPreparingDuration:    newPreparingDuration(serviceName),
+        canvasesTotalCreatedArchives: newTotalCreatedArchives(serviceName),
+        canvasesArchivingDuration:    newArchivingDuration(serviceName),
+        canvasesArchiveSize:          newArchiveSize(serviceName),
+        canvasesUploadingDuration:    newUploadingDuration(serviceName),
+    }
+}

-func (c *canvasImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (c *canvasImpl) RecordCanvasImageSize(size float64) {}
-func (c *canvasImpl) IncreaseTotalSavedImages() {}
-func (c *canvasImpl) RecordImagesPerCanvas(number float64) {}
-func (c *canvasImpl) RecordCanvasesPerSession(number float64) {}
-func (c *canvasImpl) RecordPreparingDuration(duration float64) {}
-func (c *canvasImpl) IncreaseTotalCreatedArchives() {}
-func (c *canvasImpl) RecordArchivingDuration(duration float64) {}
-func (c *canvasImpl) RecordArchiveSize(size float64) {}
-func (c *canvasImpl) RecordUploadingDuration(duration float64) {}
+func (c *canvasImpl) List() []prometheus.Collector {
+    return []prometheus.Collector{
+        c.canvasesImageSize,
+        c.canvasesTotalSavedImages,
+        c.canvasesImagesPerCanvas,
+        c.canvasesCanvasesPerSession,
+        c.canvasesPreparingDuration,
+        c.canvasesTotalCreatedArchives,
+        c.canvasesArchivingDuration,
+        c.canvasesArchiveSize,
+        c.canvasesUploadingDuration,
+    }
+}
+
+func newImageSizeMetric(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "image_size_bytes",
+            Help:      "A histogram displaying the size of each canvas image in bytes.",
+            Buckets:   common.DefaultSizeBuckets,
+        },
+    )
+}
+
+func (c *canvasImpl) RecordCanvasImageSize(size float64) {
+    c.canvasesImageSize.Observe(size)
+}
+
+func newTotalSavedImages(serviceName string) prometheus.Counter {
+    return prometheus.NewCounter(
+        prometheus.CounterOpts{
+            Namespace: serviceName,
+            Name:      "total_saved_images",
+            Help:      "A counter displaying the total number of saved images.",
+        },
+    )
+}
+
+func (c *canvasImpl) IncreaseTotalSavedImages() {
+    c.canvasesTotalSavedImages.Inc()
+}
+
+func newImagesPerCanvas(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "images_per_canvas",
+            Help:      "A histogram displaying the number of images per canvas.",
+            Buckets:   common.DefaultBuckets,
+        },
+    )
+}
+
+func (c *canvasImpl) RecordImagesPerCanvas(number float64) {
+    c.canvasesImagesPerCanvas.Observe(number)
+}
+
+func newCanvasesPerSession(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "canvases_per_session",
+            Help:      "A histogram displaying the number of canvases per session.",
+            Buckets:   common.DefaultBuckets,
+        },
+    )
+}
+
+func (c *canvasImpl) RecordCanvasesPerSession(number float64) {
+    c.canvasesCanvasesPerSession.Observe(number)
+}
+
+func newPreparingDuration(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "preparing_duration_seconds",
+            Help:      "A histogram displaying the duration of preparing the list of canvases for each session in seconds.",
+            Buckets:   common.DefaultDurationBuckets,
+        },
+    )
+}
+
+func (c *canvasImpl) RecordPreparingDuration(duration float64) {
+    c.canvasesPreparingDuration.Observe(duration)
+}
+
+func newTotalCreatedArchives(serviceName string) prometheus.Counter {
+    return prometheus.NewCounter(
+        prometheus.CounterOpts{
+            Namespace: serviceName,
+            Name:      "total_created_archives",
+            Help:      "A counter displaying the total number of created canvas archives.",
+        },
+    )
+}
+
+func (c *canvasImpl) IncreaseTotalCreatedArchives() {
+    c.canvasesTotalCreatedArchives.Inc()
+}
+
+func newArchivingDuration(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "archiving_duration_seconds",
+            Help:      "A histogram displaying the duration of archiving for each canvas in seconds.",
+            Buckets:   common.DefaultDurationBuckets,
+        },
+    )
+}
+
+func (c *canvasImpl) RecordArchivingDuration(duration float64) {
+    c.canvasesArchivingDuration.Observe(duration)
+}
+
+func newArchiveSize(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "archive_size_bytes",
+            Help:      "A histogram displaying the size of each canvas archive in bytes.",
+            Buckets:   common.DefaultSizeBuckets,
+        },
+    )
+}
+
+func (c *canvasImpl) RecordArchiveSize(size float64) {
+    c.canvasesArchiveSize.Observe(size)
+}
+
+func newUploadingDuration(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "uploading_duration_seconds",
+            Help:      "A histogram displaying the duration of uploading for each canvas in seconds.",
+            Buckets:   common.DefaultDurationBuckets,
+        },
+    )
+}
+
+func (c *canvasImpl) RecordUploadingDuration(duration float64) {
+    c.canvasesUploadingDuration.Observe(duration)
+}
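Unlike the package-level metrics elsewhere in this changeset, the canvas package keeps its collectors as struct fields, namespaced by the service name at construction time and exposed through List(). A compact, self-contained sketch of that constructor-injected style is below; field and helper names are illustrative.

    package main

    import (
        "fmt"

        "github.com/prometheus/client_golang/prometheus"
    )

    // canvasMetrics mimics the struct-held collector style of canvasImpl:
    // the namespace comes from serviceName, and List() hands the collectors
    // to whoever registers them.
    type canvasMetrics struct {
        imageSize prometheus.Histogram
    }

    func newCanvasMetrics(serviceName string) *canvasMetrics {
        return &canvasMetrics{
            imageSize: prometheus.NewHistogram(prometheus.HistogramOpts{
                Namespace: serviceName,
                Name:      "image_size_bytes",
                Help:      "Size of each canvas image in bytes.",
            }),
        }
    }

    func (c *canvasMetrics) List() []prometheus.Collector {
        return []prometheus.Collector{c.imageSize}
    }

    func main() {
        m := newCanvasMetrics("canvas_handler") // serviceName becomes the namespace
        prometheus.MustRegister(m.List()...)
        m.imageSize.Observe(2048)
        fmt.Println("registered", len(m.List()), "collector(s)")
    }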
@@ -2,32 +2,141 @@ package database

 import (
     "github.com/prometheus/client_golang/prometheus"
+    "openreplay/backend/pkg/metrics/common"
 )

-type Database interface {
-    RecordBatchElements(number float64)
-    RecordBatchInsertDuration(durMillis float64)
-    RecordBulkSize(size float64, db, table string)
-    RecordBulkElements(size float64, db, table string)
-    RecordBulkInsertDuration(durMillis float64, db, table string)
-    RecordRequestDuration(durMillis float64, method, table string)
-    IncreaseTotalRequests(method, table string)
-    IncreaseRedisRequests(method, table string)
-    RecordRedisRequestDuration(durMillis float64, method, table string)
-    List() []prometheus.Collector
+var dbBatchElements = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "db",
+        Name:      "batch_size_elements",
+        Help:      "A histogram displaying the number of SQL commands in each batch.",
+        Buckets:   common.DefaultBuckets,
+    },
+)
+
+func RecordBatchElements(number float64) {
+    dbBatchElements.Observe(number)
 }

-type databaseImpl struct{}
+var dbBatchInsertDuration = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "db",
+        Name:      "batch_insert_duration_seconds",
+        Help:      "A histogram displaying the duration of batch inserts in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+)

-func New(serviceName string) Database { return &databaseImpl{} }
+func RecordBatchInsertDuration(durMillis float64) {
+    dbBatchInsertDuration.Observe(durMillis / 1000.0)
+}

-func (d *databaseImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (d *databaseImpl) RecordBatchElements(number float64) {}
-func (d *databaseImpl) RecordBatchInsertDuration(durMillis float64) {}
-func (d *databaseImpl) RecordBulkSize(size float64, db, table string) {}
-func (d *databaseImpl) RecordBulkElements(size float64, db, table string) {}
-func (d *databaseImpl) RecordBulkInsertDuration(durMillis float64, db, table string) {}
-func (d *databaseImpl) RecordRequestDuration(durMillis float64, method, table string) {}
-func (d *databaseImpl) IncreaseTotalRequests(method, table string) {}
-func (d *databaseImpl) IncreaseRedisRequests(method, table string) {}
-func (d *databaseImpl) RecordRedisRequestDuration(durMillis float64, method, table string) {}
+var dbBulkSize = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "db",
+        Name:      "bulk_size_bytes",
+        Help:      "A histogram displaying the bulk size in bytes.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+    []string{"db", "table"},
+)
+
+func RecordBulkSize(size float64, db, table string) {
+    dbBulkSize.WithLabelValues(db, table).Observe(size)
+}
+
+var dbBulkElements = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "db",
+        Name:      "bulk_size_elements",
+        Help:      "A histogram displaying the size of data set in each bulk.",
+        Buckets:   common.DefaultBuckets,
+    },
+    []string{"db", "table"},
+)
+
+func RecordBulkElements(size float64, db, table string) {
+    dbBulkElements.WithLabelValues(db, table).Observe(size)
+}
+
+var dbBulkInsertDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "db",
+        Name:      "bulk_insert_duration_seconds",
+        Help:      "A histogram displaying the duration of bulk inserts in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"db", "table"},
+)
+
+func RecordBulkInsertDuration(durMillis float64, db, table string) {
+    dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0)
+}
+
+var dbRequestDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "db",
+        Name:      "request_duration_seconds",
+        Help:      "A histogram displaying the duration of each sql request in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"method", "table"},
+)
+
+func RecordRequestDuration(durMillis float64, method, table string) {
+    dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
+}
+
+var dbTotalRequests = prometheus.NewCounterVec(
+    prometheus.CounterOpts{
+        Namespace: "db",
+        Name:      "requests_total",
+        Help:      "A counter showing the total number of all SQL requests.",
+    },
+    []string{"method", "table"},
+)
+
+func IncreaseTotalRequests(method, table string) {
+    dbTotalRequests.WithLabelValues(method, table).Inc()
+}
+
+var cacheRedisRequests = prometheus.NewCounterVec(
+    prometheus.CounterOpts{
+        Namespace: "cache",
+        Name:      "redis_requests_total",
+        Help:      "A counter showing the total number of all Redis requests.",
+    },
+    []string{"method", "table"},
+)
+
+func IncreaseRedisRequests(method, table string) {
+    cacheRedisRequests.WithLabelValues(method, table).Inc()
+}
+
+var cacheRedisRequestDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "cache",
+        Name:      "redis_request_duration_seconds",
+        Help:      "A histogram displaying the duration of each Redis request in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"method", "table"},
+)
+
+func RecordRedisRequestDuration(durMillis float64, method, table string) {
+    cacheRedisRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        dbBatchElements,
+        dbBatchInsertDuration,
+        dbBulkSize,
+        dbBulkElements,
+        dbBulkInsertDuration,
+        dbRequestDuration,
+        dbTotalRequests,
+        cacheRedisRequests,
+        cacheRedisRequestDuration,
+    }
+}
@@ -2,20 +2,50 @@ package ender

 import "github.com/prometheus/client_golang/prometheus"

-type Ender interface {
-    IncreaseActiveSessions()
-    DecreaseActiveSessions()
-    IncreaseClosedSessions()
-    IncreaseTotalSessions()
-    List() []prometheus.Collector
+var enderActiveSessions = prometheus.NewGauge(
+    prometheus.GaugeOpts{
+        Namespace: "ender",
+        Name:      "sessions_active",
+        Help:      "A gauge displaying the number of active (live) sessions.",
+    },
+)
+
+func IncreaseActiveSessions() {
+    enderActiveSessions.Inc()
 }

-type enderImpl struct{}
+func DecreaseActiveSessions() {
+    enderActiveSessions.Dec()
+}

-func New(serviceName string) Ender { return &enderImpl{} }
+var enderClosedSessions = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "ender",
+        Name:      "sessions_closed",
+        Help:      "A counter displaying the number of closed sessions (sent SessionEnd).",
+    },
+)

-func (e *enderImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (e *enderImpl) IncreaseActiveSessions() {}
-func (e *enderImpl) DecreaseActiveSessions() {}
-func (e *enderImpl) IncreaseClosedSessions() {}
-func (e *enderImpl) IncreaseTotalSessions() {}
+func IncreaseClosedSessions() {
+    enderClosedSessions.Inc()
+}
+
+var enderTotalSessions = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "ender",
+        Name:      "sessions_total",
+        Help:      "A counter displaying the number of all processed sessions.",
+    },
+)
+
+func IncreaseTotalSessions() {
+    enderTotalSessions.Inc()
+}
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        enderActiveSessions,
+        enderClosedSessions,
+        enderTotalSessions,
+    }
+}
@@ -2,16 +2,65 @@ package heuristics

 import (
     "github.com/prometheus/client_golang/prometheus"
+    "openreplay/backend/pkg/metrics/common"
+    "strconv"
 )

-type Heuristics interface {
-    IncreaseTotalEvents(eventType string)
-    List() []prometheus.Collector
+var heuristicsTotalEvents = prometheus.NewCounterVec(
+    prometheus.CounterOpts{
+        Namespace: "heuristics",
+        Name:      "events_total",
+        Help:      "A counter displaying the number of all processed events",
+    },
+    []string{"type"},
+)
+
+func IncreaseTotalEvents(eventType string) {
+    heuristicsTotalEvents.WithLabelValues(eventType).Inc()
 }

-type heuristicsImpl struct{}
+var heuristicsRequestSize = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "heuristics",
+        Name:      "request_size_bytes",
+        Help:      "A histogram displaying the size of each HTTP request in bytes.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+    []string{"url", "response_code"},
+)

-func New(serviceName string) Heuristics { return &heuristicsImpl{} }
+func RecordRequestSize(size float64, url string, code int) {
+    heuristicsRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
+}

-func (h *heuristicsImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (h *heuristicsImpl) IncreaseTotalEvents(eventType string) {}
+var heuristicsRequestDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "heuristics",
+        Name:      "request_duration_seconds",
+        Help:      "A histogram displaying the duration of each HTTP request in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"url", "response_code"},
+)
+
+func RecordRequestDuration(durMillis float64, url string, code int) {
+    heuristicsRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
+}
+
+var heuristicsTotalRequests = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "heuristics",
+        Name:      "requests_total",
+        Help:      "A counter displaying the number all HTTP requests.",
+    },
+)
+
+func IncreaseTotalRequests() {
+    heuristicsTotalRequests.Inc()
+}
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        heuristicsTotalEvents,
+    }
+}
@@ -2,6 +2,7 @@ package images

 import (
     "github.com/prometheus/client_golang/prometheus"
+    "openreplay/backend/pkg/metrics/common"
 )

 type Images interface {
@@ -17,17 +18,174 @@ type Images interface {
     List() []prometheus.Collector
 }

-type imagesImpl struct{}
+type imagesImpl struct {
+    originalArchiveSize               prometheus.Histogram
+    originalArchiveExtractionDuration prometheus.Histogram
+    totalSavedArchives                prometheus.Counter
+    savingImageDuration               prometheus.Histogram
+    totalSavedImages                  prometheus.Counter
+    totalCreatedArchives              prometheus.Counter
+    archivingDuration                 prometheus.Histogram
+    archiveSize                       prometheus.Histogram
+    uploadingDuration                 prometheus.Histogram
+}

-func New(serviceName string) Images { return &imagesImpl{} }
+func New(serviceName string) Images {
+    return &imagesImpl{
+        originalArchiveSize:               newOriginalArchiveSize(serviceName),
+        originalArchiveExtractionDuration: newOriginalArchiveExtractionDuration(serviceName),
+        totalSavedArchives:                newTotalSavedArchives(serviceName),
+        savingImageDuration:               newSavingImageDuration(serviceName),
+        totalSavedImages:                  newTotalSavedImages(serviceName),
+        totalCreatedArchives:              newTotalCreatedArchives(serviceName),
+        archivingDuration:                 newArchivingDuration(serviceName),
+        archiveSize:                       newArchiveSize(serviceName),
+        uploadingDuration:                 newUploadingDuration(serviceName),
+    }
+}

-func (i *imagesImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (i *imagesImpl) RecordOriginalArchiveSize(size float64) {}
-func (i *imagesImpl) RecordOriginalArchiveExtractionDuration(duration float64) {}
-func (i *imagesImpl) IncreaseTotalSavedArchives() {}
-func (i *imagesImpl) RecordSavingImageDuration(duration float64) {}
-func (i *imagesImpl) IncreaseTotalSavedImages() {}
-func (i *imagesImpl) IncreaseTotalCreatedArchives() {}
-func (i *imagesImpl) RecordArchivingDuration(duration float64) {}
-func (i *imagesImpl) RecordArchiveSize(size float64) {}
-func (i *imagesImpl) RecordUploadingDuration(duration float64) {}
+func (i *imagesImpl) List() []prometheus.Collector {
+    return []prometheus.Collector{
+        i.originalArchiveSize,
+        i.originalArchiveExtractionDuration,
+        i.totalSavedArchives,
+        i.savingImageDuration,
+        i.totalSavedImages,
+        i.totalCreatedArchives,
+        i.archivingDuration,
+        i.archiveSize,
+        i.uploadingDuration,
+    }
+}
+
+func newOriginalArchiveSize(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "original_archive_size_bytes",
+            Help:      "A histogram displaying the original archive size in bytes.",
+            Buckets:   common.DefaultSizeBuckets,
+        },
+    )
+}
+
+func (i *imagesImpl) RecordOriginalArchiveSize(size float64) {
+    i.archiveSize.Observe(size)
+}
+
+func newOriginalArchiveExtractionDuration(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "original_archive_extraction_duration_seconds",
+            Help:      "A histogram displaying the duration of extracting the original archive.",
+            Buckets:   common.DefaultDurationBuckets,
+        },
+    )
+}
+
+func (i *imagesImpl) RecordOriginalArchiveExtractionDuration(duration float64) {
+    i.originalArchiveExtractionDuration.Observe(duration)
+}
+
+func newTotalSavedArchives(serviceName string) prometheus.Counter {
+    return prometheus.NewCounter(
+        prometheus.CounterOpts{
+            Namespace: serviceName,
+            Name:      "total_saved_archives",
+            Help:      "A counter displaying the total number of saved original archives.",
+        },
+    )
+}
+
+func (i *imagesImpl) IncreaseTotalSavedArchives() {
+    i.totalSavedArchives.Inc()
+}
+
+func newSavingImageDuration(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "saving_image_duration_seconds",
+            Help:      "A histogram displaying the duration of saving each image in seconds.",
+            Buckets:   common.DefaultDurationBuckets,
+        },
+    )
+}
+
+func (i *imagesImpl) RecordSavingImageDuration(duration float64) {
+    i.savingImageDuration.Observe(duration)
+}
+
+func newTotalSavedImages(serviceName string) prometheus.Counter {
+    return prometheus.NewCounter(
+        prometheus.CounterOpts{
+            Namespace: serviceName,
+            Name:      "total_saved_images",
+            Help:      "A counter displaying the total number of saved images.",
+        },
+    )
+}
+
+func (i *imagesImpl) IncreaseTotalSavedImages() {
+    i.totalSavedImages.Inc()
+}
+
+func newTotalCreatedArchives(serviceName string) prometheus.Counter {
+    return prometheus.NewCounter(
+        prometheus.CounterOpts{
+            Namespace: serviceName,
+            Name:      "total_created_archives",
+            Help:      "A counter displaying the total number of created archives.",
+        },
+    )
+}
+
+func (i *imagesImpl) IncreaseTotalCreatedArchives() {
+    i.totalCreatedArchives.Inc()
+}
+
+func newArchivingDuration(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "archiving_duration_seconds",
+            Help:      "A histogram displaying the duration of archiving each session in seconds.",
+            Buckets:   common.DefaultDurationBuckets,
+        },
+    )
+}
+
+func (i *imagesImpl) RecordArchivingDuration(duration float64) {
+    i.archivingDuration.Observe(duration)
+}
+
+func newArchiveSize(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "archive_size_bytes",
+            Help:      "A histogram displaying the session's archive size in bytes.",
+            Buckets:   common.DefaultSizeBuckets,
+        },
+    )
+}
+
+func (i *imagesImpl) RecordArchiveSize(size float64) {
+    i.archiveSize.Observe(size)
+}
+
+func newUploadingDuration(serviceName string) prometheus.Histogram {
+    return prometheus.NewHistogram(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "uploading_duration_seconds",
+            Help:      "A histogram displaying the duration of uploading each session's archive to S3 in seconds.",
+            Buckets:   common.DefaultDurationBuckets,
+        },
+    )
+}
+
+func (i *imagesImpl) RecordUploadingDuration(duration float64) {
+    i.uploadingDuration.Observe(duration)
+}
@@ -2,40 +2,184 @@ package sink
 
 import (
     "github.com/prometheus/client_golang/prometheus"
+
+    "openreplay/backend/pkg/metrics/common"
 )
 
-type Sink interface {
-    RecordMessageSize(size float64)
-    IncreaseWrittenMessages()
-    IncreaseTotalMessages()
-    RecordBatchSize(size float64)
-    IncreaseTotalBatches()
-    RecordWrittenBytes(size float64, fileType string)
-    IncreaseTotalWrittenBytes(size float64, fileType string)
-    IncreaseCachedAssets()
-    DecreaseCachedAssets()
-    IncreaseSkippedAssets()
-    IncreaseTotalAssets()
-    RecordAssetSize(size float64)
-    RecordProcessAssetDuration(durMillis float64)
-    List() []prometheus.Collector
-}
-
-type sinkImpl struct{}
-
-func New(serviceName string) Sink { return &sinkImpl{} }
-
-func (s *sinkImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (s *sinkImpl) RecordMessageSize(size float64) {}
-func (s *sinkImpl) IncreaseWrittenMessages() {}
-func (s *sinkImpl) IncreaseTotalMessages() {}
-func (s *sinkImpl) RecordBatchSize(size float64) {}
-func (s *sinkImpl) IncreaseTotalBatches() {}
-func (s *sinkImpl) RecordWrittenBytes(size float64, fileType string) {}
-func (s *sinkImpl) IncreaseTotalWrittenBytes(size float64, fileType string) {}
-func (s *sinkImpl) IncreaseCachedAssets() {}
-func (s *sinkImpl) DecreaseCachedAssets() {}
-func (s *sinkImpl) IncreaseSkippedAssets() {}
-func (s *sinkImpl) IncreaseTotalAssets() {}
-func (s *sinkImpl) RecordAssetSize(size float64) {}
-func (s *sinkImpl) RecordProcessAssetDuration(durMillis float64) {}
+var sinkMessageSize = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "sink",
+        Name:      "message_size_bytes",
+        Help:      "A histogram displaying the size of each message in bytes.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+)
+
+func RecordMessageSize(size float64) {
+    sinkMessageSize.Observe(size)
+}
+
+var sinkWrittenMessages = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "sink",
+        Name:      "messages_written",
+        Help:      "A counter displaying the total number of all written messages.",
+    },
+)
+
+func IncreaseWrittenMessages() {
+    sinkWrittenMessages.Inc()
+}
+
+var sinkTotalMessages = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "sink",
+        Name:      "messages_total",
+        Help:      "A counter displaying the total number of all processed messages.",
+    },
+)
+
+func IncreaseTotalMessages() {
+    sinkTotalMessages.Inc()
+}
+
+var sinkBatchSize = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "sink",
+        Name:      "batch_size_bytes",
+        Help:      "A histogram displaying the size of each batch in bytes.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+)
+
+func RecordBatchSize(size float64) {
+    sinkBatchSize.Observe(size)
+}
+
+var sinkTotalBatches = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "sink",
+        Name:      "batches_total",
+        Help:      "A counter displaying the total number of all written batches.",
+    },
+)
+
+func IncreaseTotalBatches() {
+    sinkTotalBatches.Inc()
+}
+
+var sinkWrittenBytes = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "sink",
+        Name:      "written_bytes",
+        Help:      "A histogram displaying the size of buffer in bytes written to session file.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordWrittenBytes(size float64, fileType string) {
+    if size == 0 {
+        return
+    }
+    sinkWrittenBytes.WithLabelValues(fileType).Observe(size)
+    IncreaseTotalWrittenBytes(size, fileType)
+}
+
+var sinkTotalWrittenBytes = prometheus.NewCounterVec(
+    prometheus.CounterOpts{
+        Namespace: "sink",
+        Name:      "written_bytes_total",
+        Help:      "A counter displaying the total number of bytes written to all session files.",
+    },
+    []string{"file_type"},
+)
+
+func IncreaseTotalWrittenBytes(size float64, fileType string) {
+    if size == 0 {
+        return
+    }
+    sinkTotalWrittenBytes.WithLabelValues(fileType).Add(size)
+}
+
+var sinkCachedAssets = prometheus.NewGauge(
+    prometheus.GaugeOpts{
+        Namespace: "sink",
+        Name:      "assets_cached",
+        Help:      "A gauge displaying the current number of cached assets.",
+    },
+)
+
+func IncreaseCachedAssets() {
+    sinkCachedAssets.Inc()
+}
+
+func DecreaseCachedAssets() {
+    sinkCachedAssets.Dec()
+}
+
+var sinkSkippedAssets = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "sink",
+        Name:      "assets_skipped",
+        Help:      "A counter displaying the total number of all skipped assets.",
+    },
+)
+
+func IncreaseSkippedAssets() {
+    sinkSkippedAssets.Inc()
+}
+
+var sinkTotalAssets = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "sink",
+        Name:      "assets_total",
+        Help:      "A counter displaying the total number of all processed assets.",
+    },
+)
+
+func IncreaseTotalAssets() {
+    sinkTotalAssets.Inc()
+}
+
+var sinkAssetSize = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "sink",
+        Name:      "asset_size_bytes",
+        Help:      "A histogram displaying the size of each asset in bytes.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+)
+
+func RecordAssetSize(size float64) {
+    sinkAssetSize.Observe(size)
+}
+
+var sinkProcessAssetDuration = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "sink",
+        Name:      "asset_process_duration_seconds",
+        Help:      "A histogram displaying the duration of processing for each asset in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+)
+
+func RecordProcessAssetDuration(durMillis float64) {
+    sinkProcessAssetDuration.Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        sinkMessageSize,
+        sinkWrittenMessages,
+        sinkTotalMessages,
+        sinkBatchSize,
+        sinkTotalBatches,
+        sinkWrittenBytes,
+        sinkTotalWrittenBytes,
+        sinkCachedAssets,
+        sinkSkippedAssets,
+        sinkTotalAssets,
+        sinkAssetSize,
+        sinkProcessAssetDuration,
+    }
+}
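For reference, a minimal sketch (not part of this diff) of exposing the package-level sink collectors defined above; the registry setup, port, and sample values are assumptions.

package main

import (
    "log"
    "net/http"

    "github.com/prometheus/client_golang/prometheus"
    "github.com/prometheus/client_golang/prometheus/promhttp"

    "openreplay/backend/pkg/metrics/sink"
)

func main() {
    registry := prometheus.NewRegistry()
    registry.MustRegister(sink.List()...) // every collector defined above, in one call

    sink.RecordMessageSize(512) // e.g. a 512-byte message
    sink.IncreaseTotalMessages()

    http.Handle("/metrics", promhttp.HandlerFor(registry, promhttp.HandlerOpts{}))
    log.Fatal(http.ListenAndServe(":8080", nil))
}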
@@ -2,34 +2,148 @@ package spot
 
 import (
     "github.com/prometheus/client_golang/prometheus"
+
+    "openreplay/backend/pkg/metrics/common"
 )
 
-type Spot interface {
-    RecordOriginalVideoSize(size float64)
-    RecordCroppedVideoSize(size float64)
-    IncreaseVideosTotal()
-    IncreaseVideosCropped()
-    IncreaseVideosTranscoded()
-    RecordOriginalVideoDownloadDuration(durMillis float64)
-    RecordCroppingDuration(durMillis float64)
-    RecordCroppedVideoUploadDuration(durMillis float64)
-    RecordTranscodingDuration(durMillis float64)
-    RecordTranscodedVideoUploadDuration(durMillis float64)
-    List() []prometheus.Collector
-}
-
-type spotImpl struct{}
-
-func New(serviceName string) Spot { return &spotImpl{} }
-
-func (s *spotImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (s *spotImpl) RecordOriginalVideoSize(size float64) {}
-func (s *spotImpl) RecordCroppedVideoSize(size float64) {}
-func (s *spotImpl) IncreaseVideosTotal() {}
-func (s *spotImpl) IncreaseVideosCropped() {}
-func (s *spotImpl) IncreaseVideosTranscoded() {}
-func (s *spotImpl) RecordOriginalVideoDownloadDuration(durMillis float64) {}
-func (s *spotImpl) RecordCroppingDuration(durMillis float64) {}
-func (s *spotImpl) RecordCroppedVideoUploadDuration(durMillis float64) {}
-func (s *spotImpl) RecordTranscodingDuration(durMillis float64) {}
-func (s *spotImpl) RecordTranscodedVideoUploadDuration(durMillis float64) {}
+var spotOriginalVideoSize = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "spot",
+        Name:      "original_video_size_bytes",
+        Help:      "A histogram displaying the size of each original video in bytes.",
+        Buckets:   common.VideoSizeBuckets,
+    },
+)
+
+func RecordOriginalVideoSize(size float64) {
+    spotOriginalVideoSize.Observe(size)
+}
+
+var spotCroppedVideoSize = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "spot",
+        Name:      "cropped_video_size_bytes",
+        Help:      "A histogram displaying the size of each cropped video in bytes.",
+        Buckets:   common.VideoSizeBuckets,
+    },
+)
+
+func RecordCroppedVideoSize(size float64) {
+    spotCroppedVideoSize.Observe(size)
+}
+
+var spotVideosTotal = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "spot",
+        Name:      "videos_total",
+        Help:      "A counter displaying the total number of all processed videos.",
+    },
+)
+
+func IncreaseVideosTotal() {
+    spotVideosTotal.Inc()
+}
+
+var spotVideosCropped = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "spot",
+        Name:      "videos_cropped_total",
+        Help:      "A counter displaying the total number of all cropped videos.",
+    },
+)
+
+func IncreaseVideosCropped() {
+    spotVideosCropped.Inc()
+}
+
+var spotVideosTranscoded = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "spot",
+        Name:      "videos_transcoded_total",
+        Help:      "A counter displaying the total number of all transcoded videos.",
+    },
+)
+
+func IncreaseVideosTranscoded() {
+    spotVideosTranscoded.Inc()
+}
+
+var spotOriginalVideoDownloadDuration = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "spot",
+        Name:      "original_video_download_duration_seconds",
+        Help:      "A histogram displaying the duration of downloading each original video in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+)
+
+func RecordOriginalVideoDownloadDuration(durMillis float64) {
+    spotOriginalVideoDownloadDuration.Observe(durMillis / 1000.0)
+}
+
+var spotCroppingDuration = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "spot",
+        Name:      "cropping_duration_seconds",
+        Help:      "A histogram displaying the duration of cropping each video in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+)
+
+func RecordCroppingDuration(durMillis float64) {
+    spotCroppingDuration.Observe(durMillis / 1000.0)
+}
+
+var spotCroppedVideoUploadDuration = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "spot",
+        Name:      "cropped_video_upload_duration_seconds",
+        Help:      "A histogram displaying the duration of uploading each cropped video in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+)
+
+func RecordCroppedVideoUploadDuration(durMillis float64) {
+    spotCroppedVideoUploadDuration.Observe(durMillis / 1000.0)
+}
+
+var spotTranscodingDuration = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "spot",
+        Name:      "transcoding_duration_seconds",
+        Help:      "A histogram displaying the duration of transcoding each video in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+)
+
+func RecordTranscodingDuration(durMillis float64) {
+    spotTranscodingDuration.Observe(durMillis / 1000.0)
+}
+
+var spotTranscodedVideoUploadDuration = prometheus.NewHistogram(
+    prometheus.HistogramOpts{
+        Namespace: "spot",
+        Name:      "transcoded_video_upload_duration_seconds",
+        Help:      "A histogram displaying the duration of uploading each transcoded video in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+)
+
+func RecordTranscodedVideoUploadDuration(durMillis float64) {
+    spotTranscodedVideoUploadDuration.Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        spotOriginalVideoSize,
+        spotCroppedVideoSize,
+        spotVideosTotal,
+        spotVideosCropped,
+        spotVideosTranscoded,
+        spotOriginalVideoDownloadDuration,
+        spotCroppingDuration,
+        spotCroppedVideoUploadDuration,
+        spotTranscodingDuration,
+        spotTranscodedVideoUploadDuration,
+    }
+}
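A small usage sketch, not from this PR: recording an original video's size the same way the transcoder further down does, via (*os.File).Stat; the file path is made up for illustration.

package main

import (
    "os"

    spot "openreplay/backend/pkg/metrics/spot"
)

func main() {
    f, err := os.Open("/tmp/spot/origin.webm") // hypothetical path
    if err != nil {
        return
    }
    defer f.Close()
    if info, err := f.Stat(); err == nil {
        // size in bytes, observed into the spot_original_video_size_bytes histogram
        spot.RecordOriginalVideoSize(float64(info.Size()))
    }
}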
@@ -2,34 +2,154 @@ package storage
 
 import (
     "github.com/prometheus/client_golang/prometheus"
+
+    "openreplay/backend/pkg/metrics/common"
 )
 
-type Storage interface {
-    RecordSessionSize(fileSize float64, fileType string)
-    IncreaseStorageTotalSessions()
-    RecordSkippedSessionSize(fileSize float64, fileType string)
-    IncreaseStorageTotalSkippedSessions()
-    RecordSessionReadDuration(durMillis float64, fileType string)
-    RecordSessionSortDuration(durMillis float64, fileType string)
-    RecordSessionEncryptionDuration(durMillis float64, fileType string)
-    RecordSessionCompressDuration(durMillis float64, fileType string)
-    RecordSessionUploadDuration(durMillis float64, fileType string)
-    RecordSessionCompressionRatio(ratio float64, fileType string)
-    List() []prometheus.Collector
-}
-
-type storageImpl struct{}
-
-func New(serviceName string) Storage { return &storageImpl{} }
-
-func (s *storageImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (s *storageImpl) RecordSessionSize(fileSize float64, fileType string) {}
-func (s *storageImpl) IncreaseStorageTotalSessions() {}
-func (s *storageImpl) RecordSkippedSessionSize(fileSize float64, fileType string) {}
-func (s *storageImpl) IncreaseStorageTotalSkippedSessions() {}
-func (s *storageImpl) RecordSessionReadDuration(durMillis float64, fileType string) {}
-func (s *storageImpl) RecordSessionSortDuration(durMillis float64, fileType string) {}
-func (s *storageImpl) RecordSessionEncryptionDuration(durMillis float64, fileType string) {}
-func (s *storageImpl) RecordSessionCompressDuration(durMillis float64, fileType string) {}
-func (s *storageImpl) RecordSessionUploadDuration(durMillis float64, fileType string) {}
-func (s *storageImpl) RecordSessionCompressionRatio(ratio float64, fileType string) {}
+var storageSessionSize = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "session_size_bytes",
+        Help:      "A histogram displaying the size of each session file in bytes prior to any manipulation.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionSize(fileSize float64, fileType string) {
+    storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
+}
+
+var storageTotalSessions = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "storage",
+        Name:      "sessions_total",
+        Help:      "A counter displaying the total number of all processed sessions.",
+    },
+)
+
+func IncreaseStorageTotalSessions() {
+    storageTotalSessions.Inc()
+}
+
+var storageSkippedSessionSize = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "session_size_bytes",
+        Help:      "A histogram displaying the size of each skipped session file in bytes.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSkippedSessionSize(fileSize float64, fileType string) {
+    storageSkippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
+}
+
+var storageTotalSkippedSessions = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "storage",
+        Name:      "sessions_skipped_total",
+        Help:      "A counter displaying the total number of all skipped sessions because of the size limits.",
+    },
+)
+
+func IncreaseStorageTotalSkippedSessions() {
+    storageTotalSkippedSessions.Inc()
+}
+
+var storageSessionReadDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "read_duration_seconds",
+        Help:      "A histogram displaying the duration of reading for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionReadDuration(durMillis float64, fileType string) {
+    storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionSortDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "sort_duration_seconds",
+        Help:      "A histogram displaying the duration of sorting for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionSortDuration(durMillis float64, fileType string) {
+    storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionEncryptionDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "encryption_duration_seconds",
+        Help:      "A histogram displaying the duration of encoding for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionEncryptionDuration(durMillis float64, fileType string) {
+    storageSessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionCompressDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "compress_duration_seconds",
+        Help:      "A histogram displaying the duration of compressing for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionCompressDuration(durMillis float64, fileType string) {
+    storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionUploadDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "upload_duration_seconds",
+        Help:      "A histogram displaying the duration of uploading to s3 for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionUploadDuration(durMillis float64, fileType string) {
+    storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionCompressionRatio = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "compression_ratio",
+        Help:      "A histogram displaying the compression ratio of mob files for each session.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionCompressionRatio(ratio float64, fileType string) {
+    storageSessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
+}
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        storageSessionSize,
+        storageTotalSessions,
+        storageSessionReadDuration,
+        storageSessionSortDuration,
+        storageSessionEncryptionDuration,
+        storageSessionCompressDuration,
+        storageSessionUploadDuration,
+        storageSessionCompressionRatio,
+    }
+}
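Usage sketch (assumption, not in this diff): the storage helpers above take durations in milliseconds (durMillis) and divide by 1000 before observing, so callers pass time.Since(...).Milliseconds(); "dom" is an illustrative file_type label.

package main

import (
    "time"

    "openreplay/backend/pkg/metrics/storage"
)

func main() {
    start := time.Now()
    // ... read the session's mob file here ...
    storage.RecordSessionReadDuration(float64(time.Since(start).Milliseconds()), "dom")
    storage.RecordSessionSize(1_500_000, "dom") // raw file size in bytes
    storage.IncreaseStorageTotalSessions()
}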
155 backend/pkg/metrics/videostorage/metrics.go Normal file
@@ -0,0 +1,155 @@
+package videostorage
+
+import (
+    "github.com/prometheus/client_golang/prometheus"
+    "openreplay/backend/pkg/metrics/common"
+)
+
+var storageSessionSize = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "session_size_bytes",
+        Help:      "A histogram displaying the size of each session file in bytes prior to any manipulation.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionSize(fileSize float64, fileType string) {
+    storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
+}
+
+var storageTotalSessions = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "storage",
+        Name:      "sessions_total",
+        Help:      "A counter displaying the total number of all processed sessions.",
+    },
+)
+
+func IncreaseStorageTotalSessions() {
+    storageTotalSessions.Inc()
+}
+
+var storageSkippedSessionSize = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "session_size_bytes",
+        Help:      "A histogram displaying the size of each skipped session file in bytes.",
+        Buckets:   common.DefaultSizeBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSkippedSessionSize(fileSize float64, fileType string) {
+    storageSkippedSessionSize.WithLabelValues(fileType).Observe(fileSize)
+}
+
+var storageTotalSkippedSessions = prometheus.NewCounter(
+    prometheus.CounterOpts{
+        Namespace: "storage",
+        Name:      "sessions_skipped_total",
+        Help:      "A counter displaying the total number of all skipped sessions because of the size limits.",
+    },
+)
+
+func IncreaseStorageTotalSkippedSessions() {
+    storageTotalSkippedSessions.Inc()
+}
+
+var storageSessionReadDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "read_duration_seconds",
+        Help:      "A histogram displaying the duration of reading for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionReadDuration(durMillis float64, fileType string) {
+    storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionSortDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "sort_duration_seconds",
+        Help:      "A histogram displaying the duration of sorting for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionSortDuration(durMillis float64, fileType string) {
+    storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionEncryptionDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "encryption_duration_seconds",
+        Help:      "A histogram displaying the duration of encoding for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionEncryptionDuration(durMillis float64, fileType string) {
+    storageSessionEncryptionDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionCompressDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "compress_duration_seconds",
+        Help:      "A histogram displaying the duration of compressing for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionCompressDuration(durMillis float64, fileType string) {
+    storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionUploadDuration = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "upload_duration_seconds",
+        Help:      "A histogram displaying the duration of uploading to s3 for each session in seconds.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionUploadDuration(durMillis float64, fileType string) {
+    storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionCompressionRatio = prometheus.NewHistogramVec(
+    prometheus.HistogramOpts{
+        Namespace: "storage",
+        Name:      "compression_ratio",
+        Help:      "A histogram displaying the compression ratio of mob files for each session.",
+        Buckets:   common.DefaultDurationBuckets,
+    },
+    []string{"file_type"},
+)
+
+func RecordSessionCompressionRatio(ratio float64, fileType string) {
+    storageSessionCompressionRatio.WithLabelValues(fileType).Observe(ratio)
+}
+
+func List() []prometheus.Collector {
+    return []prometheus.Collector{
+        storageSessionSize,
+        storageTotalSessions,
+        storageSessionReadDuration,
+        storageSessionSortDuration,
+        storageSessionEncryptionDuration,
+        storageSessionCompressDuration,
+        storageSessionUploadDuration,
+        storageSessionCompressionRatio,
+    }
+}
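Sketch only: the new videostorage package reuses the "storage" namespace and repeats the session_size_bytes name, so the assumption here is that its collectors are registered on the video-storage service's own registry, separate from pkg/metrics/storage; registering both packages' List() output on a single registry would likely collide on the shared names.

package main

import (
    "github.com/prometheus/client_golang/prometheus"

    "openreplay/backend/pkg/metrics/videostorage"
)

func main() {
    registry := prometheus.NewRegistry()
    registry.MustRegister(videostorage.List()...) // this service's collectors only
    _ = registry
}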
@@ -1,7 +1,11 @@
 package web
 
 import (
+    "strconv"
+
     "github.com/prometheus/client_golang/prometheus"
+
+    "openreplay/backend/pkg/metrics/common"
 )
 
 type Web interface {
@@ -11,11 +15,70 @@ type Web interface {
     List() []prometheus.Collector
 }
 
-type webImpl struct{}
+type webImpl struct {
+    httpRequestSize     *prometheus.HistogramVec
+    httpRequestDuration *prometheus.HistogramVec
+    httpTotalRequests   prometheus.Counter
+}
 
-func New(serviceName string) Web { return &webImpl{} }
+func New(serviceName string) Web {
+    return &webImpl{
+        httpRequestSize:     newRequestSizeMetric(serviceName),
+        httpRequestDuration: newRequestDurationMetric(serviceName),
+        httpTotalRequests:   newTotalRequestsMetric(serviceName),
+    }
+}
 
-func (w *webImpl) List() []prometheus.Collector { return []prometheus.Collector{} }
-func (w *webImpl) RecordRequestSize(size float64, url string, code int) {}
-func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {}
-func (w *webImpl) IncreaseTotalRequests() {}
+func (w *webImpl) List() []prometheus.Collector {
+    return []prometheus.Collector{
+        w.httpRequestSize,
+        w.httpRequestDuration,
+        w.httpTotalRequests,
+    }
+}
+
+func newRequestSizeMetric(serviceName string) *prometheus.HistogramVec {
+    return prometheus.NewHistogramVec(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "request_size_bytes",
+            Help:      "A histogram displaying the size of each HTTP request in bytes.",
+            Buckets:   common.DefaultSizeBuckets,
+        },
+        []string{"url", "response_code"},
+    )
+}
+
+func (w *webImpl) RecordRequestSize(size float64, url string, code int) {
+    w.httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
+}
+
+func newRequestDurationMetric(serviceName string) *prometheus.HistogramVec {
+    return prometheus.NewHistogramVec(
+        prometheus.HistogramOpts{
+            Namespace: serviceName,
+            Name:      "request_duration_seconds",
+            Help:      "A histogram displaying the duration of each HTTP request in seconds.",
+            Buckets:   common.DefaultDurationBuckets,
+        },
+        []string{"url", "response_code"},
+    )
+}
+
+func (w *webImpl) RecordRequestDuration(durMillis float64, url string, code int) {
+    w.httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
+}
+
+func newTotalRequestsMetric(serviceName string) prometheus.Counter {
+    return prometheus.NewCounter(
+        prometheus.CounterOpts{
+            Namespace: serviceName,
+            Name:      "requests_total",
+            Help:      "A counter displaying the number all HTTP requests.",
+        },
+    )
+}
+
+func (w *webImpl) IncreaseTotalRequests() {
+    w.httpTotalRequests.Inc()
+}
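Illustrative middleware (not from this PR) showing how the webImpl metrics above would typically be fed; the statusRecorder wrapper, the route label, and the service name are assumptions, and the collectors from m.List() would still need to be registered on a registry.

package main

import (
    "net/http"
    "time"

    "openreplay/backend/pkg/metrics/web"
)

type statusRecorder struct {
    http.ResponseWriter
    code int
}

func (s *statusRecorder) WriteHeader(code int) {
    s.code = code
    s.ResponseWriter.WriteHeader(code)
}

func instrument(next http.Handler, m web.Web) http.Handler {
    return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
        start := time.Now()
        rec := &statusRecorder{ResponseWriter: w, code: http.StatusOK}
        next.ServeHTTP(rec, r)
        m.IncreaseTotalRequests()
        m.RecordRequestSize(float64(r.ContentLength), r.URL.Path, rec.code)
        // the duration helper expects milliseconds and converts to seconds itself
        m.RecordRequestDuration(float64(time.Since(start).Milliseconds()), r.URL.Path, rec.code)
    })
}

func main() {
    m := web.New("spot") // hypothetical service name
    mux := http.NewServeMux()
    mux.HandleFunc("/healthz", func(w http.ResponseWriter, r *http.Request) { w.WriteHeader(http.StatusOK) })
    http.ListenAndServe(":8080", instrument(mux, m))
}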
@@ -11,8 +11,6 @@ import (
     "openreplay/backend/pkg/metrics/database"
 )
 
-var ErrDisabledCache = errors.New("cache is disabled")
-
 type Cache interface {
     Set(project *Project) error
     GetByID(projectID uint32) (*Project, error)
@@ -20,16 +18,10 @@
 }
 
 type cacheImpl struct {
     db      *redis.Client
-    metrics database.Database
 }
 
-func NewCache(db *redis.Client, metrics database.Database) Cache {
-    return &cacheImpl{
-        db:      db,
-        metrics: metrics,
-    }
-}
+var ErrDisabledCache = errors.New("cache is disabled")
 
 func (c *cacheImpl) Set(project *Project) error {
     if c.db == nil {
@@ -46,8 +38,8 @@ func (c *cacheImpl) Set(project *Project) error {
     if _, err = c.db.Redis.Set(fmt.Sprintf("project:key:%s", project.ProjectKey), projectBytes, time.Minute*10).Result(); err != nil {
         return err
     }
-    c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
-    c.metrics.IncreaseRedisRequests("set", "project")
+    database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "set", "project")
+    database.IncreaseRedisRequests("set", "project")
     return nil
 }
@@ -60,8 +52,8 @@ func (c *cacheImpl) GetByID(projectID uint32) (*Project, error) {
     if err != nil {
         return nil, err
     }
-    c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
-    c.metrics.IncreaseRedisRequests("get", "project")
+    database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
+    database.IncreaseRedisRequests("get", "project")
     project := &Project{}
     if err = json.Unmarshal([]byte(result), project); err != nil {
         return nil, err
@@ -78,11 +70,15 @@ func (c *cacheImpl) GetByKey(projectKey string) (*Project, error) {
     if err != nil {
         return nil, err
     }
-    c.metrics.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
-    c.metrics.IncreaseRedisRequests("get", "project")
+    database.RecordRedisRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "get", "project")
+    database.IncreaseRedisRequests("get", "project")
     project := &Project{}
     if err = json.Unmarshal([]byte(result), project); err != nil {
         return nil, err
     }
     return project, nil
 }
+
+func NewCache(db *redis.Client) Cache {
+    return &cacheImpl{db: db}
+}
@@ -9,7 +9,6 @@ import (
     "openreplay/backend/pkg/db/postgres/pool"
     "openreplay/backend/pkg/db/redis"
     "openreplay/backend/pkg/logger"
-    "openreplay/backend/pkg/metrics/database"
 )
 
 type Projects interface {
@@ -25,8 +24,8 @@ type projectsImpl struct {
     projectsByKeys cache.Cache
 }
 
-func New(log logger.Logger, db pool.Pool, redis *redis.Client, metrics database.Database) Projects {
-    cl := NewCache(redis, metrics)
+func New(log logger.Logger, db pool.Pool, redis *redis.Client) Projects {
+    cl := NewCache(redis)
     return &projectsImpl{
         log: log,
         db:  db,
@@ -2,7 +2,6 @@ package tracer
 
 import (
     "net/http"
-    "openreplay/backend/pkg/metrics/database"
 
     db "openreplay/backend/pkg/db/postgres/pool"
     "openreplay/backend/pkg/logger"
@@ -15,7 +14,7 @@ type Tracer interface {
 
 type tracerImpl struct{}
 
-func NewTracer(log logger.Logger, conn db.Pool, metrics database.Database) (Tracer, error) {
+func NewTracer(log logger.Logger, conn db.Pool) (Tracer, error) {
     return &tracerImpl{}, nil
 }
@@ -3,7 +3,6 @@ package sessions
 import (
     "errors"
     "openreplay/backend/pkg/db/redis"
-    "openreplay/backend/pkg/metrics/database"
 )
 
 type cacheImpl struct{}
@@ -26,6 +25,6 @@ func (c *cacheImpl) Get(sessionID uint64) (*Session, error) {
 
 var ErrDisabledCache = errors.New("cache is disabled")
 
-func NewCache(db *redis.Client, metrics database.Database) Cache {
+func NewCache(db *redis.Client) Cache {
     return &cacheImpl{}
 }
@@ -3,7 +3,6 @@ package sessions
 import (
     "context"
     "fmt"
-    "openreplay/backend/pkg/metrics/database"
 
     "openreplay/backend/pkg/db/postgres/pool"
     "openreplay/backend/pkg/db/redis"
@@ -39,12 +38,12 @@ type sessionsImpl struct {
     projects projects.Projects
 }
 
-func New(log logger.Logger, db pool.Pool, proj projects.Projects, redis *redis.Client, metrics database.Database) Sessions {
+func New(log logger.Logger, db pool.Pool, proj projects.Projects, redis *redis.Client) Sessions {
     return &sessionsImpl{
         log:      log,
-        cache:    NewInMemoryCache(log, NewCache(redis, metrics)),
+        cache:    NewInMemoryCache(log, NewCache(redis)),
         storage:  NewStorage(db),
-        updates:  NewSessionUpdates(log, db, metrics),
+        updates:  NewSessionUpdates(log, db),
         projects: proj,
     }
 }
@@ -27,15 +27,13 @@ type updatesImpl struct {
     log     logger.Logger
     db      pool.Pool
     updates map[uint64]*sessionUpdate
-    metrics database.Database
 }
 
-func NewSessionUpdates(log logger.Logger, db pool.Pool, metrics database.Database) Updates {
+func NewSessionUpdates(log logger.Logger, db pool.Pool) Updates {
     return &updatesImpl{
         log:     log,
         db:      db,
         updates: make(map[uint64]*sessionUpdate),
-        metrics: metrics,
     }
 }
@@ -96,7 +94,7 @@ func (u *updatesImpl) Commit() {
         }
     }
     // Record batch size
-    u.metrics.RecordBatchElements(float64(b.Len()))
+    database.RecordBatchElements(float64(b.Len()))
 
     start := time.Now()
@@ -123,7 +121,7 @@ func (u *updatesImpl) Commit() {
             }
         }
     }
-    u.metrics.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
+    database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
     u.updates = make(map[uint64]*sessionUpdate)
 }
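A compact sketch of the commit-timing pattern used in Commit() above, assuming pkg/metrics/database exposes the package-level RecordBatchElements and RecordBatchInsertDuration functions that the new calls rely on; the flush callback stands in for the real batch send.

package main

import (
    "time"

    "openreplay/backend/pkg/metrics/database"
)

func commitBatch(flush func() error, elements int) error {
    database.RecordBatchElements(float64(elements)) // how many updates are in this batch
    start := time.Now()
    err := flush() // send the batched session updates to Postgres
    database.RecordBatchInsertDuration(float64(time.Since(start).Milliseconds()))
    return err
}

func main() {
    _ = commitBatch(func() error { return nil }, 42)
}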
@@ -1,14 +1,12 @@
 package spot
 
 import (
-    "openreplay/backend/pkg/metrics/database"
     "time"
 
     "openreplay/backend/internal/config/spot"
     "openreplay/backend/pkg/db/postgres/pool"
     "openreplay/backend/pkg/flakeid"
     "openreplay/backend/pkg/logger"
-    spotMetrics "openreplay/backend/pkg/metrics/spot"
     "openreplay/backend/pkg/metrics/web"
     "openreplay/backend/pkg/objectstorage/store"
     "openreplay/backend/pkg/server/api"
@@ -28,16 +26,16 @@ type ServicesBuilder struct {
     SpotsAPI api.Handlers
 }
 
-func NewServiceBuilder(log logger.Logger, cfg *spot.Config, webMetrics web.Web, spotMetrics spotMetrics.Spot, dbMetrics database.Database, pgconn pool.Pool, prefix string) (*ServicesBuilder, error) {
+func NewServiceBuilder(log logger.Logger, cfg *spot.Config, webMetrics web.Web, pgconn pool.Pool, prefix string) (*ServicesBuilder, error) {
     objStore, err := store.NewStore(&cfg.ObjectsConfig)
     if err != nil {
         return nil, err
     }
     flaker := flakeid.NewFlaker(cfg.WorkerID)
     spots := service.NewSpots(log, pgconn, flaker)
-    transcoder := transcoder.NewTranscoder(cfg, log, objStore, pgconn, spots, spotMetrics)
+    transcoder := transcoder.NewTranscoder(cfg, log, objStore, pgconn, spots)
     keys := keys.NewKeys(log, pgconn)
-    auditrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
+    auditrail, err := tracer.NewTracer(log, pgconn)
     if err != nil {
         return nil, err
     }
@@ -16,7 +16,7 @@ import (
     "openreplay/backend/internal/config/spot"
     "openreplay/backend/pkg/db/postgres/pool"
     "openreplay/backend/pkg/logger"
-    spotMetrics "openreplay/backend/pkg/metrics/spot"
+    metrics "openreplay/backend/pkg/metrics/spot"
     "openreplay/backend/pkg/objectstorage"
     workers "openreplay/backend/pkg/pool"
     "openreplay/backend/pkg/spot/service"
@@ -39,10 +39,9 @@ type transcoderImpl struct {
     spots            service.Spots
     prepareWorkers   workers.WorkerPool
     transcodeWorkers workers.WorkerPool
-    metrics          spotMetrics.Spot
 }
 
-func NewTranscoder(cfg *spot.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, conn pool.Pool, spots service.Spots, metrics spotMetrics.Spot) Transcoder {
+func NewTranscoder(cfg *spot.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, conn pool.Pool, spots service.Spots) Transcoder {
     tnsc := &transcoderImpl{
         cfg: cfg,
         log: log,
@@ -115,7 +114,7 @@ func (t *transcoderImpl) doneTask(task *Task) {
 }
 
 func (t *transcoderImpl) process(task *Task) {
-    t.metrics.IncreaseVideosTotal()
+    metrics.IncreaseVideosTotal()
     //spotID := task.SpotID
     t.log.Info(context.Background(), "Processing spot %s", task.SpotID)
@@ -201,11 +200,11 @@ func (t *transcoderImpl) downloadSpotVideo(spotID uint64, path string) error {
     if fileInfo, err := originVideo.Stat(); err != nil {
         t.log.Error(context.Background(), "Failed to get file info: %v", err)
     } else {
-        t.metrics.RecordOriginalVideoSize(float64(fileInfo.Size()))
+        metrics.RecordOriginalVideoSize(float64(fileInfo.Size()))
     }
     originVideo.Close()
 
-    t.metrics.RecordOriginalVideoDownloadDuration(time.Since(start).Seconds())
+    metrics.RecordOriginalVideoDownloadDuration(time.Since(start).Seconds())
 
     t.log.Info(context.Background(), "Saved origin video to disk, spot: %d in %v sec", spotID, time.Since(start).Seconds())
     return nil
@@ -228,8 +227,8 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
     if err != nil {
         return fmt.Errorf("failed to execute command: %v, stderr: %v", err, stderr.String())
     }
-    t.metrics.IncreaseVideosCropped()
-    t.metrics.RecordCroppingDuration(time.Since(start).Seconds())
+    metrics.IncreaseVideosCropped()
+    metrics.RecordCroppingDuration(time.Since(start).Seconds())
 
     t.log.Info(context.Background(), "Cropped spot %d in %v", spotID, time.Since(start).Seconds())
@@ -247,7 +246,7 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
     if fileInfo, err := video.Stat(); err != nil {
         t.log.Error(context.Background(), "Failed to get file info: %v", err)
     } else {
-        t.metrics.RecordCroppedVideoSize(float64(fileInfo.Size()))
+        metrics.RecordCroppedVideoSize(float64(fileInfo.Size()))
     }
 
     err = t.objStorage.Upload(video, fmt.Sprintf("%d/video.webm", spotID), "video/webm", objectstorage.NoContentEncoding, objectstorage.NoCompression)
@@ -255,7 +254,7 @@ func (t *transcoderImpl) cropSpotVideo(spotID uint64, crop []int, path string) e
         return fmt.Errorf("failed to upload cropped video: %v", err)
     }
 
-    t.metrics.RecordCroppedVideoUploadDuration(time.Since(start).Seconds())
+    metrics.RecordCroppedVideoUploadDuration(time.Since(start).Seconds())
 
     t.log.Info(context.Background(), "Uploaded cropped spot %d in %v", spotID, time.Since(start).Seconds())
     return nil
@@ -280,8 +279,8 @@ func (t *transcoderImpl) transcodeSpotVideo(spotID uint64, path string) (string,
         t.log.Error(context.Background(), "Failed to execute command: %v, stderr: %v", err, stderr.String())
         return "", err
     }
-    t.metrics.IncreaseVideosTranscoded()
-    t.metrics.RecordTranscodingDuration(time.Since(start).Seconds())
+    metrics.IncreaseVideosTranscoded()
+    metrics.RecordTranscodingDuration(time.Since(start).Seconds())
     t.log.Info(context.Background(), "Transcoded spot %d in %v", spotID, time.Since(start).Seconds())
 
     start = time.Now()
@@ -328,7 +327,7 @@ func (t *transcoderImpl) transcodeSpotVideo(spotID uint64, path string) (string,
             return "", err
         }
     }
-    t.metrics.RecordTranscodedVideoUploadDuration(time.Since(start).Seconds())
+    metrics.RecordTranscodedVideoUploadDuration(time.Since(start).Seconds())
 
     t.log.Info(context.Background(), "Uploaded chunks for spot %d in %v", spotID, time.Since(start).Seconds())
     return strings.Join(lines, "\n"), nil
ee/api/.gitignore
vendored
8
ee/api/.gitignore
vendored
|
|
@ -211,7 +211,7 @@ Pipfile.lock
|
||||||
/chalicelib/core/metadata.py
|
/chalicelib/core/metadata.py
|
||||||
/chalicelib/core/mobile.py
|
/chalicelib/core/mobile.py
|
||||||
/chalicelib/core/saved_search.py
|
/chalicelib/core/saved_search.py
|
||||||
/chalicelib/core/sessions/sessions_pg.py
|
/chalicelib/core/sessions/sessions.py
|
||||||
/chalicelib/core/sessions/sessions_ch.py
|
/chalicelib/core/sessions/sessions_ch.py
|
||||||
/chalicelib/core/sessions/sessions_devtool/sessions_devtool.py
|
/chalicelib/core/sessions/sessions_devtool/sessions_devtool.py
|
||||||
/chalicelib/core/sessions/sessions_favorite/sessions_favorite.py
|
/chalicelib/core/sessions/sessions_favorite/sessions_favorite.py
|
||||||
|
|
@ -225,7 +225,8 @@ Pipfile.lock
|
||||||
/chalicelib/core/sessions/unprocessed_sessions.py
|
/chalicelib/core/sessions/unprocessed_sessions.py
|
||||||
/chalicelib/core/metrics/modules
|
/chalicelib/core/metrics/modules
|
||||||
/chalicelib/core/socket_ios.py
|
/chalicelib/core/socket_ios.py
|
||||||
/chalicelib/core/sourcemaps
|
/chalicelib/core/sourcemaps.py
|
||||||
|
/chalicelib/core/sourcemaps_parser.py
|
||||||
/chalicelib/core/tags.py
|
/chalicelib/core/tags.py
|
||||||
/chalicelib/saml
|
/chalicelib/saml
|
||||||
/chalicelib/utils/__init__.py
|
/chalicelib/utils/__init__.py
|
||||||
|
|
@ -234,6 +235,7 @@ Pipfile.lock
|
||||||
/chalicelib/utils/dev.py
|
/chalicelib/utils/dev.py
|
||||||
/chalicelib/utils/email_handler.py
|
/chalicelib/utils/email_handler.py
|
||||||
/chalicelib/utils/email_helper.py
|
/chalicelib/utils/email_helper.py
|
||||||
|
/chalicelib/utils/errors_helper.py
|
||||||
/chalicelib/utils/event_filter_definition.py
|
/chalicelib/utils/event_filter_definition.py
|
||||||
/chalicelib/utils/github_client_v3.py
|
/chalicelib/utils/github_client_v3.py
|
||||||
/chalicelib/utils/helper.py
|
/chalicelib/utils/helper.py
|
||||||
|
|
@ -285,7 +287,7 @@ Pipfile.lock
|
||||||
/chalicelib/core/alerts/alerts_listener.py
|
/chalicelib/core/alerts/alerts_listener.py
|
||||||
/chalicelib/core/alerts/modules/helpers.py
|
/chalicelib/core/alerts/modules/helpers.py
|
||||||
/chalicelib/core/errors/modules/*
|
/chalicelib/core/errors/modules/*
|
||||||
/chalicelib/core/errors/errors_pg.py
|
/chalicelib/core/errors/errors.py
|
||||||
/chalicelib/core/errors/errors_ch.py
|
/chalicelib/core/errors/errors_ch.py
|
||||||
/chalicelib/core/errors/errors_details.py
|
/chalicelib/core/errors/errors_details.py
|
||||||
/chalicelib/utils/contextual_validators.py
|
/chalicelib/utils/contextual_validators.py
|
||||||
|
|
|
||||||
|
|
@@ -1,31 +1,8 @@
 FROM python:3.12-alpine
-LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
-WORKDIR /app
-
-COPY . .
-RUN mv env.default .env
-RUN apk add --no-cache tini xmlsec && \
-    export UV_SYSTEM_PYTHON=true && \
-    pip install uv && \
-    apk add --no-cache --virtual .build-deps \
-    build-base \
-    libressl \
-    libffi-dev \
-    libressl-dev \
-    libxslt-dev \
-    libxml2-dev \
-    xmlsec-dev && \
-    uv pip install --no-cache-dir --upgrade -r requirements.txt && \
-    # Solve the libxml2 library version mismatch by reinstalling lxml with matching libxml2
-    uv pip uninstall lxml && \
-    uv pip install --no-cache-dir --no-binary lxml lxml --force-reinstall && \
-    # Create non-root user
-    adduser -u 1001 openreplay -D && \
-    # Cleanup build dependencies
-    apk del .build-deps
+LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
+RUN apk add --no-cache build-base libressl libffi-dev libressl-dev libxslt-dev libxml2-dev xmlsec-dev xmlsec tini
 
 ARG envarg
-ARG GIT_SHA
 ENV SOURCE_MAP_VERSION=0.7.4 \
     APP_NAME=chalice \
     LISTEN_PORT=8000 \
@@ -33,12 +10,17 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
     ENTERPRISE_BUILD=${envarg} \
     GIT_SHA=$GIT_SHA
 
-WORKDIR /app
+WORKDIR /work
+COPY requirements.txt ./requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+# This code is used to solve 'lxml & xmlsec libxml2 library version mismatch' error
+RUN pip uninstall -y lxml && pip install lxml
 
 COPY . .
 RUN mv env.default .env
 
+RUN adduser -u 1001 openreplay -D
 USER 1001
 
 ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["./entrypoint.sh"]
+CMD ./entrypoint.sh
@@ -21,6 +21,7 @@ gunicorn = "==23.0.0"
 python-decouple = "==3.8"
 pydantic = {extras = ["email"], version = "==2.10.6"}
 apscheduler = "==3.11.0"
+lxml = "!=4.7.0,<=5.2.1,>=4.6.5"
 python3-saml = "==1.16.0"
 python-multipart = "==0.0.20"
 redis = "==5.2.1"
@@ -1,3 +1,16 @@
+from decouple import config
+
 TENANT_ID = "tenant_id"
+
+if config("EXP_ALERTS", cast=bool, default=False):
+    if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
+        from chalicelib.core.sessions import sessions
+    else:
+        from chalicelib.core.sessions import sessions_ch as sessions
+else:
+    if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
+        from chalicelib.core.sessions import sessions_ch as sessions
+    else:
+        from chalicelib.core.sessions import sessions
 
 from . import helpers as alert_helpers
@@ -86,8 +86,7 @@ def __generic_query(typename, value_length=None):
                     ORDER BY value"""
 
     if value_length is None or value_length > 2:
-        return f"""SELECT DISTINCT ON(value, type) value, type
-                   FROM ((SELECT DISTINCT value, type
+        return f"""(SELECT DISTINCT value, type
                    FROM {TABLE}
                    WHERE
                      project_id = %(project_id)s
@@ -103,7 +102,7 @@ def __generic_query(typename, value_length=None):
                      AND type='{typename.upper()}'
                      AND value ILIKE %(value)s
                      ORDER BY value
-                     LIMIT 5)) AS raw;"""
+                     LIMIT 5);"""
     return f"""SELECT DISTINCT value, type
                FROM {TABLE}
                WHERE
@@ -258,7 +257,7 @@ def __search_metadata(project_id, value, key=None, source=None):
                             WHERE project_id = %(project_id)s
                               AND {colname} ILIKE %(svalue)s LIMIT 5)""")
     with ch_client.ClickHouseClient() as cur:
-        query = cur.format(query=f"""SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
+        query = cur.format(query=f"""SELECT key, value, 'METADATA' AS TYPE
                                      FROM({" UNION ALL ".join(sub_from)}) AS all_metas
                                      LIMIT 5;""", parameters={"project_id": project_id, "value": helper.string_to_sql_like(value),
                                                               "svalue": helper.string_to_sql_like("^" + value)})
@@ -4,11 +4,11 @@ from decouple import config
 
 logger = logging.getLogger(__name__)
 
-from . import errors_pg as errors_legacy
+from . import errors as errors_legacy
 
 if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
     logger.info(">>> Using experimental error search")
     from . import errors_ch as errors
     from . import errors_details_exp as errors_details
 else:
-    from . import errors_pg as errors
+    from . import errors
@@ -1,12 +1,31 @@
-import logging
+from decouple import config

-from chalicelib.core.errors.modules import errors_helper
+import schemas
+from . import errors
+from chalicelib.core import metrics, metadata
+from chalicelib.core import sessions
 from chalicelib.utils import ch_client, exp_ch_helper
-from chalicelib.utils import helper
+from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
-from chalicelib.utils.metrics_helper import get_step_size

-logger = logging.getLogger(__name__)
+def __flatten_sort_key_count_version(data, merge_nested=False):
+    if data is None:
+        return []
+    return sorted(
+        [
+            {
+                "name": f"{o[0][0][0]}@{v[0]}",
+                "count": v[1]
+            } for o in data for v in o[2]
+        ],
+        key=lambda o: o["count"], reverse=True) if merge_nested else \
+        [
+            {
+                "name": o[0][0][0],
+                "count": o[1][0][0],
+            } for o in data
+        ]


 def __transform_map_to_tag(data, key1, key2, requested_key):
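Judging from its subscripts, the newly added __flatten_sort_key_count_version expects rows shaped like (((name,),), ((total,),), [(version, count), ...]). A worked illustration with made-up values (the data shape is inferred from the code above, not documented elsewhere in this diff):

    # Hypothetical input row: browser name, total count, per-version counts.
    rows = [((("Chrome",),), ((120,),), [("11.2", 80), ("11.3", 40)])]

    # merge_nested=False keeps one entry per browser:
    #   [{"name": "Chrome", "count": 120}]
    # merge_nested=True expands per version and sorts by count, descending:
    #   [{"name": "Chrome@11.2", "count": 80}, {"name": "Chrome@11.3", "count": 40}]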
@@ -70,7 +89,13 @@ def get_details(project_id, error_id, user_id, **data):
     MAIN_ERR_SESS_TABLE = exp_ch_helper.get_main_js_errors_sessions_table(0)
     MAIN_EVENTS_TABLE = exp_ch_helper.get_main_events_table(0)

-    ch_basic_query = errors_helper.__get_basic_constraints_ch(time_constraint=False)
+    ch_sub_query24 = errors.__get_basic_constraints(startTime_arg_name="startDate24", endTime_arg_name="endDate24")
+    ch_sub_query24.append("error_id = %(error_id)s")
+
+    ch_sub_query30 = errors.__get_basic_constraints(startTime_arg_name="startDate30", endTime_arg_name="endDate30",
+                                                    project_key="errors.project_id")
+    ch_sub_query30.append("error_id = %(error_id)s")
+    ch_basic_query = errors.__get_basic_constraints(time_constraint=False)
     ch_basic_query.append("error_id = %(error_id)s")

     with ch_client.ClickHouseClient() as ch:
@@ -80,9 +105,9 @@ def get_details(project_id, error_id, user_id, **data):
         data["endDate30"] = TimeUTC.now()

         density24 = int(data.get("density24", 24))
-        step_size24 = get_step_size(data["startDate24"], data["endDate24"], density24)
+        step_size24 = errors.get_step_size(data["startDate24"], data["endDate24"], density24)
         density30 = int(data.get("density30", 30))
-        step_size30 = get_step_size(data["startDate30"], data["endDate30"], density30)
+        step_size30 = errors.get_step_size(data["startDate30"], data["endDate30"], density30)
         params = {
             "startDate24": data['startDate24'],
             "endDate24": data['endDate24'],
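Both sides derive a chart bucket width from the requested window and density (get_step_size from metrics_helper on the minus side, errors.get_step_size on the plus side). Neither implementation is part of this diff; a helper of this kind usually boils down to the arithmetic below (a sketch under that assumption, not the project's actual code):

    def get_step_size(start_ts_ms: int, end_ts_ms: int, density: int) -> int:
        """Split [start, end] into `density` buckets and return the bucket width in ms."""
        density = max(density, 1)  # guard against division by zero
        return (end_ts_ms - start_ts_ms) // density

    step_size24 = get_step_size(0, 24 * 60 * 60 * 1000, 24)  # -> 3_600_000 ms per bucket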
@@ -96,25 +121,27 @@ def get_details(project_id, error_id, user_id, **data):

         main_ch_query = f"""\
         WITH pre_processed AS (SELECT error_id,
-                                      toString(`$properties`.name) AS name,
-                                      toString(`$properties`.message) AS message,
+                                      name,
+                                      message,
                                       session_id,
-                                      created_at AS datetime,
-                                      `$user_id` AS user_id,
-                                      `$browser` AS user_browser,
-                                      `$browser_version` AS user_browser_version,
-                                      `$os` AS user_os,
-                                      '$os_version' AS user_os_version,
-                                      toString(`$properties`.user_device_type) AS user_device_type,
-                                      toString(`$properties`.user_device) AS user_device,
-                                      `$country` AS user_country
+                                      datetime,
+                                      user_id,
+                                      user_browser,
+                                      user_browser_version,
+                                      user_os,
+                                      user_os_version,
+                                      user_device_type,
+                                      user_device,
+                                      user_country,
+                                      error_tags_keys,
+                                      error_tags_values
                                FROM {MAIN_ERR_SESS_TABLE} AS errors
                                WHERE {" AND ".join(ch_basic_query)}
                                )
         SELECT %(error_id)s AS error_id, name, message,users,
                first_occurrence,last_occurrence,last_session_id,
                sessions,browsers_partition,os_partition,device_partition,
-               country_partition,chart24,chart30
+               country_partition,chart24,chart30,custom_tags
         FROM (SELECT error_id,
                      name,
                      message
@@ -129,7 +156,8 @@ def get_details(project_id, error_id, user_id, **data):
                  INNER JOIN (SELECT toUnixTimestamp(max(datetime)) * 1000 AS last_occurrence,
                                     toUnixTimestamp(min(datetime)) * 1000 AS first_occurrence
                              FROM pre_processed) AS time_details ON TRUE
-                 INNER JOIN (SELECT session_id AS last_session_id
+                 INNER JOIN (SELECT session_id AS last_session_id,
+                                    arrayMap((key, value)->(map(key, value)), error_tags_keys, error_tags_values) AS custom_tags
                              FROM pre_processed
                              ORDER BY datetime DESC
                              LIMIT 1) AS last_session_details ON TRUE
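The added arrayMap((key, value)->(map(key, value)), error_tags_keys, error_tags_values) expression turns two parallel ClickHouse arrays into a list of single-entry maps. The equivalent transformation written in Python, for intuition (tag values are made up):

    error_tags_keys = ["env", "release"]      # hypothetical tag data
    error_tags_values = ["prod", "1.22.0"]

    custom_tags = [{k: v} for k, v in zip(error_tags_keys, error_tags_values)]
    # -> [{"env": "prod"}, {"release": "1.22.0"}]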
@@ -175,35 +203,27 @@ def get_details(project_id, error_id, user_id, **data):
                                                ORDER BY count DESC) AS count_per_country_details
                             ) AS mapped_country_details ON TRUE
                 INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart24
-                            FROM (SELECT gs.generate_series AS timestamp,
+                            FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3756 second)) *
+                                         1000 AS timestamp,
                                          COUNT(DISTINCT session_id) AS count
-                                  FROM generate_series(%(startDate24)s, %(endDate24)s, %(step_size24)s) AS gs
-                                  LEFT JOIN {MAIN_EVENTS_TABLE} AS errors ON(TRUE)
-                                  WHERE project_id = toUInt16(%(project_id)s)
-                                    AND `$event_name` = 'ERROR'
-                                    AND events.created_at >= toDateTime(timestamp / 1000)
-                                    AND events.created_at < toDateTime((timestamp + %(step_size24)s) / 1000)
-                                    AND error_id = %(error_id)s
+                                  FROM {MAIN_EVENTS_TABLE} AS errors
+                                  WHERE {" AND ".join(ch_sub_query24)}
                                   GROUP BY timestamp
                                   ORDER BY timestamp) AS chart_details
                             ) AS chart_details24 ON TRUE
                 INNER JOIN (SELECT groupArray(map('timestamp', timestamp, 'count', count)) AS chart30
-                            FROM (SELECT gs.generate_series AS timestamp,
+                            FROM (SELECT toUnixTimestamp(toStartOfInterval(datetime, INTERVAL 3724 second)) *
+                                         1000 AS timestamp,
                                          COUNT(DISTINCT session_id) AS count
-                                  FROM generate_series(%(startDate30)s, %(endDate30)s, %(step_size30)s) AS gs
-                                  LEFT JOIN {MAIN_EVENTS_TABLE} AS errors ON(TRUE)
-                                  WHERE project_id = toUInt16(%(project_id)s)
-                                    AND `$event_name` = 'ERROR'
-                                    AND events.created_at >= toDateTime(timestamp / 1000)
-                                    AND events.created_at < toDateTime((timestamp + %(step_size30)s) / 1000)
-                                    AND error_id = %(error_id)s
+                                  FROM {MAIN_EVENTS_TABLE} AS errors
+                                  WHERE {" AND ".join(ch_sub_query30)}
                                   GROUP BY timestamp
                                   ORDER BY timestamp) AS chart_details
                             ) AS chart_details30 ON TRUE;"""

-        logger.debug("--------------------")
-        logging.debug(ch.format(query=main_ch_query, parameters=params))
-        logger.debug("--------------------")
+        # print("--------------------")
+        # print(ch.format(main_ch_query, params))
+        # print("--------------------")
         row = ch.execute(query=main_ch_query, parameters=params)
         if len(row) == 0:
             return {"errors": ["error not found"]}
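The chart hunks above replace a pre-generated series of buckets (minus side: generate_series LEFT JOINed against the events table) with ClickHouse-side bucketing via toStartOfInterval (plus side, using fixed 3756- and 3724-second intervals). The bucketing itself is plain floor arithmetic; a small sketch with arbitrary numbers:

    def bucket_start(ts_ms: int, step_ms: int) -> int:
        """Align a timestamp to the start of its bucket, as toStartOfInterval does."""
        return (ts_ms // step_ms) * step_ms

    bucket_start(10_250, 1_000)  # -> 10_000: every timestamp in [10_000, 11_000) maps here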
@@ -220,12 +240,12 @@ def get_details(project_id, error_id, user_id, **data):
                            ORDER BY datetime DESC
                            LIMIT 1;"""
         params = {"project_id": project_id, "session_id": row["last_session_id"], "userId": user_id}
-        logger.debug("--------------------")
-        logging.debug(ch.format(query=query, parameters=params))
-        logger.debug("--------------------")
+        # print("--------------------")
+        # print(ch.format(query, params))
+        # print("--------------------")
         status = ch.execute(query=query, parameters=params)

-        if status is not None and len(status) > 0:
+        if status is not None:
             status = status[0]
             row["favorite"] = status.pop("favorite")
             row["viewed"] = status.pop("viewed")
@@ -234,4 +254,8 @@ def get_details(project_id, error_id, user_id, **data):
         row["last_hydrated_session"] = None
         row["favorite"] = False
         row["viewed"] = False
+    row["chart24"] = metrics.__complete_missing_steps(start_time=data["startDate24"], end_time=data["endDate24"],
+                                                      density=density24, rows=row["chart24"], neutral={"count": 0})
+    row["chart30"] = metrics.__complete_missing_steps(start_time=data["startDate30"], end_time=data["endDate30"],
+                                                      density=density30, rows=row["chart30"], neutral={"count": 0})
     return {"data": helper.dict_to_camel_case(row)}
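Because the plus side groups only buckets that actually contain errors, it post-processes the charts with metrics.__complete_missing_steps, whose implementation is not shown in this diff. A minimal stand-in that conveys the idea, filling absent buckets with a neutral row (bucket alignment in the real helper may differ):

    def complete_missing_steps(start_time, end_time, density, rows, neutral):
        """Return one row per bucket; buckets missing from `rows` get the neutral value."""
        step = max((end_time - start_time) // max(density, 1), 1)
        by_ts = {r["timestamp"]: r for r in rows}
        return [by_ts.get(ts, {"timestamp": ts, **neutral})
                for ts in range(start_time, end_time, step)]

    complete_missing_steps(0, 4_000, 4, [{"timestamp": 1_000, "count": 7}], {"count": 0})
    # -> [{'timestamp': 0, 'count': 0}, {'timestamp': 1000, 'count': 7},
    #     {'timestamp': 2000, 'count': 0}, {'timestamp': 3000, 'count': 0}]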
@@ -2,7 +2,6 @@ import logging

 import redis
 import requests

 # from confluent_kafka.admin import AdminClient
 from decouple import config

@@ -29,6 +28,7 @@ HEALTH_ENDPOINTS = {
     "http": app_connection_string("http-openreplay", 8888, "metrics"),
     "ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
     "integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
+    "peers": app_connection_string("peers-openreplay", 8888, "health"),
     "sink": app_connection_string("sink-openreplay", 8888, "metrics"),
     "sourcemapreader": app_connection_string(
         "sourcemapreader-openreplay", 8888, "health"
@@ -40,7 +40,9 @@ HEALTH_ENDPOINTS = {
 def __check_database_pg(*_):
     fail_response = {
         "health": False,
-        "details": {"errors": ["Postgres health-check failed"]},
+        "details": {
+            "errors": ["Postgres health-check failed"]
+        }
     }
     with pg_client.PostgresClient() as cur:
         try:
@@ -62,26 +64,29 @@ def __check_database_pg(*_):
         "details": {
             # "version": server_version["server_version"],
             # "schema": schema_version["version"]
-        },
+        }
     }


 def __always_healthy(*_):
-    return {"health": True, "details": {}}
+    return {
+        "health": True,
+        "details": {}
+    }


 def __check_be_service(service_name):
     def fn(*_):
         fail_response = {
             "health": False,
-            "details": {"errors": ["server health-check failed"]},
+            "details": {
+                "errors": ["server health-check failed"]
+            }
         }
         try:
             results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
             if results.status_code != 200:
-                logger.error(
-                    f"!! issue with the {service_name}-health code:{results.status_code}"
-                )
+                logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
                 logger.error(results.text)
                 # fail_response["details"]["errors"].append(results.text)
                 return fail_response
@@ -99,7 +104,10 @@ def __check_be_service(service_name):
             logger.error("couldn't get response")
             # fail_response["details"]["errors"].append(str(e))
             return fail_response
-        return {"health": True, "details": {}}
+        return {
+            "health": True,
+            "details": {}
+        }

     return fn

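__check_be_service is a factory: it closes over the service name and hands back a zero-argument probe that the health map calls later. A compact sketch of the same closure pattern, with a hypothetical endpoint URL:

    import requests

    def make_http_check(url: str, timeout: float = 2.0):
        """Return a callable reporting {'health': bool, 'details': {...}} for `url`."""
        def fn(*_):
            try:
                resp = requests.get(url, timeout=timeout)
                return {"health": resp.status_code == 200, "details": {}}
            except requests.RequestException as e:
                return {"health": False, "details": {"errors": [str(e)]}}
        return fn

    check_alerts = make_http_check("http://alerts-openreplay:8888/metrics")  # hypothetical URL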
@@ -107,7 +115,7 @@ def __check_be_service(service_name):
 def __check_redis(*_):
     fail_response = {
         "health": False,
-        "details": {"errors": ["server health-check failed"]},
+        "details": {"errors": ["server health-check failed"]}
     }
     if config("REDIS_STRING", default=None) is None:
         # fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
@@ -126,14 +134,16 @@ def __check_redis(*_):
         "health": True,
         "details": {
             # "version": r.execute_command('INFO')['redis_version']
-        },
+        }
     }


 def __check_SSL(*_):
     fail_response = {
         "health": False,
-        "details": {"errors": ["SSL Certificate health-check failed"]},
+        "details": {
+            "errors": ["SSL Certificate health-check failed"]
+        }
     }
     try:
         requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
@@ -141,7 +151,10 @@ def __check_SSL(*_):
         logger.error("!! health failed: SSL Certificate")
         logger.exception(e)
         return fail_response
-    return {"health": True, "details": {}}
+    return {
+        "health": True,
+        "details": {}
+    }


 def __get_sessions_stats(tenant_id, *_):
@@ -149,34 +162,31 @@ def __get_sessions_stats(tenant_id, *_):
         constraints = ["projects.deleted_at IS NULL"]
         if tenant_id:
             constraints.append("tenant_id=%(tenant_id)s")
-        query = cur.mogrify(
-            f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
+        query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
                                        COALESCE(SUM(events_count),0) AS e_c
                                 FROM public.projects_stats
                                 INNER JOIN public.projects USING(project_id)
                                 WHERE {" AND ".join(constraints)};""",
-            {"tenant_id": tenant_id},
-        )
+                            {"tenant_id": tenant_id})
         cur.execute(query)
         row = cur.fetchone()
-    return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}
+    return {
+        "numberOfSessionsCaptured": row["s_c"],
+        "numberOfEventCaptured": row["e_c"]
+    }


 def get_health(tenant_id=None):
     health_map = {
         "databases": {
             "postgres": __check_database_pg,
-            "clickhouse": __check_database_ch,
+            "clickhouse": __check_database_ch
         },
         "ingestionPipeline": {
-            **(
-                {"redis": __check_redis}
-                if config("REDIS_STRING", default=None)
-                and len(config("REDIS_STRING")) > 0
-                else {}
-            ),
+            **({"redis": __check_redis} if config("REDIS_STRING", default=None)
+               and len(config("REDIS_STRING")) > 0 else {}),
             # "kafka": __check_kafka
-            "kafka": __always_healthy,
+            "kafka": __always_healthy
         },
         "backendServices": {
             "alerts": __check_be_service("alerts"),
@@ -190,13 +200,14 @@ def get_health(tenant_id=None):
             "http": __check_be_service("http"),
             "ingress-nginx": __always_healthy,
             "integrations": __check_be_service("integrations"),
+            "peers": __check_be_service("peers"),
             # "quickwit": __check_be_service("quickwit"),
             "sink": __check_be_service("sink"),
             "sourcemapreader": __check_be_service("sourcemapreader"),
-            "storage": __check_be_service("storage"),
+            "storage": __check_be_service("storage")
         },
         "details": __get_sessions_stats,
-        "ssl": __check_SSL,
+        "ssl": __check_SSL
     }
     return __process_health(tenant_id=tenant_id, health_map=health_map)

@@ -208,16 +219,10 @@ def __process_health(tenant_id, health_map):
             response.pop(parent_key)
         elif isinstance(health_map[parent_key], dict):
             for element_key in health_map[parent_key]:
-                if config(
-                    f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
-                    cast=bool,
-                    default=False,
-                ):
+                if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
                     response[parent_key].pop(element_key)
                 else:
-                    response[parent_key][element_key] = health_map[parent_key][
-                        element_key
-                    ](tenant_id)
+                    response[parent_key][element_key] = health_map[parent_key][element_key](tenant_id)
         else:
             response[parent_key] = health_map[parent_key](tenant_id)
     return response
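__process_health walks the nested map of probes and calls each one unless a SKIP_H_<SECTION>_<NAME> environment flag disables it (for example, SKIP_H_DATABASES_CLICKHOUSE=true hides the ClickHouse check). A trimmed sketch of that traversal; the flag naming follows the hunk above, everything else is illustrative:

    from decouple import config

    def process_health(tenant_id, health_map):
        response = {}
        for section, probes in health_map.items():
            if isinstance(probes, dict):
                response[section] = {}
                for name, probe in probes.items():
                    if config(f"SKIP_H_{section.upper()}_{name.upper()}", cast=bool, default=False):
                        continue  # check disabled via env flag
                    response[section][name] = probe(tenant_id)
            else:
                response[section] = probes(tenant_id)
        return response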
@@ -225,8 +230,7 @@ def __process_health(tenant_id, health_map):

 def cron():
     with pg_client.PostgresClient() as cur:
-        query = cur.mogrify(
-            """SELECT projects.project_id,
+        query = cur.mogrify("""SELECT projects.project_id,
                                       projects.created_at,
                                       projects.sessions_last_check_at,
                                       projects.first_recorded_session_at,
@@ -234,8 +238,7 @@ def cron():
                                FROM public.projects
                                LEFT JOIN public.projects_stats USING (project_id)
                                WHERE projects.deleted_at IS NULL
-                               ORDER BY project_id;"""
-        )
+                               ORDER BY project_id;""")
         cur.execute(query)
         rows = cur.fetchall()
         for r in rows:
@@ -256,24 +259,20 @@ def cron():
                 count_start_from = r["last_update_at"]

             count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
-            params = {
-                "project_id": r["project_id"],
-                "start_ts": count_start_from,
-                "end_ts": TimeUTC.now(),
-                "sessions_count": 0,
-                "events_count": 0,
-            }
+            params = {"project_id": r["project_id"],
+                      "start_ts": count_start_from,
+                      "end_ts": TimeUTC.now(),
+                      "sessions_count": 0,
+                      "events_count": 0}

-            query = cur.mogrify(
-                """SELECT COUNT(1) AS sessions_count,
+            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
                                           COALESCE(SUM(events_count),0) AS events_count
                                    FROM public.sessions
                                    WHERE project_id=%(project_id)s
                                      AND start_ts>=%(start_ts)s
                                      AND start_ts<=%(end_ts)s
                                      AND duration IS NOT NULL;""",
-                params,
-            )
+                                params)
             cur.execute(query)
             row = cur.fetchone()
             if row is not None:
@@ -281,77 +280,65 @@ def cron():
                 params["events_count"] = row["events_count"]

                 if insert:
-                    query = cur.mogrify(
-                        """INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
+                    query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
                                            VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
-                        params,
-                    )
+                                        params)
                 else:
-                    query = cur.mogrify(
-                        """UPDATE public.projects_stats
+                    query = cur.mogrify("""UPDATE public.projects_stats
                                            SET sessions_count=sessions_count+%(sessions_count)s,
                                                events_count=events_count+%(events_count)s,
                                                last_update_at=(now() AT TIME ZONE 'utc'::text)
                                            WHERE project_id=%(project_id)s;""",
-                        params,
-                    )
+                                        params)
                 cur.execute(query)


 # this cron is used to correct the sessions&events count every week
 def weekly_cron():
     with pg_client.PostgresClient(long_query=True) as cur:
-        query = cur.mogrify(
-            """SELECT project_id,
+        query = cur.mogrify("""SELECT project_id,
                                       projects_stats.last_update_at
                                FROM public.projects
                                LEFT JOIN public.projects_stats USING (project_id)
                                WHERE projects.deleted_at IS NULL
-                               ORDER BY project_id;"""
-        )
+                               ORDER BY project_id;""")
         cur.execute(query)
         rows = cur.fetchall()
         for r in rows:
             if r["last_update_at"] is None:
                 continue

-            params = {
-                "project_id": r["project_id"],
-                "end_ts": TimeUTC.now(),
-                "sessions_count": 0,
-                "events_count": 0,
-            }
+            params = {"project_id": r["project_id"],
+                      "end_ts": TimeUTC.now(),
+                      "sessions_count": 0,
+                      "events_count": 0}

-            query = cur.mogrify(
-                """SELECT COUNT(1) AS sessions_count,
+            query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
                                           COALESCE(SUM(events_count),0) AS events_count
                                    FROM public.sessions
                                    WHERE project_id=%(project_id)s
                                      AND start_ts<=%(end_ts)s
                                      AND duration IS NOT NULL;""",
-                params,
-            )
+                                params)
             cur.execute(query)
             row = cur.fetchone()
             if row is not None:
                 params["sessions_count"] = row["sessions_count"]
                 params["events_count"] = row["events_count"]

-                query = cur.mogrify(
-                    """UPDATE public.projects_stats
+                query = cur.mogrify("""UPDATE public.projects_stats
                                        SET sessions_count=%(sessions_count)s,
                                            events_count=%(events_count)s,
                                            last_update_at=(now() AT TIME ZONE 'utc'::text)
                                        WHERE project_id=%(project_id)s;""",
-                    params,
-                )
+                                    params)
                 cur.execute(query)


 def __check_database_ch(*_):
     fail_response = {
         "health": False,
-        "details": {"errors": ["server health-check failed"]},
+        "details": {"errors": ["server health-check failed"]}
     }
     with ch_client.ClickHouseClient() as ch:
         try:
@@ -361,11 +348,9 @@ def __check_database_ch(*_):
             logger.exception(e)
             return fail_response

-        schema_version = ch.execute(
-            """SELECT 1
+        schema_version = ch.execute("""SELECT 1
                                        FROM system.functions
-                                       WHERE name = 'openreplay_version';"""
-        )
+                                       WHERE name = 'openreplay_version';""")
         if len(schema_version) > 0:
             schema_version = ch.execute("SELECT openreplay_version() AS version;")
             schema_version = schema_version[0]["version"]
@@ -380,10 +365,9 @@ def __check_database_ch(*_):
             # "version": server_version[0]["server_version"],
             # "schema": schema_version,
             # **errors
-        },
+        }
     }


 # def __check_kafka(*_):
 #     fail_response = {
 #         "health": False,
@@ -3,15 +3,12 @@ import logging
 from decouple import config

 logger = logging.getLogger(__name__)
-from . import sessions_pg
-from . import sessions_pg as sessions_legacy
-from . import sessions_ch
-from . import sessions_search as sessions_search_legacy
+from . import sessions as sessions_legacy

 if config("EXP_SESSIONS_SEARCH", cast=bool, default=False):
     logger.info(">>> Using experimental sessions search")
     from . import sessions_ch as sessions
     from . import sessions_search_exp as sessions_search
 else:
-    from . import sessions_pg as sessions
-    from . import sessions_search as sessions_search
+    from . import sessions
+    from . import sessions_search_exp
Some files were not shown because too many files have changed in this diff.