Compare commits

..

2 commits

Author         SHA1        Message                            Date
nick-delirium  a35d0d06f2  tracker: rm idlecb                 2025-03-03 17:38:50 +01:00
nick-delirium  13ae208462  tracker: testing workerless model  2025-03-03 17:38:49 +01:00
2438 changed files with 44367 additions and 70207 deletions

View file

@ -1,122 +0,0 @@
# This action will push the assist changes to aws
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
required: false
default: "false"
push:
branches:
- dev
paths:
- "ee/assist-server/**"
name: Build and Deploy Assist-Server EE
jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- uses: ./.github/composite-actions/update-keys
with:
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
assist_key: ${{ secrets.ASSIST_KEY }}
domain_name: ${{ secrets.EE_DOMAIN_NAME }}
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
jwt_secret: ${{ secrets.EE_JWT_SECRET }}
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
license_key: ${{ secrets.EE_LICENSE_KEY }}
minio_access_key: ${{ secrets.EE_MINIO_ACCESS_KEY }}
minio_secret_key: ${{ secrets.EE_MINIO_SECRET_KEY }}
pg_password: ${{ secrets.EE_PG_PASSWORD }}
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
name: Update Keys
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Building and Pushing Assist-Server image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd assist-server
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("assist-server")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
images=("assist-server")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Deploy to kubernetes
run: |
pwd
cd scripts/helmcharts/
# Update changed image tag
sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
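For readers following the "Creating old image input" step above, here is a minimal bash sketch of the override file that loop generates, assuming a single hypothetical pod image registry.example.com/foss/chalice:v1.22.0-ee (the registry host, chart name and tag are illustrative and not taken from this diff).

# One hypothetical entry; `cut -d '/' -f3` on registry.example.com/foss/chalice:v1.22.0-ee yields "chalice:v1.22.0-ee"
line="chalice:v1.22.0-ee"
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF
${image_array[0]}:
  image:
    # the -ee suffix is stripped because helm appends it again
    tag: $(echo ${image_array[1]} | cut -d '-' -f 1)
EOF
# Expected output:
# chalice:
#   image:
#     tag: v1.22.0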

View file

@ -1,189 +0,0 @@
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
on:
workflow_dispatch:
inputs:
services:
description: 'Comma separated names of services to build(in small letters).'
required: true
default: 'chalice,frontend'
tag:
description: 'Tag to update.'
required: true
type: string
branch:
description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
required: true
type: string
name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag
jobs:
deploy:
name: Build Patch from old tag
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
with:
fetch-depth: 4
ref: ${{ github.event.inputs.tag }}
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
- name: Create backup tag with timestamp
run: |
set -e # Exit immediately if a command exits with a non-zero status
TIMESTAMP=$(date +%Y%m%d%H%M%S)
BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
echo "Created backup tag: $BACKUP_TAG"
# Get the oldest commit date from the last 3 commits in raw format
OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
# Add 1 second to the timestamp
NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV
- name: Setup yq
uses: mikefarah/yq@master
# Configure AWS credentials for the first registry
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
uses: aws-actions/configure-aws-credentials@v1
with:
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
id: login-ecr-arm
run: |
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
- uses: depot/setup-action@v1
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name
run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV
- name: Build
id: build-image
env:
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
MSAAS_REPO_FOLDER: /tmp/msaas
run: |
set -exo pipefail
git config --local user.email "action@github.com"
git config --local user.name "GitHub Action"
git checkout -b $BRANCH_NAME
working_dir=$(pwd)
function image_version(){
local service=$1
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
current_version=$(yq eval '.AppVersion' $chart_path)
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
echo $new_version
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
}
function clone_msaas() {
[ -d $MSAAS_REPO_FOLDER ] || {
git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
cd $MSAAS_REPO_FOLDER
cd openreplay && git fetch origin && git checkout $INPUT_TAG
git log -1
cd $MSAAS_REPO_FOLDER
bash git-init.sh
git checkout
}
}
function build_managed() {
local service=$1
local version=$2
echo building managed
clone_msaas
if [[ $service == 'chalice' ]]; then
cd $MSAAS_REPO_FOLDER/openreplay/api
else
cd $MSAAS_REPO_FOLDER/openreplay/$service
fi
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
}
# Checking for backend images
ls backend/cmd >> /tmp/backend.txt
echo Services: "${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
BUILD_SCRIPT_NAME="build.sh"
# Build FOSS
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
ee_build_args="ee"
fi
version=$(image_version $SERVICE)
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
else
build_managed $SERVICE $version
fi
cd $working_dir
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
yq eval ".AppVersion = \"$version\"" -i $chart_path
git add $chart_path
git commit -m "Increment $SERVICE chart version"
done
- name: Change commit timestamp
run: |
# Convert the timestamp to a date format git can understand
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
echo "Setting commit date to: $NEW_DATE"
# Amend the commit with the new date
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
# Verify the change
git log -1 --pretty=format:"Commit now dated: %cD"
# git tag and push
git tag $INPUT_TAG -f
git push origin $INPUT_TAG -f
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
# DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
# MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
# MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
# MSAAS_REPO_FOLDER: /tmp/msaas
# with:
# limit-access-to-actor: true
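The image_version helper in the workflow above bumps the last numeric component of a chart's AppVersion with awk; a minimal sketch under an assumed current version of v1.22.3 (the value is illustrative, not read from any Chart.yaml in this diff):

current_version="v1.22.3"   # hypothetical AppVersion read from a Chart.yaml
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
echo "$new_version"         # prints: v1.22.4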

View file

@ -2,6 +2,7 @@
on:
workflow_dispatch:
+description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
inputs:
services:
description: 'Comma separated names of services to build(in small letters).'
@ -19,20 +20,12 @@ jobs:
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
-uses: actions/checkout@v4
+uses: actions/checkout@v2
with:
-fetch-depth: 0
-token: ${{ secrets.GITHUB_TOKEN }}
+fetch-depth: 1
- name: Rebase with main branch, to make sure the code has latest main changes
-if: github.ref != 'refs/heads/main'
run: |
-git remote -v
-git config --global user.email "action@github.com"
-git config --global user.name "GitHub Action"
-git config --global rebase.autoStash true
-git fetch origin main:main
-git rebase main
-git log -3
+git pull --rebase origin main
- name: Downloading yq
run: |
@ -55,8 +48,6 @@ jobs:
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
- uses: depot/setup-action@v1
-env:
-DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
- name: Get HEAD Commit ID
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
- name: Define Branch Name
@ -74,168 +65,78 @@ jobs:
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
MSAAS_REPO_FOLDER: /tmp/msaas
-SERVICES_INPUT: ${{ github.event.inputs.services }}
run: |
-#!/bin/bash
-set -euo pipefail
-# Configuration
-readonly WORKING_DIR=$(pwd)
-readonly BUILD_SCRIPT_NAME="build.sh"
-readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
-# Initialize git configuration
-setup_git() {
-git config --local user.email "action@github.com"
-git config --local user.name "GitHub Action"
-git checkout -b "$BRANCH_NAME"
-}
-# Get and increment image version
-image_version() {
-local service=$1
-local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-local current_version new_version
-current_version=$(yq eval '.AppVersion' "$chart_path")
-new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
-echo "$new_version"
-}
-# Clone MSAAS repository if not exists
-clone_msaas() {
-if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
-git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
-cd "$MSAAS_REPO_FOLDER"
-cd openreplay && git fetch origin && git checkout main
-git log -1
-cd "$MSAAS_REPO_FOLDER"
-bash git-init.sh
-git checkout
-fi
-}
-# Build managed services
-build_managed() {
-local service=$1
-local version=$2
-echo "Building managed service: $service"
-clone_msaas
-if [[ $service == 'chalice' ]]; then
-cd "$MSAAS_REPO_FOLDER/openreplay/api"
-else
-cd "$MSAAS_REPO_FOLDER/openreplay/$service"
-fi
-local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
-echo "Executing: $build_cmd"
-if ! eval "$build_cmd" 2>&1; then
-echo "Build failed for $service"
-exit 1
-fi
-}
-# Build service with given arguments
-build_service() {
-local service=$1
-local version=$2
-local build_args=$3
-local build_script=${4:-$BUILD_SCRIPT_NAME}
-local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
-echo "Executing: $command"
-eval "$command"
-}
-# Update chart version and commit changes
-update_chart_version() {
-local service=$1
-local version=$2
-local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
-# Ensure we're in the original working directory/repository
-cd "$WORKING_DIR"
-yq eval ".AppVersion = \"$version\"" -i "$chart_path"
-git add "$chart_path"
-git commit -m "Increment $service chart version to $version"
-git push --set-upstream origin "$BRANCH_NAME"
-cd -
-}
-# Main execution
-main() {
-setup_git
-# Get backend services list
-ls backend/cmd >"$BACKEND_SERVICES_FILE"
-# Parse services input (fix for GitHub Actions syntax)
-echo "Services: ${SERVICES_INPUT:-$1}"
-IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
-# Process each service
-for service in "${services[@]}"; do
-echo "Processing service: $service"
-cd "$WORKING_DIR"
-local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
-# Determine build configuration based on service type
-if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
-# Backend service
-cd backend
-foss_build_args="nil $service"
-ee_build_args="ee $service"
-else
-# Non-backend service
-case "$service" in
-chalice | alerts | crons)
-cd "$WORKING_DIR/api"
-;;
-*)
-cd "$service"
-;;
-esac
-# Special build scripts for alerts/crons
-if [[ $service == 'alerts' || $service == 'crons' ]]; then
-build_script="build_${service}.sh"
-fi
-ee_build_args="ee"
-fi
-# Get version and build
-local version
-version=$(image_version "$service")
-# Build FOSS and EE versions
-build_service "$service" "$version" "$foss_build_args"
-build_service "$service" "${version}-ee" "$ee_build_args"
-# Build managed version for specific services
-if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
-echo "Nothing to build in managed for service $service"
-else
-build_managed "$service" "$version"
-fi
-# Update chart and commit
-update_chart_version "$service" "$version"
-done
-cd "$WORKING_DIR"
-# Cleanup
-rm -f "$BACKEND_SERVICES_FILE"
-}
-echo "Working directory: $WORKING_DIR"
-# Run main function with all arguments
-main "$SERVICES_INPUT"
+set -exo pipefail
+git config --local user.email "action@github.com"
+git config --local user.name "GitHub Action"
+git checkout -b $BRANCH_NAME
+working_dir=$(pwd)
+function image_version(){
+local service=$1
+chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
+current_version=$(yq eval '.AppVersion' $chart_path)
+new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
+echo $new_version
+# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
+}
+function clone_msaas() {
+[ -d $MSAAS_REPO_FOLDER ] || {
+git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
+cd $MSAAS_REPO_FOLDER
+cd openreplay && git fetch origin && git checkout main # This have to be changed to specific tag
+git log -1
+cd $MSAAS_REPO_FOLDER
+bash git-init.sh
+git checkout
+}
+}
+function build_managed() {
+local service=$1
+local version=$2
+echo building managed
+clone_msaas
+if [[ $service == 'chalice' ]]; then
+cd $MSAAS_REPO_FOLDER/openreplay/api
+else
+cd $MSAAS_REPO_FOLDER/openreplay/$service
+fi
+IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
+}
+# Checking for backend images
+ls backend/cmd >> /tmp/backend.txt
+echo Services: "${{ github.event.inputs.services }}"
+IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
+BUILD_SCRIPT_NAME="build.sh"
+# Build FOSS
+for SERVICE in "${SERVICES[@]}"; do
+# Check if service is backend
+if grep -q $SERVICE /tmp/backend.txt; then
+cd backend
+foss_build_args="nil $SERVICE"
+ee_build_args="ee $SERVICE"
+else
+[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
+[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
+ee_build_args="ee"
+fi
+version=$(image_version $SERVICE)
+echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
+IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
+if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
+IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
+else
+build_managed $SERVICE $version
+fi
+cd $working_dir
+chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
+yq eval ".AppVersion = \"$version\"" -i $chart_path
+git add $chart_path
+git commit -m "Increment $SERVICE chart version"
+git push --set-upstream origin $BRANCH_NAME
+done
- name: Create Pull Request
uses: repo-sync/pull-request@v2
@ -246,7 +147,8 @@ jobs:
pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
pr_body: |
This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
-Once this PR is merged, tag update job will run automatically.
+Once this PR is merged, To update the latest tag, run the following workflow.
+https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml
# - name: Debug Job
# if: ${{ failure() }}

View file

@ -1,103 +0,0 @@
name: Release Deployment
on:
workflow_dispatch:
inputs:
services:
description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
required: true
branch:
description: 'Branch to deploy (defaults to dev)'
required: false
default: 'dev'
env:
IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.branch }}
- name: Docker login
run: |
docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
- name: Set image tag with branch info
run: |
SHORT_SHA=$(git rev-parse --short HEAD)
echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
echo "Using image tag: $IMAGE_TAG"
- uses: depot/setup-action@v1
- name: Build and push Docker images
run: |
# Parse the comma-separated services list into an array
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
working_dir=$(pwd)
# Define backend services (consider moving this to workflow inputs or repo config)
ls backend/cmd >> /tmp/backend.txt
BUILD_SCRIPT_NAME="build.sh"
for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd $working_dir/backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
cd $working_dir
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
ee_build_args="ee"
fi
{
echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
}&
{
echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
}&
done
wait
- uses: azure/k8s-set-context@v1
name: Using ee release cluster
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_RELEASE_KUBECONFIG }}
- name: Deploy to ee release Kubernetes
run: |
echo "Deploying services to EE cluster: ${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
for SERVICE in "${SERVICES[@]}"; do
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
done
- uses: azure/k8s-set-context@v1
name: Using foss release cluster
with:
method: kubeconfig
kubeconfig: ${{ secrets.FOSS_RELEASE_KUBECONFIG }}
- name: Deploy to FOSS release Kubernetes
run: |
echo "Deploying services to FOSS cluster: ${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
for SERVICE in "${SERVICES[@]}"; do
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
done

View file

@ -1,42 +1,35 @@
on:
-pull_request:
-types: [closed]
-branches:
-- main
-name: Release tag update --force
+workflow_dispatch:
+description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
+inputs:
+services:
+description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
+required: true
+default: "false"
+name: Force Push tag with main branch HEAD
jobs:
deploy:
name: Build Patch from main
runs-on: ubuntu-latest
-if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
+env:
+DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
+DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
steps:
- name: Checkout
uses: actions/checkout@v2
-- name: Get latest release tag using GitHub API
-id: get-latest-tag
-run: |
-LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
-"https://api.github.com/repos/${{ github.repository }}/releases/latest" \
-| jq -r .tag_name)
-# Fallback to git command if API doesn't return a tag
-if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
-echo "Not found latest tag"
-exit 100
-fi
-echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
-echo "Latest tag: $LATEST_TAG"
- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
-git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}
+git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
- name: Push main branch to tag
run: |
-git fetch --tags
git checkout main
-echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
-git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
+git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
+# - name: Debug Job
+# if: ${{ failure() }}
+# uses: mxschmitt/action-tmate@v3
+# with:
+# limit-access-to-actor: true
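The "+" side of the "Push main branch to tag" step above selects the newest v-prefixed tag with "git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1"; a small bash sketch of that selection against a throwaway repository with hypothetical tags:

git init -q /tmp/tag-demo && cd /tmp/tag-demo
git -c user.name=demo -c user.email=demo@example.com commit --allow-empty -qm init
git tag v1.9.0; git tag v1.21.0; git tag v1.22.0
# --sort=-v:refname orders by version, newest first, so the highest existing tag wins
git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1   # prints: v1.22.0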

View file

@ -1,17 +1,10 @@
-FROM python:3.12-alpine AS builder
-LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
-LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
-RUN apk add --no-cache build-base
-WORKDIR /work
-COPY requirements.txt ./requirements.txt
-RUN pip install --no-cache-dir --upgrade uv && \
-export UV_SYSTEM_PYTHON=true && \
-uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
-uv pip install --no-cache-dir --upgrade -r requirements.txt
FROM python:3.12-alpine
+LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
+LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
-LABEL GIT_SHA=$GIT_SHA
+RUN apk add --no-cache build-base tini
ARG envarg
# Add Tini
# Startup daemon
@ -21,11 +14,19 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
PRIVATE_ENDPOINTS=false \
ENTERPRISE_BUILD=${envarg} \
GIT_SHA=$GIT_SHA
-COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
-COPY --from=builder /usr/local/bin /usr/local/bin
WORKDIR /work
+COPY requirements.txt ./requirements.txt
+RUN pip install --no-cache-dir --upgrade uv
+RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
+RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system
COPY . .
-RUN apk add --no-cache tini && mv env.default .env
-RUN adduser -u 1001 openreplay -D
-USER 1001
+RUN mv env.default .env
ENTRYPOINT ["/sbin/tini", "--"]
-CMD ["./entrypoint.sh"]
+CMD ./entrypoint.sh

View file

@ -4,8 +4,7 @@ from pydantic_core._pydantic_core import ValidationError
import schemas
from chalicelib.core.alerts import alerts, alerts_listener
-from chalicelib.core.alerts.modules import alert_helpers
-from chalicelib.core.sessions import sessions_pg as sessions
+from chalicelib.core.alerts.modules import sessions, alert_helpers
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
@ -132,7 +131,6 @@ def Build(a):
def process():
-logger.info("> processing alerts on PG")
notifications = []
all_alerts = alerts_listener.get_all_alerts()
with pg_client.PostgresClient() as cur:

View file

@ -3,11 +3,10 @@ import logging
from pydantic_core._pydantic_core import ValidationError
import schemas
+from chalicelib.core.alerts import alerts, alerts_listener
+from chalicelib.core.alerts.modules import sessions, alert_helpers
from chalicelib.utils import pg_client, ch_client, exp_ch_helper
from chalicelib.utils.TimeUTC import TimeUTC
-from chalicelib.core.alerts import alerts, alerts_listener
-from chalicelib.core.alerts.modules import alert_helpers
-from chalicelib.core.sessions import sessions_ch as sessions
logger = logging.getLogger(__name__)
@ -156,7 +155,6 @@ def Build(a):
def process():
-logger.info("> processing alerts on CH")
notifications = []
all_alerts = alerts_listener.get_all_alerts()
with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:

View file

@ -1,3 +1,9 @@
+from decouple import config
TENANT_ID = "-1"
+if config("EXP_ALERTS", cast=bool, default=False):
+from chalicelib.core.sessions import sessions_ch as sessions
+else:
+from chalicelib.core.sessions import sessions
from . import helpers as alert_helpers

View file

@ -37,7 +37,8 @@ def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
logger.debug("! JWT Expired signature") logger.debug("! JWT Expired signature")
return None return None
except BaseException as e: except BaseException as e:
logger.warning("! JWT Base Exception", exc_info=e) logger.warning("! JWT Base Exception")
logger.debug(e)
return None return None
return payload return payload
@ -55,7 +56,8 @@ def jwt_refresh_authorizer(scheme: str, token: str):
logger.debug("! JWT-refresh Expired signature") logger.debug("! JWT-refresh Expired signature")
return None return None
except BaseException as e: except BaseException as e:
logger.error("! JWT-refresh Base Exception", exc_info=e) logger.warning("! JWT-refresh Base Exception")
logger.debug(e)
return None return None
return payload return payload

View file

@ -85,8 +85,7 @@ def __generic_query(typename, value_length=None):
ORDER BY value""" ORDER BY value"""
if value_length is None or value_length > 2: if value_length is None or value_length > 2:
return f"""SELECT DISTINCT ON(value,type) value, type return f"""(SELECT DISTINCT value, type
((SELECT DISTINCT value, type
FROM {TABLE} FROM {TABLE}
WHERE WHERE
project_id = %(project_id)s project_id = %(project_id)s
@ -102,7 +101,7 @@ def __generic_query(typename, value_length=None):
AND type='{typename.upper()}'
AND value ILIKE %(value)s
ORDER BY value
-LIMIT 5)) AS raw;"""
+LIMIT 5);"""
return f"""SELECT DISTINCT value, type
FROM {TABLE}
WHERE
@ -125,7 +124,7 @@ def __generic_autocomplete(event: Event):
return f
-def generic_autocomplete_metas(typename):
+def __generic_autocomplete_metas(typename):
def f(project_id, text):
with pg_client.PostgresClient() as cur:
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
@ -327,7 +326,7 @@ def __search_metadata(project_id, value, key=None, source=None):
AND {colname} ILIKE %(svalue)s LIMIT 5)""")
with pg_client.PostgresClient() as cur:
cur.execute(cur.mogrify(f"""\
-SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
+SELECT key, value, 'METADATA' AS TYPE
FROM({" UNION ALL ".join(sub_from)}) AS all_metas
LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)}))

View file

@ -13,18 +13,15 @@ def get_state(tenant_id):
if len(pids) > 0:
cur.execute(
-cur.mogrify(
-"""SELECT EXISTS(( SELECT 1
+cur.mogrify("""SELECT EXISTS(( SELECT 1
FROM public.sessions AS s
WHERE s.project_id IN %(ids)s)) AS exists;""",
-{"ids": tuple(pids)},
-)
+{"ids": tuple(pids)})
)
recorded = cur.fetchone()["exists"]
meta = False
if recorded:
-query = cur.mogrify(
-f"""SELECT EXISTS((SELECT 1
+query = cur.mogrify(f"""SELECT EXISTS((SELECT 1
FROM public.projects AS p
LEFT JOIN LATERAL ( SELECT 1
FROM public.sessions
@ -39,35 +36,26 @@ def get_state(tenant_id):
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
OR p.metadata_10 IS NOT NULL )
)) AS exists;""",
-{"tenant_id": tenant_id},
-)
+{"tenant_id": tenant_id})
cur.execute(query)
meta = cur.fetchone()["exists"]
return [
-{
-"task": "Install OpenReplay",
-"done": recorded,
-"URL": "https://docs.openreplay.com/getting-started/quick-start",
-},
-{
-"task": "Identify Users",
-"done": meta,
-"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
-},
-{
-"task": "Invite Team Members",
-"done": len(users.get_members(tenant_id=tenant_id)) > 1,
-"URL": "https://app.openreplay.com/client/manage-users",
-},
-{
-"task": "Integrations",
-"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
-or len(sentry.get_all(tenant_id=tenant_id)) > 0
-or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
-"URL": "https://docs.openreplay.com/integrations",
-},
+{"task": "Install OpenReplay",
+"done": recorded,
+"URL": "https://docs.openreplay.com/getting-started/quick-start"},
+{"task": "Identify Users",
+"done": meta,
+"URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
+{"task": "Invite Team Members",
+"done": len(users.get_members(tenant_id=tenant_id)) > 1,
+"URL": "https://app.openreplay.com/client/manage-users"},
+{"task": "Integrations",
+"done": len(datadog.get_all(tenant_id=tenant_id)) > 0 \
+or len(sentry.get_all(tenant_id=tenant_id)) > 0 \
+or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
+"URL": "https://docs.openreplay.com/integrations"}
]
@ -78,26 +66,21 @@ def get_state_installing(tenant_id):
if len(pids) > 0:
cur.execute(
-cur.mogrify(
-"""SELECT EXISTS(( SELECT 1
+cur.mogrify("""SELECT EXISTS(( SELECT 1
FROM public.sessions AS s
WHERE s.project_id IN %(ids)s)) AS exists;""",
-{"ids": tuple(pids)},
-)
+{"ids": tuple(pids)})
)
recorded = cur.fetchone()["exists"]
-return {
-"task": "Install OpenReplay",
-"done": recorded,
-"URL": "https://docs.openreplay.com/getting-started/quick-start",
-}
+return {"task": "Install OpenReplay",
+"done": recorded,
+"URL": "https://docs.openreplay.com/getting-started/quick-start"}
def get_state_identify_users(tenant_id):
with pg_client.PostgresClient() as cur:
-query = cur.mogrify(
-f"""SELECT EXISTS((SELECT 1
+query = cur.mogrify(f"""SELECT EXISTS((SELECT 1
FROM public.projects AS p
LEFT JOIN LATERAL ( SELECT 1
FROM public.sessions
@ -112,32 +95,25 @@ def get_state_identify_users(tenant_id):
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
OR p.metadata_10 IS NOT NULL )
)) AS exists;""",
-{"tenant_id": tenant_id},
-)
+{"tenant_id": tenant_id})
cur.execute(query)
meta = cur.fetchone()["exists"]
-return {
-"task": "Identify Users",
-"done": meta,
-"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
-}
+return {"task": "Identify Users",
+"done": meta,
+"URL": "https://docs.openreplay.com/data-privacy-security/metadata"}
def get_state_manage_users(tenant_id):
-return {
-"task": "Invite Team Members",
-"done": len(users.get_members(tenant_id=tenant_id)) > 1,
-"URL": "https://app.openreplay.com/client/manage-users",
-}
+return {"task": "Invite Team Members",
+"done": len(users.get_members(tenant_id=tenant_id)) > 1,
+"URL": "https://app.openreplay.com/client/manage-users"}
def get_state_integrations(tenant_id):
-return {
-"task": "Integrations",
-"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
-or len(sentry.get_all(tenant_id=tenant_id)) > 0
-or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
-"URL": "https://docs.openreplay.com/integrations",
-}
+return {"task": "Integrations",
+"done": len(datadog.get_all(tenant_id=tenant_id)) > 0 \
+or len(sentry.get_all(tenant_id=tenant_id)) > 0 \
+or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
+"URL": "https://docs.openreplay.com/integrations"}

View file

@ -4,10 +4,10 @@ from decouple import config
logger = logging.getLogger(__name__)
-from . import errors_pg as errors_legacy
+from . import errors as errors_legacy
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
logger.info(">>> Using experimental error search")
from . import errors_ch as errors
else:
-from . import errors_pg as errors
+from . import errors

View file

@ -1,8 +1,7 @@
import json
-from typing import List
+from typing import Optional, List
import schemas
-from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.sessions import sessions_search
from chalicelib.core.sourcemaps import sourcemaps
from chalicelib.utils import pg_client, helper
@ -52,6 +51,27 @@ def get_batch(error_ids):
return helper.list_to_camel_case(errors)
def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
chart: bool = False, step_size_name: str = "step_size",
project_key: Optional[str] = "project_id"):
if project_key is None:
ch_sub_query = []
else:
ch_sub_query = [f"{project_key} =%(project_id)s"]
if time_constraint:
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
f"timestamp < %({endTime_arg_name})s"]
if chart:
ch_sub_query += [f"timestamp >= generated_timestamp",
f"timestamp < generated_timestamp + %({step_size_name})s"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
def __get_sort_key(key):
return {
schemas.ErrorSort.OCCURRENCE: "max_datetime",
@ -70,13 +90,12 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
platform = f.value[0]
-pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
+pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
"pe.project_id=%(project_id)s"]
# To ignore Script error
pg_sub_query.append("pe.message!='Script error.'")
-pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
-project_key=None)
+pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
if platform:
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
pg_sub_query_chart.append("errors.error_id =details.error_id")

View file

@ -1,11 +1,10 @@
import schemas
from chalicelib.core import metadata
-from chalicelib.core.errors import errors_legacy
-from chalicelib.core.errors.modules import errors_helper
from chalicelib.core.errors.modules import sessions
from chalicelib.utils import ch_client, exp_ch_helper
from chalicelib.utils import helper, metrics_helper
from chalicelib.utils.TimeUTC import TimeUTC
+from . import errors as errors_legacy
def _multiple_values(values, value_key="value"):
@ -62,6 +61,25 @@ def get_batch(error_ids):
return errors_legacy.get_batch(error_ids=error_ids)
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", type_condition=True, project_key="project_id", table_name=None):
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
if table_name is not None:
table_name = table_name + "."
else:
table_name = ""
if type_condition:
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
if time_constraint:
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
table_name=None):
@ -98,7 +116,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
for f in data.filters:
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
platform = f.value[0]
-ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
+ch_sessions_sub_query = __get_basic_constraints(platform, type_condition=False)
# ignore platform for errors table
ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")
@ -130,8 +148,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
if len(data.events) > errors_condition_count:
subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
errors_only=True,
-project_id=project.project_id,
-user_id=user_id,
+project_id=project.project_id, user_id=user_id,
issue=None,
favorite_only=False)
subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
@ -338,14 +355,14 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
SELECT details.error_id as error_id,
name, message, users, total,
sessions, last_occurrence, first_occurrence, chart
-FROM (SELECT error_id,
+FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
JSONExtractString(toString(`$properties`), 'name') AS name,
JSONExtractString(toString(`$properties`), 'message') AS message,
COUNT(DISTINCT user_id) AS users,
COUNT(DISTINCT events.session_id) AS sessions,
MAX(created_at) AS max_datetime,
MIN(created_at) AS min_datetime,
-COUNT(DISTINCT error_id)
+COUNT(DISTINCT JSONExtractString(toString(`$properties`), 'error_id'))
OVER() AS total
FROM {MAIN_EVENTS_TABLE} AS events
INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
@ -357,7 +374,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
GROUP BY error_id, name, message
ORDER BY {sort} {order}
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
-INNER JOIN (SELECT error_id,
+INNER JOIN (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
FROM {MAIN_EVENTS_TABLE}
@ -366,7 +383,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
GROUP BY error_id) AS time_details
ON details.error_id=time_details.error_id
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
-FROM (SELECT error_id,
+FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
gs.generate_series AS timestamp,
COUNT(DISTINCT session_id) AS count
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs

View file

@ -1,5 +1,5 @@
-from chalicelib.core.errors.modules import errors_helper
+from chalicelib.core.errors import errors_legacy as errors
+from chalicelib.utils import errors_helper
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils.metrics_helper import get_step_size
@ -40,29 +40,26 @@ def __process_tags(row):
def get_details(project_id, error_id, user_id, **data):
-pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
-step_size_name="step_size24")
+pg_sub_query24 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
pg_sub_query24.append("error_id = %(error_id)s")
-pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
+pg_sub_query30_session = errors.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
endTime_arg_name="endDate30",
project_key="sessions.project_id")
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_session.append("error_id = %(error_id)s")
-pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
+pg_sub_query30_err = errors.__get_basic_constraints(time_constraint=True, chart=False,
startTime_arg_name="startDate30",
-endTime_arg_name="endDate30",
-project_key="errors.project_id")
+endTime_arg_name="endDate30", project_key="errors.project_id")
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
pg_sub_query30_err.append("error_id = %(error_id)s")
pg_sub_query30_err.append("source ='js_exception'")
-pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
-step_size_name="step_size30")
+pg_sub_query30 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
pg_sub_query30.append("error_id = %(error_id)s")
-pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
+pg_basic_query = errors.__get_basic_constraints(time_constraint=False)
pg_basic_query.append("error_id = %(error_id)s")
with pg_client.PostgresClient() as cur:
data["startDate24"] = TimeUTC.now(-1)
@ -98,7 +95,8 @@ def get_details(project_id, error_id, user_id, **data):
device_partition,
country_partition,
chart24,
-chart30
+chart30,
+custom_tags
FROM (SELECT error_id,
name,
message,
@ -113,8 +111,15 @@ def get_details(project_id, error_id, user_id, **data):
MIN(timestamp) AS first_occurrence
FROM events.errors
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
-INNER JOIN (SELECT session_id AS last_session_id
+INNER JOIN (SELECT session_id AS last_session_id,
+coalesce(custom_tags, '[]')::jsonb AS custom_tags
FROM events.errors
+LEFT JOIN LATERAL (
+SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
+FROM errors_tags
+WHERE errors_tags.error_id = %(error_id)s
+AND errors_tags.session_id = errors.session_id
+AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
WHERE error_id = %(error_id)s
ORDER BY errors.timestamp DESC
LIMIT 1) AS last_session_details ON (TRUE)

View file

@ -3,9 +3,8 @@ import logging
from decouple import config
logger = logging.getLogger(__name__)
-from . import helper as errors_helper
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
-import chalicelib.core.sessions.sessions_ch as sessions
+from chalicelib.core.sessions import sessions_ch as sessions
else:
-import chalicelib.core.sessions.sessions_pg as sessions
+from chalicelib.core.sessions import sessions

View file

@ -1,58 +0,0 @@
from typing import Optional
import schemas
from chalicelib.core.sourcemaps import sourcemaps
def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
chart: bool = False, step_size_name: str = "step_size",
project_key: Optional[str] = "project_id"):
if project_key is None:
ch_sub_query = []
else:
ch_sub_query = [f"{project_key} =%(project_id)s"]
if time_constraint:
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
f"timestamp < %({endTime_arg_name})s"]
if chart:
ch_sub_query += [f"timestamp >= generated_timestamp",
f"timestamp < generated_timestamp + %({step_size_name})s"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
table_name=None):
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
if table_name is not None:
table_name = table_name + "."
else:
table_name = ""
if type_condition:
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
if time_constraint:
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
if platform == schemas.PlatformType.MOBILE:
ch_sub_query.append("user_device_type = 'mobile'")
elif platform == schemas.PlatformType.DESKTOP:
ch_sub_query.append("user_device_type = 'desktop'")
return ch_sub_query
def format_first_stack_frame(error):
error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
for s in error["stack"]:
for c in s.get("context", []):
for sci, sc in enumerate(c):
if isinstance(sc, str) and len(sc) > 1000:
c[sci] = sc[:1000]
# convert bytes to string:
if isinstance(s["filename"], bytes):
s["filename"] = s["filename"].decode("utf-8")
return error
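The removed helpers above only accumulate SQL predicates as strings; nothing in this file executes them. As a rough usage sketch (the query text is illustrative, the join step mirrors the " AND ".join(...) calls seen elsewhere in this changeset):

constraints = ["project_id = %(project_id)s",
               "timestamp >= %(startDate)s",
               "timestamp < %(endDate)s",
               "user_device_type = 'mobile'"]
query = f"SELECT COUNT(1) FROM events.errors WHERE {' AND '.join(constraints)};"
# -> SELECT COUNT(1) FROM events.errors WHERE project_id = %(project_id)s AND timestamp >= %(startDate)s AND ...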

View file

@ -1,9 +1,8 @@
-from functools import cache
 from typing import Optional
 import schemas
-from chalicelib.core import issues
 from chalicelib.core.autocomplete import autocomplete
+from chalicelib.core import issues
 from chalicelib.core.sessions import sessions_metas
 from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
@ -138,57 +137,52 @@ class EventType:
                                 column=None)  # column=None because errors are searched by name or message
-@cache
-def supported_types():
-    return {
+SUPPORTED_TYPES = {
     EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
                                              query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
     EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
                                              query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
     EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
                                                 query=autocomplete.__generic_query(typename=EventType.LOCATION.ui_type)),
     EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
                                               query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
     EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
                                                query=autocomplete.__generic_query(typename=EventType.REQUEST.ui_type)),
     EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
                                                query=autocomplete.__generic_query(typename=EventType.GRAPHQL.ui_type)),
     EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
                                                    query=autocomplete.__generic_query(typename=EventType.STATEACTION.ui_type)),
     EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
     EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors, query=None),
     EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata, query=None),
     # MOBILE
     EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
                                                     query=autocomplete.__generic_query(typename=EventType.CLICK_MOBILE.ui_type)),
     EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
                                                     query=autocomplete.__generic_query(typename=EventType.SWIPE_MOBILE.ui_type)),
     EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
                                                     query=autocomplete.__generic_query(typename=EventType.INPUT_MOBILE.ui_type)),
     EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
                                                    query=autocomplete.__generic_query(typename=EventType.VIEW_MOBILE.ui_type)),
     EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
                                                      query=autocomplete.__generic_query(typename=EventType.CUSTOM_MOBILE.ui_type)),
     EventType.REQUEST_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
                                                       query=autocomplete.__generic_query(typename=EventType.REQUEST_MOBILE.ui_type)),
     EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile, query=None),
-    }
+}
 def get_errors_by_session_id(session_id, project_id):
@ -208,17 +202,17 @@ def search(text, event_type, project_id, source, key):
     if not event_type:
         return {"data": autocomplete.__get_autocomplete_table(text, project_id)}
-    if event_type in supported_types().keys():
-        rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
-    elif event_type + "_MOBILE" in supported_types().keys():
-        rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
-    elif event_type in sessions_metas.supported_types().keys():
+    if event_type in SUPPORTED_TYPES.keys():
+        rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
+    elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
+        rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
+    elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
         return sessions_metas.search(text, event_type, project_id)
     elif event_type.endswith("_IOS") \
-            and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
+            and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
         return sessions_metas.search(text, event_type, project_id)
     elif event_type.endswith("_MOBILE") \
-            and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
+            and event_type[:-len("_MOBILE")] in sessions_metas.SUPPORTED_TYPES.keys():
         return sessions_metas.search(text, event_type, project_id)
     else:
         return {"errors": ["unsupported event"]}

View file

@ -27,6 +27,7 @@ HEALTH_ENDPOINTS = {
"http": app_connection_string("http-openreplay", 8888, "metrics"), "http": app_connection_string("http-openreplay", 8888, "metrics"),
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"), "ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"), "integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
"peers": app_connection_string("peers-openreplay", 8888, "health"),
"sink": app_connection_string("sink-openreplay", 8888, "metrics"), "sink": app_connection_string("sink-openreplay", 8888, "metrics"),
"sourcemapreader": app_connection_string( "sourcemapreader": app_connection_string(
"sourcemapreader-openreplay", 8888, "health" "sourcemapreader-openreplay", 8888, "health"
@ -38,7 +39,9 @@ HEALTH_ENDPOINTS = {
def __check_database_pg(*_): def __check_database_pg(*_):
fail_response = { fail_response = {
"health": False, "health": False,
"details": {"errors": ["Postgres health-check failed"]}, "details": {
"errors": ["Postgres health-check failed"]
}
} }
with pg_client.PostgresClient() as cur: with pg_client.PostgresClient() as cur:
try: try:
@ -60,26 +63,29 @@ def __check_database_pg(*_):
"details": { "details": {
# "version": server_version["server_version"], # "version": server_version["server_version"],
# "schema": schema_version["version"] # "schema": schema_version["version"]
}, }
} }
def __always_healthy(*_): def __always_healthy(*_):
return {"health": True, "details": {}} return {
"health": True,
"details": {}
}
def __check_be_service(service_name): def __check_be_service(service_name):
def fn(*_): def fn(*_):
fail_response = { fail_response = {
"health": False, "health": False,
"details": {"errors": ["server health-check failed"]}, "details": {
"errors": ["server health-check failed"]
}
} }
try: try:
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2) results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
if results.status_code != 200: if results.status_code != 200:
logger.error( logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
f"!! issue with the {service_name}-health code:{results.status_code}"
)
logger.error(results.text) logger.error(results.text)
# fail_response["details"]["errors"].append(results.text) # fail_response["details"]["errors"].append(results.text)
return fail_response return fail_response
@ -97,7 +103,10 @@ def __check_be_service(service_name):
logger.error("couldn't get response") logger.error("couldn't get response")
# fail_response["details"]["errors"].append(str(e)) # fail_response["details"]["errors"].append(str(e))
return fail_response return fail_response
return {"health": True, "details": {}} return {
"health": True,
"details": {}
}
return fn return fn
@ -105,7 +114,7 @@ def __check_be_service(service_name):
def __check_redis(*_): def __check_redis(*_):
fail_response = { fail_response = {
"health": False, "health": False,
"details": {"errors": ["server health-check failed"]}, "details": {"errors": ["server health-check failed"]}
} }
if config("REDIS_STRING", default=None) is None: if config("REDIS_STRING", default=None) is None:
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars") # fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
@ -124,14 +133,16 @@ def __check_redis(*_):
"health": True, "health": True,
"details": { "details": {
# "version": r.execute_command('INFO')['redis_version'] # "version": r.execute_command('INFO')['redis_version']
}, }
} }
def __check_SSL(*_): def __check_SSL(*_):
fail_response = { fail_response = {
"health": False, "health": False,
"details": {"errors": ["SSL Certificate health-check failed"]}, "details": {
"errors": ["SSL Certificate health-check failed"]
}
} }
try: try:
requests.get(config("SITE_URL"), verify=True, allow_redirects=True) requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
@ -139,28 +150,36 @@ def __check_SSL(*_):
logger.error("!! health failed: SSL Certificate") logger.error("!! health failed: SSL Certificate")
logger.exception(e) logger.exception(e)
return fail_response return fail_response
return {"health": True, "details": {}} return {
"health": True,
"details": {}
}
def __get_sessions_stats(*_): def __get_sessions_stats(*_):
with pg_client.PostgresClient() as cur: with pg_client.PostgresClient() as cur:
constraints = ["projects.deleted_at IS NULL"] constraints = ["projects.deleted_at IS NULL"]
query = cur.mogrify( query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
COALESCE(SUM(events_count),0) AS e_c COALESCE(SUM(events_count),0) AS e_c
FROM public.projects_stats FROM public.projects_stats
INNER JOIN public.projects USING(project_id) INNER JOIN public.projects USING(project_id)
WHERE {" AND ".join(constraints)};""" WHERE {" AND ".join(constraints)};""")
)
cur.execute(query) cur.execute(query)
row = cur.fetchone() row = cur.fetchone()
return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]} return {
"numberOfSessionsCaptured": row["s_c"],
"numberOfEventCaptured": row["e_c"]
}
def get_health(tenant_id=None): def get_health(tenant_id=None):
health_map = { health_map = {
"databases": {"postgres": __check_database_pg}, "databases": {
"ingestionPipeline": {"redis": __check_redis}, "postgres": __check_database_pg
},
"ingestionPipeline": {
"redis": __check_redis
},
"backendServices": { "backendServices": {
"alerts": __check_be_service("alerts"), "alerts": __check_be_service("alerts"),
"assets": __check_be_service("assets"), "assets": __check_be_service("assets"),
@ -173,12 +192,13 @@ def get_health(tenant_id=None):
"http": __check_be_service("http"), "http": __check_be_service("http"),
"ingress-nginx": __always_healthy, "ingress-nginx": __always_healthy,
"integrations": __check_be_service("integrations"), "integrations": __check_be_service("integrations"),
"peers": __check_be_service("peers"),
"sink": __check_be_service("sink"), "sink": __check_be_service("sink"),
"sourcemapreader": __check_be_service("sourcemapreader"), "sourcemapreader": __check_be_service("sourcemapreader"),
"storage": __check_be_service("storage"), "storage": __check_be_service("storage")
}, },
"details": __get_sessions_stats, "details": __get_sessions_stats,
"ssl": __check_SSL, "ssl": __check_SSL
} }
return __process_health(health_map=health_map) return __process_health(health_map=health_map)
@ -190,16 +210,10 @@ def __process_health(health_map):
response.pop(parent_key) response.pop(parent_key)
elif isinstance(health_map[parent_key], dict): elif isinstance(health_map[parent_key], dict):
for element_key in health_map[parent_key]: for element_key in health_map[parent_key]:
if config( if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
cast=bool,
default=False,
):
response[parent_key].pop(element_key) response[parent_key].pop(element_key)
else: else:
response[parent_key][element_key] = health_map[parent_key][ response[parent_key][element_key] = health_map[parent_key][element_key]()
element_key
]()
else: else:
response[parent_key] = health_map[parent_key]() response[parent_key] = health_map[parent_key]()
return response return response
@ -207,8 +221,7 @@ def __process_health(health_map):
def cron(): def cron():
with pg_client.PostgresClient() as cur: with pg_client.PostgresClient() as cur:
query = cur.mogrify( query = cur.mogrify("""SELECT projects.project_id,
"""SELECT projects.project_id,
projects.created_at, projects.created_at,
projects.sessions_last_check_at, projects.sessions_last_check_at,
projects.first_recorded_session_at, projects.first_recorded_session_at,
@ -216,8 +229,7 @@ def cron():
FROM public.projects FROM public.projects
LEFT JOIN public.projects_stats USING (project_id) LEFT JOIN public.projects_stats USING (project_id)
WHERE projects.deleted_at IS NULL WHERE projects.deleted_at IS NULL
ORDER BY project_id;""" ORDER BY project_id;""")
)
cur.execute(query) cur.execute(query)
rows = cur.fetchall() rows = cur.fetchall()
for r in rows: for r in rows:
@ -238,24 +250,20 @@ def cron():
count_start_from = r["last_update_at"] count_start_from = r["last_update_at"]
count_start_from = TimeUTC.datetime_to_timestamp(count_start_from) count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
params = { params = {"project_id": r["project_id"],
"project_id": r["project_id"], "start_ts": count_start_from,
"start_ts": count_start_from, "end_ts": TimeUTC.now(),
"end_ts": TimeUTC.now(), "sessions_count": 0,
"sessions_count": 0, "events_count": 0}
"events_count": 0,
}
query = cur.mogrify( query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
"""SELECT COUNT(1) AS sessions_count,
COALESCE(SUM(events_count),0) AS events_count COALESCE(SUM(events_count),0) AS events_count
FROM public.sessions FROM public.sessions
WHERE project_id=%(project_id)s WHERE project_id=%(project_id)s
AND start_ts>=%(start_ts)s AND start_ts>=%(start_ts)s
AND start_ts<=%(end_ts)s AND start_ts<=%(end_ts)s
AND duration IS NOT NULL;""", AND duration IS NOT NULL;""",
params, params)
)
cur.execute(query) cur.execute(query)
row = cur.fetchone() row = cur.fetchone()
if row is not None: if row is not None:
@ -263,68 +271,56 @@ def cron():
params["events_count"] = row["events_count"] params["events_count"] = row["events_count"]
if insert: if insert:
query = cur.mogrify( query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
"""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""", VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
params, params)
)
else: else:
query = cur.mogrify( query = cur.mogrify("""UPDATE public.projects_stats
"""UPDATE public.projects_stats
SET sessions_count=sessions_count+%(sessions_count)s, SET sessions_count=sessions_count+%(sessions_count)s,
events_count=events_count+%(events_count)s, events_count=events_count+%(events_count)s,
last_update_at=(now() AT TIME ZONE 'utc'::text) last_update_at=(now() AT TIME ZONE 'utc'::text)
WHERE project_id=%(project_id)s;""", WHERE project_id=%(project_id)s;""",
params, params)
)
cur.execute(query) cur.execute(query)
# this cron is used to correct the sessions&events count every week # this cron is used to correct the sessions&events count every week
def weekly_cron(): def weekly_cron():
with pg_client.PostgresClient(long_query=True) as cur: with pg_client.PostgresClient(long_query=True) as cur:
query = cur.mogrify( query = cur.mogrify("""SELECT project_id,
"""SELECT project_id,
projects_stats.last_update_at projects_stats.last_update_at
FROM public.projects FROM public.projects
LEFT JOIN public.projects_stats USING (project_id) LEFT JOIN public.projects_stats USING (project_id)
WHERE projects.deleted_at IS NULL WHERE projects.deleted_at IS NULL
ORDER BY project_id;""" ORDER BY project_id;""")
)
cur.execute(query) cur.execute(query)
rows = cur.fetchall() rows = cur.fetchall()
for r in rows: for r in rows:
if r["last_update_at"] is None: if r["last_update_at"] is None:
continue continue
params = { params = {"project_id": r["project_id"],
"project_id": r["project_id"], "end_ts": TimeUTC.now(),
"end_ts": TimeUTC.now(), "sessions_count": 0,
"sessions_count": 0, "events_count": 0}
"events_count": 0,
}
query = cur.mogrify( query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
"""SELECT COUNT(1) AS sessions_count,
COALESCE(SUM(events_count),0) AS events_count COALESCE(SUM(events_count),0) AS events_count
FROM public.sessions FROM public.sessions
WHERE project_id=%(project_id)s WHERE project_id=%(project_id)s
AND start_ts<=%(end_ts)s AND start_ts<=%(end_ts)s
AND duration IS NOT NULL;""", AND duration IS NOT NULL;""",
params, params)
)
cur.execute(query) cur.execute(query)
row = cur.fetchone() row = cur.fetchone()
if row is not None: if row is not None:
params["sessions_count"] = row["sessions_count"] params["sessions_count"] = row["sessions_count"]
params["events_count"] = row["events_count"] params["events_count"] = row["events_count"]
query = cur.mogrify( query = cur.mogrify("""UPDATE public.projects_stats
"""UPDATE public.projects_stats
SET sessions_count=%(sessions_count)s, SET sessions_count=%(sessions_count)s,
events_count=%(events_count)s, events_count=%(events_count)s,
last_update_at=(now() AT TIME ZONE 'utc'::text) last_update_at=(now() AT TIME ZONE 'utc'::text)
WHERE project_id=%(project_id)s;""", WHERE project_id=%(project_id)s;""",
params, params)
)
cur.execute(query) cur.execute(query)
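A note on the SKIP_H_* lookups in __process_health above: each check can be disabled per section and per element through an environment variable read with python-decouple. A hedged sketch of that lookup (the concrete variable names below are examples derived from the f-string, not an exhaustive list):

from decouple import config

def should_skip(parent_key: str, element_key: str) -> bool:
    # Mirrors the guard in __process_health: SKIP_H_<SECTION>_<CHECK> as a boolean env var.
    return config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False)

# With SKIP_H_BACKENDSERVICES_PEERS=true in the environment, the peers probe added in this
# diff is dropped from the report instead of being called.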

View file

@ -50,8 +50,8 @@ class JIRAIntegration(base.BaseIntegration):
cur.execute( cur.execute(
cur.mogrify( cur.mogrify(
"""SELECT username, token, url """SELECT username, token, url
FROM public.jira_cloud FROM public.jira_cloud
WHERE user_id = %(user_id)s;""", WHERE user_id=%(user_id)s;""",
{"user_id": self._user_id}) {"user_id": self._user_id})
) )
data = helper.dict_to_camel_case(cur.fetchone()) data = helper.dict_to_camel_case(cur.fetchone())
@ -95,9 +95,10 @@ class JIRAIntegration(base.BaseIntegration):
def add(self, username, token, url, obfuscate=False): def add(self, username, token, url, obfuscate=False):
with pg_client.PostgresClient() as cur: with pg_client.PostgresClient() as cur:
cur.execute( cur.execute(
cur.mogrify(""" \ cur.mogrify("""\
INSERT INTO public.jira_cloud(username, token, user_id, url) INSERT INTO public.jira_cloud(username, token, user_id,url)
VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""", VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
RETURNING username, token, url;""",
{"user_id": self._user_id, "username": username, {"user_id": self._user_id, "username": username,
"token": token, "url": url}) "token": token, "url": url})
) )
@ -111,10 +112,9 @@ class JIRAIntegration(base.BaseIntegration):
def delete(self): def delete(self):
with pg_client.PostgresClient() as cur: with pg_client.PostgresClient() as cur:
cur.execute( cur.execute(
cur.mogrify(""" \ cur.mogrify("""\
DELETE DELETE FROM public.jira_cloud
FROM public.jira_cloud WHERE user_id=%(user_id)s;""",
WHERE user_id = %(user_id)s;""",
{"user_id": self._user_id}) {"user_id": self._user_id})
) )
return {"state": "success"} return {"state": "success"}
@ -125,7 +125,7 @@ class JIRAIntegration(base.BaseIntegration):
             changes={
                 "username": data.username,
                 "token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
-                    else self.integration["token"],
+                    else self.integration.token,
                 "url": str(data.url)
             },
             obfuscate=True
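The last hunk touches how an update decides between a newly submitted token and the stored one: a non-empty value that does not contain the "***" mask is treated as a real replacement, otherwise the previously stored token is kept. A small stand-alone sketch of that rule (the function name is illustrative):

def resolve_token(submitted: str, stored: str) -> str:
    # Keep the stored token when the client echoes back an empty or obfuscated placeholder.
    if len(submitted) > 0 and submitted.find("***") == -1:
        return submitted
    return stored

assert resolve_token("new-secret", "old-secret") == "new-secret"
assert resolve_token("****1234", "old-secret") == "old-secret"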

View file

@ -6,5 +6,8 @@ logger = logging.getLogger(__name__)
 if config("EXP_METRICS", cast=bool, default=False):
     logger.info(">>> Using experimental metrics")
+    from chalicelib.core.metrics import heatmaps_ch as heatmaps
+    from chalicelib.core.metrics import product_analytics_ch as product_analytics
 else:
-    pass
+    from chalicelib.core.metrics import heatmaps
+    from chalicelib.core.metrics import product_analytics

View file

@ -352,100 +352,6 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id) return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)
def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, include_series=False):
constraints = ["metrics.project_id = %(project_id)s", "metrics.deleted_at ISNULL"]
params = {
"project_id": project_id,
"user_id": user_id,
"offset": (data.page - 1) * data.limit,
"limit": data.limit,
}
if data.mine_only:
constraints.append("user_id = %(user_id)s")
else:
constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
if data.shared_only:
constraints.append("is_public")
if data.filter is not None:
if data.filter.type:
constraints.append("metrics.metric_type = %(filter_type)s")
params["filter_type"] = data.filter.type
if data.filter.query and len(data.filter.query) > 0:
constraints.append("(metrics.name ILIKE %(filter_query)s OR owner.owner_name ILIKE %(filter_query)s)")
params["filter_query"] = helper.values_for_operator(
value=data.filter.query, op=schemas.SearchEventOperator.CONTAINS
)
with pg_client.PostgresClient() as cur:
sub_join = ""
if include_series:
sub_join = """LEFT JOIN LATERAL (
SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
FROM metric_series
WHERE metric_series.metric_id = metrics.metric_id
AND metric_series.deleted_at ISNULL
) AS metric_series ON (TRUE)"""
sort_column = data.sort.field if data.sort.field is not None and len(data.sort.field) > 0 \
else "created_at"
# change ascend to asc and descend to desc
sort_order = data.sort.order.value if hasattr(data.sort.order, "value") else data.sort.order
if sort_order == "ascend":
sort_order = "asc"
elif sort_order == "descend":
sort_order = "desc"
query = cur.mogrify(
f"""SELECT count(1) OVER () AS total,metric_id, project_id, user_id, name, is_public, created_at, edited_at,
metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
dashboards, owner_email, owner_name, default_config AS config, thumbnail
FROM metrics
{sub_join}
LEFT JOIN LATERAL (
SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public, name),'[]'::jsonb) AS dashboards
FROM (
SELECT DISTINCT dashboard_id, name, is_public
FROM dashboards
INNER JOIN dashboard_widgets USING (dashboard_id)
WHERE deleted_at ISNULL
AND dashboard_widgets.metric_id = metrics.metric_id
AND project_id = %(project_id)s
AND ((dashboards.user_id = %(user_id)s OR is_public))
) AS connected_dashboards
) AS connected_dashboards ON (TRUE)
LEFT JOIN LATERAL (
SELECT email AS owner_email, name AS owner_name
FROM users
WHERE deleted_at ISNULL
AND users.user_id = metrics.user_id
) AS owner ON (TRUE)
WHERE {" AND ".join(constraints)}
ORDER BY {sort_column} {sort_order}
LIMIT %(limit)s OFFSET %(offset)s;""",
params
)
cur.execute(query)
rows = cur.fetchall()
if len(rows) > 0:
total = rows[0]["total"]
if include_series:
for r in rows:
r.pop("total")
for s in r.get("series", []):
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
else:
for r in rows:
r.pop("total")
r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
rows = helper.list_to_camel_case(rows)
else:
total = 0
return {"total": total, "list": rows}
def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False): def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
constraints = ["metrics.project_id = %(project_id)s", constraints = ["metrics.project_id = %(project_id)s",
"metrics.deleted_at ISNULL"] "metrics.deleted_at ISNULL"]

View file

@ -1,11 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
logger.info(">>> Using experimental heatmaps")
from .heatmaps_ch import *
else:
from .heatmaps import *

View file

@ -5,8 +5,8 @@ from decouple import config
 logger = logging.getLogger(__name__)
 if config("EXP_METRICS", cast=bool, default=False):
-    import chalicelib.core.sessions.sessions_ch as sessions
+    from chalicelib.core.sessions import sessions_ch as sessions
 else:
-    import chalicelib.core.sessions.sessions_pg as sessions
+    from chalicelib.core.sessions import sessions
 from chalicelib.core.sessions import sessions_mobs

View file

@ -1,10 +0,0 @@
import logging
from decouple import config
logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
logger.info(">>> Using experimental product-analytics")
from .product_analytics_ch import *
else:
from .product_analytics import *

View file

@ -3,7 +3,7 @@ from time import time
 import schemas
 from chalicelib.core import metadata
-from .product_analytics import __transform_journey
+from chalicelib.core.metrics.product_analytics import __transform_journey
 from chalicelib.utils import ch_client, exp_ch_helper
 from chalicelib.utils import helper
 from chalicelib.utils import sql_helper as sh
@ -85,9 +85,10 @@ def __complete_missing_steps(start_time, end_time, density, neutral, rows, time_
 # compute avg_time_from_previous at the same level as sessions_count (this was removed in v1.22)
 # if start-point is selected, the selected event is ranked n°1
 def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
-    if not data.hide_excess:
-        data.hide_excess = True
-        data.rows = 50
+    # # This code is used for testing only
+    # return __get_test_data()
+    # ------ end of testing code ---
     sub_events = []
     start_points_conditions = []
     step_0_conditions = []

View file

@ -3,11 +3,9 @@ import logging
 from decouple import config
 logger = logging.getLogger(__name__)
-from . import sessions_pg
-from . import sessions_pg as sessions_legacy
-from . import sessions_ch
+from . import sessions as sessions_legacy
 if config("EXP_METRICS", cast=bool, default=False):
     from . import sessions_ch as sessions
 else:
-    from . import sessions_pg as sessions
+    from . import sessions

View file

@ -3,7 +3,7 @@ from typing import List, Union
import schemas import schemas
from chalicelib.core import events, metadata from chalicelib.core import events, metadata
from . import performance_event from chalicelib.core.sessions import performance_event
from chalicelib.utils import pg_client, helper, metrics_helper from chalicelib.utils import pg_client, helper, metrics_helper
from chalicelib.utils import sql_helper as sh from chalicelib.utils import sql_helper as sh
@ -148,7 +148,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
"isEvent": True, "isEvent": True,
"value": [], "value": [],
"operator": e.operator, "operator": e.operator,
"filters": e.filters "filters": []
}) })
for v in e.value: for v in e.value:
if v not in extra_conditions[e.operator].value: if v not in extra_conditions[e.operator].value:
@ -165,7 +165,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
"isEvent": True, "isEvent": True,
"value": [], "value": [],
"operator": e.operator, "operator": e.operator,
"filters": e.filters "filters": []
}) })
for v in e.value: for v in e.value:
if v not in extra_conditions[e.operator].value: if v not in extra_conditions[e.operator].value:
@ -989,7 +989,7 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
sh.multi_conditions(f"ev.{events.EventType.LOCATION.column} {op} %({e_k})s", sh.multi_conditions(f"ev.{events.EventType.LOCATION.column} {op} %({e_k})s",
c.value, value_key=e_k)) c.value, value_key=e_k))
else: else:
logger.warning(f"unsupported extra_event type: {c.type}") logger.warning(f"unsupported extra_event type:${c.type}")
if len(_extra_or_condition) > 0: if len(_extra_or_condition) > 0:
extra_constraints.append("(" + " OR ".join(_extra_or_condition) + ")") extra_constraints.append("(" + " OR ".join(_extra_or_condition) + ")")
query_part = f"""\ query_part = f"""\

View file

@ -1,11 +1,9 @@
import json
from decouple import config from decouple import config
from chalicelib.core.issue_tracking import integrations_manager, base_issue
from chalicelib.utils import helper from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC from chalicelib.utils.TimeUTC import TimeUTC
from chalicelib.utils import pg_client
from chalicelib.core.issue_tracking import integrations_manager, base_issue
import json
def __get_saved_data(project_id, session_id, issue_id, tool): def __get_saved_data(project_id, session_id, issue_id, tool):

View file

@ -3,7 +3,7 @@ from typing import List, Union
import schemas import schemas
from chalicelib.core import events, metadata from chalicelib.core import events, metadata
from . import performance_event, sessions_legacy from chalicelib.core.sessions import performance_event, sessions_legacy
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
from chalicelib.utils import sql_helper as sh from chalicelib.utils import sql_helper as sh
@ -153,7 +153,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
"isEvent": True, "isEvent": True,
"value": [], "value": [],
"operator": e.operator, "operator": e.operator,
"filters": e.filters "filters": []
}) })
for v in e.value: for v in e.value:
if v not in extra_conditions[e.operator].value: if v not in extra_conditions[e.operator].value:
@ -178,7 +178,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
"isEvent": True, "isEvent": True,
"value": [], "value": [],
"operator": e.operator, "operator": e.operator,
"filters": e.filters "filters": []
}) })
for v in e.value: for v in e.value:
if v not in extra_conditions[e.operator].value: if v not in extra_conditions[e.operator].value:
@ -870,12 +870,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
events_conditions[-1]["condition"] = [] events_conditions[-1]["condition"] = []
if not is_any and event.value not in [None, "*", ""]: if not is_any and event.value not in [None, "*", ""]:
event_where.append( event_where.append(
sh.multi_conditions(f"(toString(main1.`$properties`.message) {op} %({e_k})s OR toString(main1.`$properties`.name) {op} %({e_k})s)", sh.multi_conditions(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)",
event.value, value_key=e_k)) event.value, value_key=e_k))
events_conditions[-1]["condition"].append(event_where[-1]) events_conditions[-1]["condition"].append(event_where[-1])
events_extra_join += f" AND {event_where[-1]}" events_extra_join += f" AND {event_where[-1]}"
if len(event.source) > 0 and event.source[0] not in [None, "*", ""]: if len(event.source) > 0 and event.source[0] not in [None, "*", ""]:
event_where.append(sh.multi_conditions(f"toString(main1.`$properties`.source) = %({s_k})s", event.source, value_key=s_k)) event_where.append(sh.multi_conditions(f"main1.source = %({s_k})s", event.source, value_key=s_k))
events_conditions[-1]["condition"].append(event_where[-1]) events_conditions[-1]["condition"].append(event_where[-1])
events_extra_join += f" AND {event_where[-1]}" events_extra_join += f" AND {event_where[-1]}"
@ -1108,12 +1108,8 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
is_any = sh.isAny_opreator(f.operator) is_any = sh.isAny_opreator(f.operator)
if is_any or len(f.value) == 0: if is_any or len(f.value) == 0:
continue continue
is_negative_operator = sh.is_negation_operator(f.operator)
f.value = helper.values_for_operator(value=f.value, op=f.operator) f.value = helper.values_for_operator(value=f.value, op=f.operator)
op = sh.get_sql_operator(f.operator) op = sh.get_sql_operator(f.operator)
r_op = ""
if is_negative_operator:
r_op = sh.reverse_sql_operator(op)
e_k_f = e_k + f"_fetch{j}" e_k_f = e_k + f"_fetch{j}"
full_args = {**full_args, **sh.multi_values(f.value, value_key=e_k_f)} full_args = {**full_args, **sh.multi_values(f.value, value_key=e_k_f)}
if f.type == schemas.FetchFilterType.FETCH_URL: if f.type == schemas.FetchFilterType.FETCH_URL:
@ -1122,12 +1118,6 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
)) ))
events_conditions[-1]["condition"].append(event_where[-1]) events_conditions[-1]["condition"].append(event_where[-1])
apply = True apply = True
if is_negative_operator:
events_conditions_not.append(
{
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
events_conditions_not[-1]["condition"] = sh.multi_conditions(
f"sub.`$properties`.url_path {r_op} %({e_k_f})s", f.value, value_key=e_k_f)
elif f.type == schemas.FetchFilterType.FETCH_STATUS_CODE: elif f.type == schemas.FetchFilterType.FETCH_STATUS_CODE:
event_where.append(json_condition( event_where.append(json_condition(
"main", "$properties", 'status', op, f.value, e_k_f, True, True "main", "$properties", 'status', op, f.value, e_k_f, True, True
@ -1140,13 +1130,6 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
)) ))
events_conditions[-1]["condition"].append(event_where[-1]) events_conditions[-1]["condition"].append(event_where[-1])
apply = True apply = True
if is_negative_operator:
events_conditions_not.append(
{
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
events_conditions_not[-1]["condition"] = sh.multi_conditions(
f"sub.`$properties`.method {r_op} %({e_k_f})s", f.value,
value_key=e_k_f)
elif f.type == schemas.FetchFilterType.FETCH_DURATION: elif f.type == schemas.FetchFilterType.FETCH_DURATION:
event_where.append( event_where.append(
sh.multi_conditions(f"main.`$duration_s` {f.operator} %({e_k_f})s/1000", f.value, sh.multi_conditions(f"main.`$duration_s` {f.operator} %({e_k_f})s/1000", f.value,
@ -1159,26 +1142,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
)) ))
events_conditions[-1]["condition"].append(event_where[-1]) events_conditions[-1]["condition"].append(event_where[-1])
apply = True apply = True
if is_negative_operator:
events_conditions_not.append(
{
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
events_conditions_not[-1]["condition"] = sh.multi_conditions(
f"sub.`$properties`.request_body {r_op} %({e_k_f})s", f.value,
value_key=e_k_f)
elif f.type == schemas.FetchFilterType.FETCH_RESPONSE_BODY: elif f.type == schemas.FetchFilterType.FETCH_RESPONSE_BODY:
event_where.append(json_condition( event_where.append(json_condition(
"main", "$properties", 'response_body', op, f.value, e_k_f "main", "$properties", 'response_body', op, f.value, e_k_f
)) ))
events_conditions[-1]["condition"].append(event_where[-1]) events_conditions[-1]["condition"].append(event_where[-1])
apply = True apply = True
if is_negative_operator:
events_conditions_not.append(
{
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
events_conditions_not[-1]["condition"] = sh.multi_conditions(
f"sub.`$properties`.response_body {r_op} %({e_k_f})s", f.value,
value_key=e_k_f)
else: else:
logging.warning(f"undefined FETCH filter: {f.type}") logging.warning(f"undefined FETCH filter: {f.type}")
if not apply: if not apply:
@ -1426,30 +1395,17 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
if extra_conditions and len(extra_conditions) > 0: if extra_conditions and len(extra_conditions) > 0:
_extra_or_condition = [] _extra_or_condition = []
for i, c in enumerate(extra_conditions): for i, c in enumerate(extra_conditions):
if sh.isAny_opreator(c.operator) and c.type != schemas.EventType.REQUEST_DETAILS.value: if sh.isAny_opreator(c.operator):
continue continue
e_k = f"ec_value{i}" e_k = f"ec_value{i}"
op = sh.get_sql_operator(c.operator) op = sh.get_sql_operator(c.operator)
c.value = helper.values_for_operator(value=c.value, op=c.operator) c.value = helper.values_for_operator(value=c.value, op=c.operator)
full_args = {**full_args, full_args = {**full_args,
**sh.multi_values(c.value, value_key=e_k)} **sh.multi_values(c.value, value_key=e_k)}
if c.type in (schemas.EventType.LOCATION.value, schemas.EventType.REQUEST.value): if c.type == events.EventType.LOCATION.ui_type:
_extra_or_condition.append( _extra_or_condition.append(
sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s", sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
c.value, value_key=e_k)) c.value, value_key=e_k))
elif c.type == schemas.EventType.REQUEST_DETAILS.value:
for j, c_f in enumerate(c.filters):
if sh.isAny_opreator(c_f.operator) or len(c_f.value) == 0:
continue
e_k += f"_{j}"
op = sh.get_sql_operator(c_f.operator)
c_f.value = helper.values_for_operator(value=c_f.value, op=c_f.operator)
full_args = {**full_args,
**sh.multi_values(c_f.value, value_key=e_k)}
if c_f.type == schemas.FetchFilterType.FETCH_URL.value:
_extra_or_condition.append(
sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
c_f.value, value_key=e_k))
else: else:
logging.warning(f"unsupported extra_event type:${c.type}") logging.warning(f"unsupported extra_event type:${c.type}")
if len(_extra_or_condition) > 0: if len(_extra_or_condition) > 0:
@ -1460,10 +1416,9 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
         query_part = f"""{f"({events_query_part}) AS f" if len(events_query_part) > 0 else ""}"""
     else:
         if len(events_query_part) > 0:
-            extra_join += f"""INNER JOIN (SELECT DISTINCT ON (session_id) *
-                              FROM {MAIN_SESSIONS_TABLE} AS s {extra_event}
-                              WHERE {" AND ".join(extra_constraints)}
-                              ORDER BY _timestamp DESC) AS s ON(s.session_id=f.session_id)"""
+            extra_join += f"""INNER JOIN (SELECT *
+                              FROM {MAIN_SESSIONS_TABLE} AS s {extra_event}
+                              WHERE {" AND ".join(extra_constraints)}) AS s ON(s.session_id=f.session_id)"""
         else:
             deduplication_keys = ["session_id"] + extra_deduplication
             extra_join = f"""(SELECT *

View file

@ -4,7 +4,7 @@ import schemas
 from chalicelib.utils.storage import StorageClient
-def get_devtools_keys(project_id, session_id):
+def __get_devtools_keys(project_id, session_id):
     params = {
         "sessionId": session_id,
         "projectId": project_id
@ -16,7 +16,7 @@ def get_devtools_keys(project_id, session_id):
 def get_urls(session_id, project_id, context: schemas.CurrentContext, check_existence: bool = True):
     results = []
-    for k in get_devtools_keys(project_id=project_id, session_id=session_id):
+    for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
         if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
             continue
         results.append(StorageClient.get_presigned_url_for_sharing(
@ -29,5 +29,5 @@ def get_urls(session_id, project_id, context: schemas.CurrentContext, check_exis
 def delete_mobs(project_id, session_ids):
     for session_id in session_ids:
-        for k in get_devtools_keys(project_id=project_id, session_id=session_id):
+        for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
             StorageClient.tag_for_deletion(bucket=config("sessions_bucket"), key=k)
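The only change in this file is the rename of get_devtools_keys to __get_devtools_keys. At module level a leading-underscore name is a privacy convention: it is skipped by wildcard imports (unless listed in __all__), while callers inside the module, like get_urls and delete_mobs above, are unaffected. Illustrative toy module, not repository code:

# toy_devtool.py
def get_devtools_keys():
    return ["devtools.mob"]

def __get_devtools_keys():
    return ["devtools.mob"]

# elsewhere:
#   from toy_devtool import *
#   get_devtools_keys()     # resolves
#   __get_devtools_keys()   # NameError: not exported by the wildcard import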

View file

@ -1 +0,0 @@
from .sessions_devtool import *

View file

@ -1 +0,0 @@
from .sessions_favorite import *

View file

@ -1,81 +1,76 @@
-from functools import cache
 import schemas
 from chalicelib.core.autocomplete import autocomplete
 from chalicelib.utils.event_filter_definition import SupportedFilter
SUPPORTED_TYPES = {
schemas.FilterType.USER_OS: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
schemas.FilterType.USER_BROWSER: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
schemas.FilterType.USER_DEVICE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
schemas.FilterType.USER_COUNTRY: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
schemas.FilterType.USER_CITY: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
schemas.FilterType.USER_STATE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
schemas.FilterType.USER_ID: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
schemas.FilterType.REV_ID: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
schemas.FilterType.REFERRER: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
schemas.FilterType.UTM_MEDIUM: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
schemas.FilterType.UTM_SOURCE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
# Mobile
schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(
typename=schemas.FilterType.USER_DEVICE_MOBILE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
}
@cache
def supported_types():
    return {
schemas.FilterType.USER_OS: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
schemas.FilterType.USER_BROWSER: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
schemas.FilterType.USER_DEVICE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
schemas.FilterType.USER_COUNTRY: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
schemas.FilterType.USER_CITY: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
schemas.FilterType.USER_STATE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
schemas.FilterType.USER_ID: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
schemas.FilterType.REV_ID: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
schemas.FilterType.REFERRER: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
schemas.FilterType.UTM_MEDIUM: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
schemas.FilterType.UTM_SOURCE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
# Mobile
schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(
typename=schemas.FilterType.USER_DEVICE_MOBILE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
}
 def search(text: str, meta_type: schemas.FilterType, project_id: int):
     rows = []
-    if meta_type not in list(supported_types().keys()):
+    if meta_type not in list(SUPPORTED_TYPES.keys()):
         return {"errors": ["unsupported type"]}
-    rows += supported_types()[meta_type].get(project_id=project_id, text=text)
+    rows += SUPPORTED_TYPES[meta_type].get(project_id=project_id, text=text)
     # for IOS events autocomplete
     # if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()):
     #     rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text)

View file

@ -1,8 +1,8 @@
 import schemas
 from chalicelib.core import events, metadata, events_mobile, \
     issues, assist, canvas, user_testing
-from . import sessions_mobs, sessions_devtool
+from chalicelib.core.sessions import sessions_mobs, sessions_devtool
-from chalicelib.core.errors.modules import errors_helper
+from chalicelib.utils import errors_helper
 from chalicelib.utils import pg_client, helper
 from chalicelib.core.modules import MOB_KEY, get_file_key

View file

@ -1,9 +1,11 @@
import logging import logging
from typing import List, Union
import schemas import schemas
from chalicelib.core import metadata, projects from chalicelib.core import events, metadata, projects
from . import sessions_favorite, sessions_legacy from chalicelib.core.sessions import sessions_favorite, performance_event, sessions_legacy
from chalicelib.utils import pg_client, helper from chalicelib.utils import pg_client, helper, metrics_helper
from chalicelib.utils import sql_helper as sh
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
@ -43,13 +45,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
count_only=False, issue=None, ids_only=False, platform="web"): count_only=False, issue=None, ids_only=False, platform="web"):
if data.bookmarked: if data.bookmarked:
data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project.project_id, user_id) data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project.project_id, user_id)
if data.startTimestamp is None:
logger.debug(f"No vault sessions found for project:{project.project_id}")
return {
'total': 0,
'sessions': [],
'src': 1
}
full_args, query_part = sessions_legacy.search_query_parts(data=data, error_status=error_status, full_args, query_part = sessions_legacy.search_query_parts(data=data, error_status=error_status,
errors_only=errors_only, errors_only=errors_only,
favorite_only=data.bookmarked, issue=issue, favorite_only=data.bookmarked, issue=issue,
@ -122,10 +118,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
sort = 'session_id' sort = 'session_id'
if data.sort is not None and data.sort != "session_id": if data.sort is not None and data.sort != "session_id":
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort) # sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
if data.sort == 'datetime': sort = helper.key_to_snake_case(data.sort)
sort = 'start_ts'
else:
sort = helper.key_to_snake_case(data.sort)
meta_keys = metadata.get(project_id=project.project_id) meta_keys = metadata.get(project_id=project.project_id)
main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count, main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
@ -175,8 +168,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
# reverse=data.order.upper() == "DESC") # reverse=data.order.upper() == "DESC")
return { return {
'total': total, 'total': total,
'sessions': helper.list_to_camel_case(sessions), 'sessions': helper.list_to_camel_case(sessions)
'src': 1
} }
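
The search_sessions hunks above change how the ORDER BY column is derived from the request and what the response dict contains (one side maps the 'datetime' sort to start_ts and adds 'src': 1). A small sketch of the sort-column mapping; key_to_snake_case is approximated locally since helper.key_to_snake_case is project-internal:

    import re

    def key_to_snake_case(key: str) -> str:
        # Rough stand-in for helper.key_to_snake_case: camelCase -> snake_case.
        return re.sub(r"(?<!^)(?=[A-Z])", "_", key).lower()

    def resolve_sort_column(requested_sort: str | None) -> str:
        # Default ordering column; 'datetime' maps to start_ts on one side of the diff above.
        sort = "session_id"
        if requested_sort is not None and requested_sort != "session_id":
            if requested_sort == "datetime":
                sort = "start_ts"
            else:
                sort = key_to_snake_case(requested_sort)
        return sort

    assert resolve_sort_column(None) == "session_id"
    assert resolve_sort_column("datetime") == "start_ts"
    assert resolve_sort_column("eventsCount") == "events_count"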

View file

@ -1 +0,0 @@
from .sessions_viewed import *

View file

@ -1,7 +1,7 @@
import logging import logging
from chalicelib.core import assist from chalicelib.core import assist
from . import sessions from chalicelib.core.sessions import sessions
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)

View file

@ -18,7 +18,7 @@ def refresh_spot_jwt_iat_jti(user_id):
{"user_id": user_id}) {"user_id": user_id})
cur.execute(query) cur.execute(query)
row = cur.fetchone() row = cur.fetchone()
return users.RefreshSpotJWTs(**row) return row.get("spot_jwt_iat"), row.get("spot_jwt_refresh_jti"), row.get("spot_jwt_refresh_iat")
def logout(user_id: int): def logout(user_id: int):
@ -26,13 +26,13 @@ def logout(user_id: int):
def refresh(user_id: int, tenant_id: int = -1) -> dict: def refresh(user_id: int, tenant_id: int = -1) -> dict:
j = refresh_spot_jwt_iat_jti(user_id=user_id) spot_jwt_iat, spot_jwt_r_jti, spot_jwt_r_iat = refresh_spot_jwt_iat_jti(user_id=user_id)
return { return {
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=j.spot_jwt_iat, "jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=spot_jwt_iat,
aud=AUDIENCE, for_spot=True), aud=AUDIENCE, for_spot=True),
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=j.spot_jwt_refresh_iat, "refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=spot_jwt_r_iat,
aud=AUDIENCE, jwt_jti=j.spot_jwt_refresh_jti, for_spot=True), aud=AUDIENCE, jwt_jti=spot_jwt_r_jti, for_spot=True),
"refreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) - (j.spot_jwt_iat - j.spot_jwt_refresh_iat) "refreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) - (spot_jwt_iat - spot_jwt_r_iat)
} }
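
The spot refresh() hunk rebuilds the access/refresh token pair from the iat/jti values returned by refresh_spot_jwt_iat_jti, either as a pydantic object or as a plain tuple depending on the side of the diff. The only non-obvious part is the cookie max-age arithmetic; a hedged sketch with an assumed expiration value:

    from datetime import datetime, timezone

    JWT_SPOT_REFRESH_EXPIRATION = 7 * 24 * 3600  # assumed config value, in seconds

    def refresh_token_max_age(spot_jwt_iat: int, spot_jwt_refresh_iat: int) -> int:
        # The refresh cookie lifetime shrinks by how long the refresh token
        # has already existed relative to the newly issued access token.
        return JWT_SPOT_REFRESH_EXPIRATION - (spot_jwt_iat - spot_jwt_refresh_iat)

    now = int(datetime.now(timezone.utc).timestamp())
    issued_refresh_two_days_ago = now - 2 * 24 * 3600
    print(refresh_token_max_age(now, issued_refresh_two_days_ago))  # roughly 5 days left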

View file

@ -1,14 +1,13 @@
import json import json
import secrets import secrets
from typing import Optional
from decouple import config from decouple import config
from fastapi import BackgroundTasks from fastapi import BackgroundTasks
from pydantic import BaseModel, model_validator from pydantic import BaseModel
import schemas import schemas
from chalicelib.core import authorizers from chalicelib.core import authorizers, metadata
from chalicelib.core import tenants, spot from chalicelib.core import tenants, spot, scope
from chalicelib.utils import email_helper from chalicelib.utils import email_helper
from chalicelib.utils import helper from chalicelib.utils import helper
from chalicelib.utils import pg_client from chalicelib.utils import pg_client
@ -84,6 +83,7 @@ def restore_member(user_id, email, invitation_token, admin, name, owner=False):
"name": name, "invitation_token": invitation_token}) "name": name, "invitation_token": invitation_token})
cur.execute(query) cur.execute(query)
result = cur.fetchone() result = cur.fetchone()
cur.execute(query)
result["created_at"] = TimeUTC.datetime_to_timestamp(result["created_at"]) result["created_at"] = TimeUTC.datetime_to_timestamp(result["created_at"])
return helper.dict_to_camel_case(result) return helper.dict_to_camel_case(result)
@ -284,7 +284,7 @@ def edit_member(user_id_to_update, tenant_id, changes: schemas.EditMemberSchema,
if editor_id != user_id_to_update: if editor_id != user_id_to_update:
admin = get_user_role(tenant_id=tenant_id, user_id=editor_id) admin = get_user_role(tenant_id=tenant_id, user_id=editor_id)
if not admin["superAdmin"] and not admin["admin"]: if not admin["superAdmin"] and not admin["admin"]:
return {"errors": ["unauthorized, you must have admin privileges"]} return {"errors": ["unauthorized"]}
if admin["admin"] and user["superAdmin"]: if admin["admin"] and user["superAdmin"]:
return {"errors": ["only the owner can edit his own details"]} return {"errors": ["only the owner can edit his own details"]}
else: else:
@ -552,35 +552,14 @@ def refresh_auth_exists(user_id, jwt_jti=None):
return r is not None return r is not None
class FullLoginJWTs(BaseModel): class ChangeJwt(BaseModel):
jwt_iat: int jwt_iat: int
jwt_refresh_jti: str jwt_refresh_jti: int
jwt_refresh_iat: int jwt_refresh_iat: int
spot_jwt_iat: int spot_jwt_iat: int
spot_jwt_refresh_jti: str spot_jwt_refresh_jti: int
spot_jwt_refresh_iat: int spot_jwt_refresh_iat: int
@model_validator(mode="before")
@classmethod
def _transform_data(cls, values):
if values.get("jwt_refresh_jti") is not None:
values["jwt_refresh_jti"] = str(values["jwt_refresh_jti"])
if values.get("spot_jwt_refresh_jti") is not None:
values["spot_jwt_refresh_jti"] = str(values["spot_jwt_refresh_jti"])
return values
class RefreshLoginJWTs(FullLoginJWTs):
spot_jwt_iat: Optional[int] = None
spot_jwt_refresh_jti: Optional[str] = None
spot_jwt_refresh_iat: Optional[int] = None
class RefreshSpotJWTs(FullLoginJWTs):
jwt_iat: Optional[int] = None
jwt_refresh_jti: Optional[str] = None
jwt_refresh_iat: Optional[int] = None
def change_jwt_iat_jti(user_id): def change_jwt_iat_jti(user_id):
with pg_client.PostgresClient() as cur: with pg_client.PostgresClient() as cur:
@ -601,7 +580,7 @@ def change_jwt_iat_jti(user_id):
{"user_id": user_id}) {"user_id": user_id})
cur.execute(query) cur.execute(query)
row = cur.fetchone() row = cur.fetchone()
return FullLoginJWTs(**row) return ChangeJwt(**row)
def refresh_jwt_iat_jti(user_id): def refresh_jwt_iat_jti(user_id):
@ -616,7 +595,7 @@ def refresh_jwt_iat_jti(user_id):
{"user_id": user_id}) {"user_id": user_id})
cur.execute(query) cur.execute(query)
row = cur.fetchone() row = cur.fetchone()
return RefreshLoginJWTs(**row) return row.get("jwt_iat"), row.get("jwt_refresh_jti"), row.get("jwt_refresh_iat")
def authenticate(email, password, for_change_password=False) -> dict | bool | None: def authenticate(email, password, for_change_password=False) -> dict | bool | None:
@ -648,12 +627,9 @@ def authenticate(email, password, for_change_password=False) -> dict | bool | No
response = { response = {
"jwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'], iat=j_r.jwt_iat, "jwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'], iat=j_r.jwt_iat,
aud=AUDIENCE), aud=AUDIENCE),
"refreshToken": authorizers.generate_jwt_refresh(user_id=r['userId'], "refreshToken": authorizers.generate_jwt_refresh(user_id=r['userId'], tenant_id=r['tenantId'],
tenant_id=r['tenantId'], iat=j_r.jwt_refresh_iat, aud=AUDIENCE,
iat=j_r.jwt_refresh_iat, jwt_jti=j_r.jwt_refresh_jti),
aud=AUDIENCE,
jwt_jti=j_r.jwt_refresh_jti,
for_spot=False),
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int), "refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int),
"email": email, "email": email,
"spotJwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'], "spotJwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'],
@ -684,13 +660,13 @@ def logout(user_id: int):
def refresh(user_id: int, tenant_id: int = -1) -> dict: def refresh(user_id: int, tenant_id: int = -1) -> dict:
j = refresh_jwt_iat_jti(user_id=user_id) jwt_iat, jwt_r_jti, jwt_r_iat = refresh_jwt_iat_jti(user_id=user_id)
return { return {
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=j.jwt_iat, "jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=jwt_iat,
aud=AUDIENCE), aud=AUDIENCE),
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=j.jwt_refresh_iat, "refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=jwt_r_iat,
aud=AUDIENCE, jwt_jti=j.jwt_refresh_jti), aud=AUDIENCE, jwt_jti=jwt_r_jti),
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int) - (j.jwt_iat - j.jwt_refresh_iat), "refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int) - (jwt_iat - jwt_r_iat)
} }
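
The users hunk above adds (or removes, depending on diff direction) pydantic models for the JWT bookkeeping rows, with a mode="before" validator that coerces integer jti columns to strings. A compact, runnable pydantic v2 sketch of that coercion pattern, using only the fields visible in the diff and made-up values:

    from pydantic import BaseModel, model_validator

    class FullLoginJWTs(BaseModel):
        jwt_iat: int
        jwt_refresh_jti: str
        jwt_refresh_iat: int

        @model_validator(mode="before")
        @classmethod
        def _coerce_jti(cls, values):
            # The DB may return the jti as an integer; the JWT layer expects a string.
            if values.get("jwt_refresh_jti") is not None:
                values["jwt_refresh_jti"] = str(values["jwt_refresh_jti"])
            return values

    class RefreshLoginJWTs(FullLoginJWTs):
        # Spot fields omitted here; in the diff they become Optional with None defaults.
        pass

    row = {"jwt_iat": 1736344377, "jwt_refresh_jti": 42, "jwt_refresh_iat": 1736344000}
    print(FullLoginJWTs(**row).jwt_refresh_jti)  # "42"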

View file

@ -34,10 +34,7 @@ if config("CH_COMPRESSION", cast=bool, default=True):
def transform_result(self, original_function): def transform_result(self, original_function):
@wraps(original_function) @wraps(original_function)
def wrapper(*args, **kwargs): def wrapper(*args, **kwargs):
if kwargs.get("parameters"): logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
elif len(args) > 0:
logger.debug(str.encode(args[0]))
result = original_function(*args, **kwargs) result = original_function(*args, **kwargs)
if isinstance(result, clickhouse_connect.driver.query.QueryResult): if isinstance(result, clickhouse_connect.driver.query.QueryResult):
column_names = result.column_names column_names = result.column_names
@ -149,11 +146,13 @@ class ClickHouseClient:
def __enter__(self): def __enter__(self):
return self.__client return self.__client
def format(self, query, parameters=None): def format(self, query, *, parameters=None):
if parameters: if parameters is None:
ctx = QueryContext(query=query, parameters=parameters) return query
return ctx.final_query return query % {
return query key: f"'{value}'" if isinstance(value, str) else value
for key, value in parameters.items()
}
def __exit__(self, *args): def __exit__(self, *args):
if config('CH_POOL', cast=bool, default=True): if config('CH_POOL', cast=bool, default=True):
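
The ClickHouse client hunk swaps between two ways of rendering a query for debug logging: clickhouse_connect's QueryContext(...).final_query on one side, and plain %-style substitution with quoted strings on the other. A dependency-free sketch of the %-substitution variant; it is only meant for log readability, not for building SQL safely:

    def format_query(query: str, parameters: dict | None = None) -> str:
        # Mirror of the simple formatter in the diff: strings get single quotes,
        # everything else is interpolated as-is.
        if parameters is None:
            return query
        return query % {
            key: f"'{value}'" if isinstance(value, str) else value
            for key, value in parameters.items()
        }

    q = "SELECT * FROM sessions WHERE project_id = %(project_id)s AND user_id = %(user_id)s"
    print(format_query(q, {"project_id": 65, "user_id": "u-123"}))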

View file

@ -0,0 +1,14 @@
from chalicelib.core.sourcemaps import sourcemaps
def format_first_stack_frame(error):
error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
for s in error["stack"]:
for c in s.get("context", []):
for sci, sc in enumerate(c):
if isinstance(sc, str) and len(sc) > 1000:
c[sci] = sc[:1000]
# convert bytes to string:
if isinstance(s["filename"], bytes):
s["filename"] = s["filename"].decode("utf-8")
return error
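
The new helper above trims oversized context strings and decodes byte filenames in the first stack frame after sourcemaps.format_payload has run. A tiny sketch of the same clean-up loop on a hand-built stack, since format_payload itself is project-internal:

    def tidy_stack(stack: list[dict]) -> list[dict]:
        for frame in stack:
            for context_line in frame.get("context", []):
                for i, chunk in enumerate(context_line):
                    # Cap very long source snippets at 1000 characters.
                    if isinstance(chunk, str) and len(chunk) > 1000:
                        context_line[i] = chunk[:1000]
            # Filenames can arrive as bytes; normalize to str.
            if isinstance(frame["filename"], bytes):
                frame["filename"] = frame["filename"].decode("utf-8")
        return stack

    stack = [{"filename": b"app.js", "context": [["x" * 2000, 12]]}]
    tidy = tidy_stack(stack)
    print(tidy[0]["filename"], len(tidy[0]["context"][0][0]))  # app.js 1000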

View file

@ -2,8 +2,6 @@ from typing import List
def get_step_size(startTimestamp, endTimestamp, density, decimal=False, factor=1000): def get_step_size(startTimestamp, endTimestamp, density, decimal=False, factor=1000):
if endTimestamp == 0:
raise Exception("endTimestamp cannot be 0 in order to get step size")
step_size = (endTimestamp // factor - startTimestamp // factor) step_size = (endTimestamp // factor - startTimestamp // factor)
if density <= 1: if density <= 1:
return step_size return step_size
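
get_step_size above turns a millisecond time range into a per-bucket step for a given density (one side of the diff also refuses endTimestamp == 0). A runnable sketch following the visible signature; the final division by density is an assumption, since the rest of the function sits outside this hunk:

    def get_step_size(startTimestamp, endTimestamp, density, decimal=False, factor=1000):
        # Timestamps are in milliseconds; factor=1000 converts them to seconds.
        if endTimestamp == 0:
            # Guard present on only one side of the diff above.
            raise Exception("endTimestamp cannot be 0 in order to get step size")
        step_size = (endTimestamp // factor - startTimestamp // factor)
        if density <= 1:
            return step_size
        # The remainder of the helper (how the range is split into `density`
        # buckets) is outside this hunk; plain division is an assumption here.
        return step_size / density if decimal else step_size // density

    print(get_step_size(1735599600000, 1736290799999, density=7))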

View file

@ -19,16 +19,6 @@ PG_CONFIG = dict(_PG_CONFIG)
if config("PG_TIMEOUT", cast=int, default=0) > 0: if config("PG_TIMEOUT", cast=int, default=0) > 0:
PG_CONFIG["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int) * 1000}" PG_CONFIG["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int) * 1000}"
if config('PG_POOL', cast=bool, default=True):
PG_CONFIG = {
**PG_CONFIG,
# Keepalive settings
"keepalives": 1, # Enable keepalives
"keepalives_idle": 300, # Seconds before sending keepalive
"keepalives_interval": 10, # Seconds between keepalives
"keepalives_count": 3 # Number of keepalives before giving up
}
class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool): class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
def __init__(self, minconn, maxconn, *args, **kwargs): def __init__(self, minconn, maxconn, *args, **kwargs):
@ -65,7 +55,6 @@ RETRY = 0
def make_pool(): def make_pool():
if not config('PG_POOL', cast=bool, default=True): if not config('PG_POOL', cast=bool, default=True):
logger.info("PG_POOL is disabled, not creating a new one")
return return
global postgreSQL_pool global postgreSQL_pool
global RETRY global RETRY
@ -84,7 +73,7 @@ def make_pool():
logger.error("Error while connecting to PostgreSQL", exc_info=error) logger.error("Error while connecting to PostgreSQL", exc_info=error)
if RETRY < RETRY_MAX: if RETRY < RETRY_MAX:
RETRY += 1 RETRY += 1
logger.info(f"Waiting for {RETRY_INTERVAL}s before retry n°{RETRY}") logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
time.sleep(RETRY_INTERVAL) time.sleep(RETRY_INTERVAL)
make_pool() make_pool()
else: else:
@ -108,17 +97,13 @@ class PostgresClient:
elif long_query: elif long_query:
long_config = dict(_PG_CONFIG) long_config = dict(_PG_CONFIG)
long_config["application_name"] += "-LONG" long_config["application_name"] += "-LONG"
if config('PG_TIMEOUT_LONG', cast=int, default=1) > 0: long_config["options"] = f"-c statement_timeout=" \
long_config["options"] = f"-c statement_timeout=" \ f"{config('pg_long_timeout', cast=int, default=5 * 60) * 1000}"
f"{config('PG_TIMEOUT_LONG', cast=int, default=5 * 60) * 1000}"
else:
logger.info("Disabled timeout for long query")
self.connection = psycopg2.connect(**long_config) self.connection = psycopg2.connect(**long_config)
elif not use_pool or not config('PG_POOL', cast=bool, default=True): elif not use_pool or not config('PG_POOL', cast=bool, default=True):
single_config = dict(_PG_CONFIG) single_config = dict(_PG_CONFIG)
single_config["application_name"] += "-NOPOOL" single_config["application_name"] += "-NOPOOL"
if config('PG_TIMEOUT', cast=int, default=1) > 0: single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"
single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"
self.connection = psycopg2.connect(**single_config) self.connection = psycopg2.connect(**single_config)
else: else:
self.connection = postgreSQL_pool.getconn() self.connection = postgreSQL_pool.getconn()
@ -187,7 +172,8 @@ class PostgresClient:
async def init(): async def init():
logger.info(f">use PG_POOL:{config('PG_POOL', default=True)}") logger.info(f">use PG_POOL:{config('PG_POOL', default=True)}")
make_pool() if config('PG_POOL', cast=bool, default=True):
make_pool()
async def terminate(): async def terminate():
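
The pg_client hunks toggle TCP keepalive options on the pooled config and statement_timeout options for long-running and pool-less connections. A sketch of just the config assembly (no connection is opened; the keepalive keys are standard libpq/psycopg2 connect parameters):

    from decouple import config  # assumed available, as in the module above

    _PG_CONFIG = {"dbname": "postgres", "application_name": "chalice"}
    PG_CONFIG = dict(_PG_CONFIG)

    if config("PG_TIMEOUT", cast=int, default=0) > 0:
        PG_CONFIG["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int) * 1000}"

    if config("PG_POOL", cast=bool, default=True):
        PG_CONFIG = {
            **PG_CONFIG,
            "keepalives": 1,            # enable TCP keepalives
            "keepalives_idle": 300,     # seconds of idle before the first probe
            "keepalives_interval": 10,  # seconds between probes
            "keepalives_count": 3,      # failed probes before the connection is dropped
        }

    print(PG_CONFIG)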

View file

@ -4,41 +4,37 @@ import schemas
def get_sql_operator(op: Union[schemas.SearchEventOperator, schemas.ClickEventExtraOperator, schemas.MathOperator]): def get_sql_operator(op: Union[schemas.SearchEventOperator, schemas.ClickEventExtraOperator, schemas.MathOperator]):
if isinstance(op, Enum):
op = op.value
return { return {
schemas.SearchEventOperator.IS.value: "=", schemas.SearchEventOperator.IS: "=",
schemas.SearchEventOperator.ON.value: "=", schemas.SearchEventOperator.ON: "=",
schemas.SearchEventOperator.ON_ANY.value: "IN", schemas.SearchEventOperator.ON_ANY: "IN",
schemas.SearchEventOperator.IS_NOT.value: "!=", schemas.SearchEventOperator.IS_NOT: "!=",
schemas.SearchEventOperator.NOT_ON.value: "!=", schemas.SearchEventOperator.NOT_ON: "!=",
schemas.SearchEventOperator.CONTAINS.value: "ILIKE", schemas.SearchEventOperator.CONTAINS: "ILIKE",
schemas.SearchEventOperator.NOT_CONTAINS.value: "NOT ILIKE", schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
schemas.SearchEventOperator.STARTS_WITH.value: "ILIKE", schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
schemas.SearchEventOperator.ENDS_WITH.value: "ILIKE", schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
# Selector operators: # Selector operators:
schemas.ClickEventExtraOperator.IS.value: "=", schemas.ClickEventExtraOperator.IS: "=",
schemas.ClickEventExtraOperator.IS_NOT.value: "!=", schemas.ClickEventExtraOperator.IS_NOT: "!=",
schemas.ClickEventExtraOperator.CONTAINS.value: "ILIKE", schemas.ClickEventExtraOperator.CONTAINS: "ILIKE",
schemas.ClickEventExtraOperator.NOT_CONTAINS.value: "NOT ILIKE", schemas.ClickEventExtraOperator.NOT_CONTAINS: "NOT ILIKE",
schemas.ClickEventExtraOperator.STARTS_WITH.value: "ILIKE", schemas.ClickEventExtraOperator.STARTS_WITH: "ILIKE",
schemas.ClickEventExtraOperator.ENDS_WITH.value: "ILIKE", schemas.ClickEventExtraOperator.ENDS_WITH: "ILIKE",
schemas.MathOperator.GREATER.value: ">", schemas.MathOperator.GREATER: ">",
schemas.MathOperator.GREATER_EQ.value: ">=", schemas.MathOperator.GREATER_EQ: ">=",
schemas.MathOperator.LESS.value: "<", schemas.MathOperator.LESS: "<",
schemas.MathOperator.LESS_EQ.value: "<=", schemas.MathOperator.LESS_EQ: "<=",
}.get(op, "=") }.get(op, "=")
def is_negation_operator(op: schemas.SearchEventOperator): def is_negation_operator(op: schemas.SearchEventOperator):
if isinstance(op, Enum): return op in [schemas.SearchEventOperator.IS_NOT,
op = op.value schemas.SearchEventOperator.NOT_ON,
return op in [schemas.SearchEventOperator.IS_NOT.value, schemas.SearchEventOperator.NOT_CONTAINS,
schemas.SearchEventOperator.NOT_ON.value, schemas.ClickEventExtraOperator.IS_NOT,
schemas.SearchEventOperator.NOT_CONTAINS.value, schemas.ClickEventExtraOperator.NOT_CONTAINS]
schemas.ClickEventExtraOperator.IS_NOT.value,
schemas.ClickEventExtraOperator.NOT_CONTAINS.value]
def reverse_sql_operator(op): def reverse_sql_operator(op):
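
get_sql_operator above keys its lookup either on the enum members themselves or on their .value strings. Assuming these schema enums subclass (str, Enum), a member hashes and compares equal to its value, so both spellings resolve; normalizing to .value additionally covers callers that pass raw strings. A small sketch with a trimmed-down operator set:

    from enum import Enum

    class SearchEventOperator(str, Enum):
        IS = "is"
        IS_NOT = "isNot"
        CONTAINS = "contains"

    SQL_OPERATORS = {
        SearchEventOperator.IS.value: "=",
        SearchEventOperator.IS_NOT.value: "!=",
        SearchEventOperator.CONTAINS.value: "ILIKE",
    }

    def get_sql_operator(op):
        # Normalizing to .value lets both enum members and plain strings match.
        if isinstance(op, Enum):
            op = op.value
        return SQL_OPERATORS.get(op, "=")

    print(get_sql_operator(SearchEventOperator.CONTAINS))  # ILIKE
    print(get_sql_operator("isNot"))                       # !=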

View file

@ -0,0 +1,591 @@
-- -- Original Q3
-- WITH ranked_events AS (SELECT *
-- FROM ranked_events_1736344377403),
-- n1 AS (SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- COUNT(1) AS sessions_count
-- FROM ranked_events
-- WHERE event_number_in_session = 1
-- AND isNotNull(next_value)
-- GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
-- ORDER BY sessions_count DESC
-- LIMIT 8),
-- n2 AS (SELECT *
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
-- re.event_type AS event_type,
-- re.e_value AS e_value,
-- re.next_type AS next_type,
-- re.next_value AS next_value,
-- COUNT(1) AS sessions_count
-- FROM n1
-- INNER JOIN ranked_events AS re
-- ON (n1.next_value = re.e_value AND n1.next_type = re.event_type)
-- WHERE re.event_number_in_session = 2
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
-- re.next_value) AS sub_level
-- ORDER BY sessions_count DESC
-- LIMIT 8),
-- n3 AS (SELECT *
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
-- re.event_type AS event_type,
-- re.e_value AS e_value,
-- re.next_type AS next_type,
-- re.next_value AS next_value,
-- COUNT(1) AS sessions_count
-- FROM n2
-- INNER JOIN ranked_events AS re
-- ON (n2.next_value = re.e_value AND n2.next_type = re.event_type)
-- WHERE re.event_number_in_session = 3
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
-- re.next_value) AS sub_level
-- ORDER BY sessions_count DESC
-- LIMIT 8),
-- n4 AS (SELECT *
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
-- re.event_type AS event_type,
-- re.e_value AS e_value,
-- re.next_type AS next_type,
-- re.next_value AS next_value,
-- COUNT(1) AS sessions_count
-- FROM n3
-- INNER JOIN ranked_events AS re
-- ON (n3.next_value = re.e_value AND n3.next_type = re.event_type)
-- WHERE re.event_number_in_session = 4
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
-- re.next_value) AS sub_level
-- ORDER BY sessions_count DESC
-- LIMIT 8),
-- n5 AS (SELECT *
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
-- re.event_type AS event_type,
-- re.e_value AS e_value,
-- re.next_type AS next_type,
-- re.next_value AS next_value,
-- COUNT(1) AS sessions_count
-- FROM n4
-- INNER JOIN ranked_events AS re
-- ON (n4.next_value = re.e_value AND n4.next_type = re.event_type)
-- WHERE re.event_number_in_session = 5
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
-- re.next_value) AS sub_level
-- ORDER BY sessions_count DESC
-- LIMIT 8)
-- SELECT *
-- FROM (SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n1
-- UNION ALL
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n2
-- UNION ALL
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n3
-- UNION ALL
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n4
-- UNION ALL
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- next_type,
-- next_value,
-- sessions_count
-- FROM n5) AS chart_steps
-- ORDER BY event_number_in_session;
-- Q1
-- CREATE TEMPORARY TABLE pre_ranked_events_1736344377403 AS
CREATE TABLE pre_ranked_events_1736344377403 ENGINE = Memory AS
(WITH initial_event AS (SELECT events.session_id, MIN(datetime) AS start_event_timestamp
FROM experimental.events AS events
WHERE ((event_type = 'LOCATION' AND (url_path = '/en/deployment/')))
AND events.project_id = toUInt16(65)
AND events.datetime >= toDateTime(1735599600000 / 1000)
AND events.datetime < toDateTime(1736290799999 / 1000)
GROUP BY 1),
pre_ranked_events AS (SELECT *
FROM (SELECT session_id,
event_type,
datetime,
url_path AS e_value,
row_number() OVER (PARTITION BY session_id
ORDER BY datetime ,
message_id ) AS event_number_in_session
FROM experimental.events AS events
INNER JOIN initial_event ON (events.session_id = initial_event.session_id)
WHERE events.project_id = toUInt16(65)
AND events.datetime >= toDateTime(1735599600000 / 1000)
AND events.datetime < toDateTime(1736290799999 / 1000)
AND (events.event_type = 'LOCATION')
AND events.datetime >= initial_event.start_event_timestamp
) AS full_ranked_events
WHERE event_number_in_session <= 5)
SELECT *
FROM pre_ranked_events);
;
SELECT *
FROM pre_ranked_events_1736344377403
WHERE event_number_in_session < 3;
-- ---------Q2-----------
-- CREATE TEMPORARY TABLE ranked_events_1736344377403 AS
DROP TABLE ranked_events_1736344377403;
CREATE TABLE ranked_events_1736344377403 ENGINE = Memory AS
(WITH pre_ranked_events AS (SELECT *
FROM pre_ranked_events_1736344377403),
start_points AS (SELECT DISTINCT session_id
FROM pre_ranked_events
WHERE ((event_type = 'LOCATION' AND (e_value = '/en/deployment/')))
AND pre_ranked_events.event_number_in_session = 1),
ranked_events AS (SELECT pre_ranked_events.*,
leadInFrame(e_value)
OVER (PARTITION BY session_id ORDER BY datetime
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value,
leadInFrame(toNullable(event_type))
OVER (PARTITION BY session_id ORDER BY datetime
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type
FROM start_points
INNER JOIN pre_ranked_events USING (session_id))
SELECT *
FROM ranked_events);
-- ranked events
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 2
-- AND e_value='/en/deployment/deploy-docker/'
-- AND next_value NOT IN ('/en/deployment/','/en/plugins/','/en/using-or/')
-- AND e_value NOT IN ('/en/deployment/deploy-docker/','/en/getting-started/','/en/deployment/deploy-ubuntu/')
AND isNotNull(next_value)
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY event_number_in_session, sessions_count DESC;
SELECT event_number_in_session,
event_type,
e_value,
COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 1
GROUP BY event_number_in_session, event_type, e_value
ORDER BY event_number_in_session, sessions_count DESC;
SELECT COUNT(1) AS sessions_count
FROM ranked_events_1736344377403
WHERE event_number_in_session = 2
AND isNull(next_value)
;
-- ---------Q3 MORE -----------
WITH ranked_events AS (SELECT *
FROM ranked_events_1736344377403),
n1 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 1
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
n2 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 2
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
n3 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 3
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
drop_n AS (-- STEP 1
SELECT event_number_in_session,
event_type,
e_value,
'DROP' AS next_type,
NULL AS next_value,
sessions_count
FROM n1
WHERE isNull(n1.next_type)
UNION ALL
-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
'DROP' AS next_type,
NULL AS next_value,
sessions_count
FROM n2
WHERE isNull(n2.next_type)),
-- TODO: make this as top_steps, where every step will go to next as top/others
top_n1 AS (-- STEP 1
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
sessions_count
FROM n1
WHERE isNotNull(next_type)
ORDER BY sessions_count DESC
LIMIT 3),
top_n2 AS (-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
sessions_count
FROM n2
WHERE (event_type, e_value) IN (SELECT event_type,
e_value
FROM n2
WHERE isNotNull(next_type)
GROUP BY event_type, e_value
ORDER BY SUM(sessions_count) DESC
LIMIT 3)
ORDER BY sessions_count DESC),
top_n AS (SELECT *
FROM top_n1
UNION ALL
SELECT *
FROM top_n2),
u_top_n AS (SELECT DISTINCT event_number_in_session,
event_type,
e_value
FROM top_n),
others_n AS (
-- STEP 1
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
sessions_count
FROM n1
WHERE isNotNull(next_type)
ORDER BY sessions_count DESC
LIMIT 1000000 OFFSET 3
UNION ALL
-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
sessions_count
FROM n2
WHERE isNotNull(next_type)
-- GROUP BY event_number_in_session, event_type, e_value
ORDER BY sessions_count DESC
LIMIT 1000000 OFFSET 3)
SELECT *
FROM (
-- Top
SELECT *
FROM top_n
-- UNION ALL
-- -- Others
-- SELECT event_number_in_session,
-- event_type,
-- e_value,
-- 'OTHER' AS next_type,
-- NULL AS next_value,
-- SUM(sessions_count)
-- FROM others_n
-- GROUP BY event_number_in_session, event_type, e_value
-- UNION ALL
-- -- Top go to Drop
-- SELECT drop_n.event_number_in_session,
-- drop_n.event_type,
-- drop_n.e_value,
-- drop_n.next_type,
-- drop_n.next_value,
-- drop_n.sessions_count
-- FROM drop_n
-- INNER JOIN u_top_n ON (drop_n.event_number_in_session = u_top_n.event_number_in_session
-- AND drop_n.event_type = u_top_n.event_type
-- AND drop_n.e_value = u_top_n.e_value)
-- ORDER BY drop_n.event_number_in_session
-- -- -- UNION ALL
-- -- -- Top go to Others
-- SELECT top_n.event_number_in_session,
-- top_n.event_type,
-- top_n.e_value,
-- 'OTHER' AS next_type,
-- NULL AS next_value,
-- SUM(top_n.sessions_count) AS sessions_count
-- FROM top_n
-- LEFT JOIN others_n ON (others_n.event_number_in_session = (top_n.event_number_in_session + 1)
-- AND top_n.next_type = others_n.event_type
-- AND top_n.next_value = others_n.e_value)
-- WHERE others_n.event_number_in_session IS NULL
-- AND top_n.next_type IS NOT NULL
-- GROUP BY event_number_in_session, event_type, e_value
-- UNION ALL
-- -- Others got to Top
-- SELECT others_n.event_number_in_session,
-- 'OTHER' AS event_type,
-- NULL AS e_value,
-- others_n.s_next_type AS next_type,
-- others_n.s_next_value AS next_value,
-- SUM(sessions_count) AS sessions_count
-- FROM others_n
-- INNER JOIN top_n ON (others_n.event_number_in_session = top_n.event_number_in_session + 1 AND
-- others_n.s_next_type = top_n.event_type AND
-- others_n.s_next_value = top_n.event_type)
-- GROUP BY others_n.event_number_in_session, next_type, next_value
-- UNION ALL
-- -- TODO: find if this works or not
-- -- Others got to Others
-- SELECT others_n.event_number_in_session,
-- 'OTHER' AS event_type,
-- NULL AS e_value,
-- 'OTHERS' AS next_type,
-- NULL AS next_value,
-- SUM(sessions_count) AS sessions_count
-- FROM others_n
-- LEFT JOIN u_top_n ON ((others_n.event_number_in_session + 1) = u_top_n.event_number_in_session
-- AND others_n.s_next_type = u_top_n.event_type
-- AND others_n.s_next_value = u_top_n.e_value)
-- WHERE u_top_n.event_number_in_session IS NULL
-- GROUP BY others_n.event_number_in_session
)
ORDER BY event_number_in_session;
-- ---------Q3 TOP ON VALUE ONLY -----------
WITH ranked_events AS (SELECT *
FROM ranked_events_1736344377403),
n1 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 1
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
n2 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 2
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
n3 AS (SELECT event_number_in_session,
event_type,
e_value,
next_type,
next_value,
COUNT(1) AS sessions_count
FROM ranked_events
WHERE event_number_in_session = 3
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
ORDER BY sessions_count DESC),
drop_n AS (-- STEP 1
SELECT event_number_in_session,
event_type,
e_value,
'DROP' AS next_type,
NULL AS next_value,
sessions_count
FROM n1
WHERE isNull(n1.next_type)
UNION ALL
-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
'DROP' AS next_type,
NULL AS next_value,
sessions_count
FROM n2
WHERE isNull(n2.next_type)),
top_n AS (SELECT event_number_in_session,
event_type,
e_value,
SUM(sessions_count) AS sessions_count
FROM n1
GROUP BY event_number_in_session, event_type, e_value
LIMIT 1
UNION ALL
-- STEP 2
SELECT event_number_in_session,
event_type,
e_value,
SUM(sessions_count) AS sessions_count
FROM n2
GROUP BY event_number_in_session, event_type, e_value
ORDER BY sessions_count DESC
LIMIT 3
UNION ALL
-- STEP 3
SELECT event_number_in_session,
event_type,
e_value,
SUM(sessions_count) AS sessions_count
FROM n3
GROUP BY event_number_in_session, event_type, e_value
ORDER BY sessions_count DESC
LIMIT 3),
top_n_with_next AS (SELECT n1.*
FROM n1
UNION ALL
SELECT n2.*
FROM n2
INNER JOIN top_n ON (n2.event_number_in_session = top_n.event_number_in_session
AND n2.event_type = top_n.event_type
AND n2.e_value = top_n.e_value)),
others_n AS (
-- STEP 2
SELECT n2.*
FROM n2
WHERE (n2.event_number_in_session, n2.event_type, n2.e_value) NOT IN
(SELECT event_number_in_session, event_type, e_value
FROM top_n
WHERE top_n.event_number_in_session = 2)
UNION ALL
-- STEP 3
SELECT n3.*
FROM n3
WHERE (n3.event_number_in_session, n3.event_type, n3.e_value) NOT IN
(SELECT event_number_in_session, event_type, e_value
FROM top_n
WHERE top_n.event_number_in_session = 3))
SELECT *
FROM (
-- SELECT sum(top_n_with_next.sessions_count)
-- FROM top_n_with_next
-- WHERE event_number_in_session = 1
-- -- AND isNotNull(next_value)
-- AND (next_type, next_value) IN
-- (SELECT others_n.event_type, others_n.e_value FROM others_n WHERE others_n.event_number_in_session = 2)
-- -- SELECT * FROM others_n
-- -- SELECT * FROM n2
-- SELECT *
-- FROM top_n
-- );
-- Top to Top: valid
SELECT top_n_with_next.*
FROM top_n_with_next
INNER JOIN top_n
ON (top_n_with_next.event_number_in_session + 1 = top_n.event_number_in_session
AND top_n_with_next.next_type = top_n.event_type
AND top_n_with_next.next_value = top_n.e_value)
UNION ALL
-- Top to Others: valid
SELECT top_n_with_next.event_number_in_session,
top_n_with_next.event_type,
top_n_with_next.e_value,
'OTHER' AS next_type,
NULL AS next_value,
SUM(top_n_with_next.sessions_count) AS sessions_count
FROM top_n_with_next
WHERE (top_n_with_next.event_number_in_session + 1, top_n_with_next.next_type, top_n_with_next.next_value) IN
(SELECT others_n.event_number_in_session, others_n.event_type, others_n.e_value FROM others_n)
GROUP BY top_n_with_next.event_number_in_session, top_n_with_next.event_type, top_n_with_next.e_value
UNION ALL
-- Top go to Drop: valid
SELECT drop_n.event_number_in_session,
drop_n.event_type,
drop_n.e_value,
drop_n.next_type,
drop_n.next_value,
drop_n.sessions_count
FROM drop_n
INNER JOIN top_n ON (drop_n.event_number_in_session = top_n.event_number_in_session
AND drop_n.event_type = top_n.event_type
AND drop_n.e_value = top_n.e_value)
ORDER BY drop_n.event_number_in_session
UNION ALL
-- Others got to Drop: valid
SELECT others_n.event_number_in_session,
'OTHER' AS event_type,
NULL AS e_value,
'DROP' AS next_type,
NULL AS next_value,
SUM(others_n.sessions_count) AS sessions_count
FROM others_n
WHERE isNull(others_n.next_type)
AND others_n.event_number_in_session < 3
GROUP BY others_n.event_number_in_session, next_type, next_value
UNION ALL
-- Others got to Top:valid
SELECT others_n.event_number_in_session,
'OTHER' AS event_type,
NULL AS e_value,
others_n.next_type,
others_n.next_value,
SUM(others_n.sessions_count) AS sessions_count
FROM others_n
WHERE isNotNull(others_n.next_type)
AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) IN
(SELECT top_n.event_number_in_session, top_n.event_type, top_n.e_value FROM top_n)
GROUP BY others_n.event_number_in_session, others_n.next_type, others_n.next_value
UNION ALL
-- Others got to Others
SELECT others_n.event_number_in_session,
'OTHER' AS event_type,
NULL AS e_value,
'OTHERS' AS next_type,
NULL AS next_value,
SUM(sessions_count) AS sessions_count
FROM others_n
WHERE isNotNull(others_n.next_type)
AND others_n.event_number_in_session < 3
AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) NOT IN
(SELECT event_number_in_session, event_type, e_value FROM top_n)
GROUP BY others_n.event_number_in_session)
ORDER BY event_number_in_session, sessions_count
DESC;
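
The queries above rank LOCATION events per session, then count (step, value -> next value) transitions and bucket them into top-N, OTHER and DROP rows. A toy pure-Python illustration of that counting idea on made-up sessions; this is not the project's implementation, only the aggregation the SQL expresses:

    from collections import Counter
    from itertools import zip_longest

    sessions = {
        "s1": ["/en/deployment/", "/en/deployment/deploy-docker/", "/en/plugins/"],
        "s2": ["/en/deployment/", "/en/getting-started/"],
        "s3": ["/en/deployment/"],
    }

    MAX_STEPS = 5
    transitions = Counter()  # (step_number, value, next_value) -> sessions_count

    for pages in sessions.values():
        pages = pages[:MAX_STEPS]
        for step, (value, next_value) in enumerate(zip_longest(pages, pages[1:]), start=1):
            # next_value is None on the last event of a session: that is the DROP case.
            transitions[(step, value, next_value or "DROP")] += 1

    for (step, value, next_value), count in sorted(transitions.items()):
        print(step, value, "->", next_value, count)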

View file

@ -7,30 +7,27 @@ from fastapi import HTTPException, status
from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Response from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Response
import schemas import schemas
from chalicelib.core import assist, signup, feature_flags
from chalicelib.core import scope from chalicelib.core import scope
from chalicelib.core import assist, signup, feature_flags
from chalicelib.core.metrics import heatmaps
from chalicelib.core.errors import errors, errors_details
from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
sessions_assignments, unprocessed_sessions, sessions_search
from chalicelib.core import tenants, users, projects, license from chalicelib.core import tenants, users, projects, license
from chalicelib.core import webhook from chalicelib.core import webhook
from chalicelib.core.collaborations.collaboration_slack import Slack from chalicelib.core.collaborations.collaboration_slack import Slack
from chalicelib.core.errors import errors, errors_details
from chalicelib.core.metrics import heatmaps
from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
sessions_assignments, unprocessed_sessions, sessions_search
from chalicelib.utils import captcha, smtp from chalicelib.utils import captcha, smtp
from chalicelib.utils import contextual_validators
from chalicelib.utils import helper from chalicelib.utils import helper
from chalicelib.utils.TimeUTC import TimeUTC from chalicelib.utils.TimeUTC import TimeUTC
from or_dependencies import OR_context, OR_role from or_dependencies import OR_context, OR_role
from routers.base import get_routers from routers.base import get_routers
from routers.subs import spot from routers.subs import spot
from chalicelib.utils import contextual_validators
logger = logging.getLogger(__name__) logger = logging.getLogger(__name__)
public_app, app, app_apikey = get_routers() public_app, app, app_apikey = get_routers()
if config("LOCAL_DEV", cast=bool, default=False): COOKIE_PATH = "/api/refresh"
COOKIE_PATH = "/refresh"
else:
COOKIE_PATH = "/api/refresh"
@public_app.get('/signup', tags=['signup']) @public_app.get('/signup', tags=['signup'])

View file

@ -9,244 +9,172 @@ from routers.base import get_routers
public_app, app, app_apikey = get_routers() public_app, app, app_apikey = get_routers()
@app.post("/{projectId}/dashboards", tags=["dashboard"]) @app.post('/{projectId}/dashboards', tags=["dashboard"])
def create_dashboards(projectId: int, data: schemas.CreateDashboardSchema = Body(...), def create_dashboards(projectId: int, data: schemas.CreateDashboardSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return dashboards.create_dashboard( return dashboards.create_dashboard(project_id=projectId, user_id=context.user_id, data=data)
project_id=projectId, user_id=context.user_id, data=data
)
@app.get("/{projectId}/dashboards", tags=["dashboard"]) @app.get('/{projectId}/dashboards', tags=["dashboard"])
def get_dashboards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): def get_dashboards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": dashboards.get_dashboards(project_id=projectId, user_id=context.user_id)}
"data": dashboards.get_dashboards(project_id=projectId, user_id=context.user_id)
}
@app.get("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"]) @app.get('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
def get_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)): def get_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
data = dashboards.get_dashboard( data = dashboards.get_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
)
if data is None: if data is None:
return {"errors": ["dashboard not found"]} return {"errors": ["dashboard not found"]}
return {"data": data} return {"data": data}
@app.put("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"]) @app.put('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
def update_dashboard(projectId: int, dashboardId: int, data: schemas.EditDashboardSchema = Body(...), def update_dashboard(projectId: int, dashboardId: int, data: schemas.EditDashboardSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": dashboards.update_dashboard(project_id=projectId, user_id=context.user_id,
"data": dashboards.update_dashboard( dashboard_id=dashboardId, data=data)}
project_id=projectId,
user_id=context.user_id,
dashboard_id=dashboardId,
data=data,
)
}
@app.delete("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"]) @app.delete('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
def delete_dashboard(projectId: int, dashboardId: int, _=Body(None), def delete_dashboard(projectId: int, dashboardId: int, _=Body(None),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return dashboards.delete_dashboard( return dashboards.delete_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
)
@app.get("/{projectId}/dashboards/{dashboardId}/pin", tags=["dashboard"]) @app.get('/{projectId}/dashboards/{dashboardId}/pin', tags=["dashboard"])
def pin_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)): def pin_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": dashboards.pin_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)}
"data": dashboards.pin_dashboard(
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
)
}
@app.post("/{projectId}/dashboards/{dashboardId}/cards", tags=["cards"]) @app.post('/{projectId}/dashboards/{dashboardId}/cards', tags=["cards"])
def add_card_to_dashboard(projectId: int, dashboardId: int, data: schemas.AddWidgetToDashboardPayloadSchema = Body(...), def add_card_to_dashboard(projectId: int, dashboardId: int,
data: schemas.AddWidgetToDashboardPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": dashboards.add_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
"data": dashboards.add_widget( data=data)}
project_id=projectId,
user_id=context.user_id,
dashboard_id=dashboardId,
data=data,
)
}
@app.post("/{projectId}/dashboards/{dashboardId}/metrics", tags=["dashboard"]) @app.post('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
# @app.put('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"]) # @app.put('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int, data: schemas.CardSchema = Body(...), def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int,
data: schemas.CardSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": dashboards.create_metric_add_widget(project=context.project, user_id=context.user_id,
"data": dashboards.create_metric_add_widget( dashboard_id=dashboardId, data=data)}
project=context.project,
user_id=context.user_id,
dashboard_id=dashboardId,
data=data,
)
}
@app.put("/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"]) @app.put('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
def update_widget_in_dashboard(projectId: int, dashboardId: int, widgetId: int, def update_widget_in_dashboard(projectId: int, dashboardId: int, widgetId: int,
data: schemas.UpdateWidgetPayloadSchema = Body(...), data: schemas.UpdateWidgetPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return dashboards.update_widget( return dashboards.update_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
project_id=projectId, widget_id=widgetId, data=data)
user_id=context.user_id,
dashboard_id=dashboardId,
widget_id=widgetId,
data=data,
)
@app.delete("/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"]) @app.delete('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
def remove_widget_from_dashboard(projectId: int, dashboardId: int, widgetId: int, _=Body(None), def remove_widget_from_dashboard(projectId: int, dashboardId: int, widgetId: int, _=Body(None),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return dashboards.remove_widget( return dashboards.remove_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
project_id=projectId, widget_id=widgetId)
user_id=context.user_id,
dashboard_id=dashboardId,
widget_id=widgetId,
)
@app.post("/{projectId}/cards/try", tags=["cards"]) @app.post('/{projectId}/cards/try', tags=["cards"])
def try_card(projectId: int, data: schemas.CardSchema = Body(...), def try_card(projectId: int, data: schemas.CardSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": custom_metrics.get_chart(project=context.project, data=data, user_id=context.user_id)}
"data": custom_metrics.get_chart(
project=context.project, data=data, user_id=context.user_id
)
}
@app.post("/{projectId}/cards/try/sessions", tags=["cards"]) @app.post('/{projectId}/cards/try/sessions', tags=["cards"])
def try_card_sessions(projectId: int, data: schemas.CardSessionsSchema = Body(...), def try_card_sessions(projectId: int, data: schemas.CardSessionsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_sessions( data = custom_metrics.get_sessions(project=context.project, user_id=context.user_id, data=data)
project=context.project, user_id=context.user_id, data=data
)
return {"data": data} return {"data": data}
@app.post("/{projectId}/cards/try/issues", tags=["cards"]) @app.post('/{projectId}/cards/try/issues', tags=["cards"])
def try_card_issues(projectId: int, data: schemas.CardSchema = Body(...), def try_card_issues(projectId: int, data: schemas.CardSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": custom_metrics.get_issues(project=context.project, user_id=context.user_id, data=data)}
"data": custom_metrics.get_issues(
project=context.project, user_id=context.user_id, data=data
)
}
@app.get("/{projectId}/cards", tags=["cards"]) @app.get('/{projectId}/cards', tags=["cards"])
def get_cards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)): def get_cards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)}
"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)
}
@app.post("/{projectId}/cards", tags=["cards"]) @app.post('/{projectId}/cards', tags=["cards"])
def create_card(projectId: int, data: schemas.CardSchema = Body(...), def create_card(projectId: int, data: schemas.CardSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return custom_metrics.create_card( return custom_metrics.create_card(project=context.project, user_id=context.user_id, data=data)
project=context.project, user_id=context.user_id, data=data
)
@app.post("/{projectId}/cards/search", tags=["cards"]) @app.post('/{projectId}/cards/search', tags=["cards"])
def search_cards(projectId: int, data: schemas.MetricSearchSchema = Body(...), def search_cards(projectId: int, data: schemas.SearchCardsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return { return {"data": custom_metrics.search_all(project_id=projectId, user_id=context.user_id, data=data)}
"data": custom_metrics.search_metrics(
project_id=projectId, user_id=context.user_id, data=data
)
}
@app.get("/{projectId}/cards/{metric_id}", tags=["cards"]) @app.get('/{projectId}/cards/{metric_id}', tags=["cards"])
def get_card(projectId: int, metric_id: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)): def get_card(projectId: int, metric_id: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)):
if metric_id.isnumeric(): if metric_id.isnumeric():
metric_id = int(metric_id) metric_id = int(metric_id)
else: else:
return {"errors": ["invalid card_id"]} return {"errors": ["invalid card_id"]}
data = custom_metrics.get_card( data = custom_metrics.get_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id)
project_id=projectId, user_id=context.user_id, metric_id=metric_id
)
if data is None: if data is None:
return {"errors": ["card not found"]} return {"errors": ["card not found"]}
return {"data": data} return {"data": data}
@app.post("/{projectId}/cards/{metric_id}/sessions", tags=["cards"]) @app.post('/{projectId}/cards/{metric_id}/sessions', tags=["cards"])
def get_card_sessions(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...), def get_card_sessions(projectId: int, metric_id: int,
data: schemas.CardSessionsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_sessions_by_card_id( data = custom_metrics.get_sessions_by_card_id(project=context.project, user_id=context.user_id, metric_id=metric_id,
project=context.project, user_id=context.user_id, metric_id=metric_id, data=data data=data)
)
if data is None: if data is None:
return {"errors": ["custom metric not found"]} return {"errors": ["custom metric not found"]}
return {"data": data} return {"data": data}
@app.post("/{projectId}/cards/{metric_id}/issues/{issueId}/sessions", tags=["dashboard"]) @app.post('/{projectId}/cards/{metric_id}/issues/{issueId}/sessions', tags=["dashboard"])
def get_metric_funnel_issue_sessions(projectId: int, metric_id: int, issueId: str, def get_metric_funnel_issue_sessions(projectId: int, metric_id: int, issueId: str,
data: schemas.CardSessionsSchema = Body(...), data: schemas.CardSessionsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_funnel_sessions_by_issue( data = custom_metrics.get_funnel_sessions_by_issue(project_id=projectId, user_id=context.user_id,
project_id=projectId, metric_id=metric_id, issue_id=issueId, data=data)
user_id=context.user_id,
metric_id=metric_id,
issue_id=issueId,
data=data,
)
if data is None: if data is None:
return {"errors": ["custom metric not found"]} return {"errors": ["custom metric not found"]}
return {"data": data} return {"data": data}
@app.post("/{projectId}/cards/{metric_id}/chart", tags=["card"]) @app.post('/{projectId}/cards/{metric_id}/chart', tags=["card"])
def get_card_chart(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...), def get_card_chart(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.make_chart_from_card( data = custom_metrics.make_chart_from_card(project=context.project, user_id=context.user_id, metric_id=metric_id,
project=context.project, user_id=context.user_id, metric_id=metric_id, data=data data=data)
)
return {"data": data} return {"data": data}
@app.post("/{projectId}/cards/{metric_id}", tags=["dashboard"]) @app.post('/{projectId}/cards/{metric_id}', tags=["dashboard"])
def update_card(projectId: int, metric_id: int, data: schemas.CardSchema = Body(...), def update_card(projectId: int, metric_id: int, data: schemas.CardSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.update_card( data = custom_metrics.update_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)
project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data
)
if data is None: if data is None:
return {"errors": ["custom metric not found"]} return {"errors": ["custom metric not found"]}
return {"data": data} return {"data": data}
@app.post("/{projectId}/cards/{metric_id}/status", tags=["dashboard"]) @app.post('/{projectId}/cards/{metric_id}/status', tags=["dashboard"])
def update_card_state(projectId: int, metric_id: int, data: schemas.UpdateCardStatusSchema = Body(...), def update_card_state(projectId: int, metric_id: int,
data: schemas.UpdateCardStatusSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)): context: schemas.CurrentContext = Depends(OR_context)):
return { return {
"data": custom_metrics.change_state( "data": custom_metrics.change_state(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
project_id=projectId, status=data.active)}
user_id=context.user_id,
metric_id=metric_id,
status=data.active,
)
}
@app.delete("/{projectId}/cards/{metric_id}", tags=["dashboard"]) @app.delete('/{projectId}/cards/{metric_id}', tags=["dashboard"])
def delete_card(projectId: int, metric_id: int, _=Body(None), context: schemas.CurrentContext = Depends(OR_context)): def delete_card(projectId: int, metric_id: int, _=Body(None),
return { context: schemas.CurrentContext = Depends(OR_context)):
"data": custom_metrics.delete_card( return {"data": custom_metrics.delete_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id)}
project_id=projectId, user_id=context.user_id, metric_id=metric_id
)
}

View file

@ -1,4 +1,3 @@
from decouple import config
from fastapi import Depends from fastapi import Depends
from starlette.responses import JSONResponse, Response from starlette.responses import JSONResponse, Response
@ -9,10 +8,7 @@ from routers.base import get_routers
public_app, app, app_apikey = get_routers(prefix="/spot", tags=["spot"]) public_app, app, app_apikey = get_routers(prefix="/spot", tags=["spot"])
if config("LOCAL_DEV", cast=bool, default=False): COOKIE_PATH = "/api/spot/refresh"
COOKIE_PATH = "/spot/refresh"
else:
COOKIE_PATH = "/api/spot/refresh"
@app.get('/logout') @app.get('/logout')

View file

@ -960,6 +960,36 @@ class CardSessionsSchema(_TimedSchema, _PaginatedSchema):
return self return self
# We don't need this as the UI is expecting filters to override the full series' filters
# @model_validator(mode="after")
# def __merge_out_filters_with_series(self):
# for f in self.filters:
# for s in self.series:
# found = False
#
# if f.is_event:
# sub = s.filter.events
# else:
# sub = s.filter.filters
#
# for e in sub:
# if f.type == e.type and f.operator == e.operator:
# found = True
# if f.is_event:
# # If extra event: append value
# for v in f.value:
# if v not in e.value:
# e.value.append(v)
# else:
# # If extra filter: override value
# e.value = f.value
# if not found:
# sub.append(f)
#
# self.filters = []
#
# return self
# UI is expecting filters to override the full series' filters # UI is expecting filters to override the full series' filters
@model_validator(mode="after") @model_validator(mode="after")
def __override_series_filters_with_outer_filters(self): def __override_series_filters_with_outer_filters(self):
@ -1030,16 +1060,6 @@ class CardTable(__CardSchema):
values["metricValue"] = [] values["metricValue"] = []
return values return values
@model_validator(mode="after")
def __enforce_AND_operator(self):
self.metric_of = MetricOfTable(self.metric_of)
if self.metric_of in (MetricOfTable.VISITED_URL, MetricOfTable.FETCH, \
MetricOfTable.VISITED_URL.value, MetricOfTable.FETCH.value):
for s in self.series:
if s.filter is not None:
s.filter.events_order = SearchEventOrder.AND
return self
@model_validator(mode="after") @model_validator(mode="after")
def __transform(self): def __transform(self):
self.metric_of = MetricOfTable(self.metric_of) self.metric_of = MetricOfTable(self.metric_of)
@ -1115,7 +1135,7 @@ class CardPathAnalysis(__CardSchema):
view_type: MetricOtherViewType = Field(...) view_type: MetricOtherViewType = Field(...)
metric_value: List[ProductAnalyticsSelectedEventType] = Field(default_factory=list) metric_value: List[ProductAnalyticsSelectedEventType] = Field(default_factory=list)
density: int = Field(default=4, ge=2, le=10) density: int = Field(default=4, ge=2, le=10)
rows: int = Field(default=5, ge=1, le=10) rows: int = Field(default=3, ge=1, le=10)
start_type: Literal["start", "end"] = Field(default="start") start_type: Literal["start", "end"] = Field(default="start")
start_point: List[PathAnalysisSubFilterSchema] = Field(default_factory=list) start_point: List[PathAnalysisSubFilterSchema] = Field(default_factory=list)
@ -1348,42 +1368,6 @@ class SearchCardsSchema(_PaginatedSchema):
query: Optional[str] = Field(default=None) query: Optional[str] = Field(default=None)
class MetricSortColumnType(str, Enum):
NAME = "name"
METRIC_TYPE = "metric_type"
METRIC_OF = "metric_of"
IS_PUBLIC = "is_public"
CREATED_AT = "created_at"
EDITED_AT = "edited_at"
class MetricFilterColumnType(str, Enum):
NAME = "name"
METRIC_TYPE = "metric_type"
METRIC_OF = "metric_of"
IS_PUBLIC = "is_public"
USER_ID = "user_id"
CREATED_AT = "created_at"
EDITED_AT = "edited_at"
class MetricListSort(BaseModel):
field: Optional[str] = Field(default=None)
order: Optional[str] = Field(default=SortOrderType.DESC)
class MetricFilter(BaseModel):
type: Optional[str] = Field(default=None)
query: Optional[str] = Field(default=None)
class MetricSearchSchema(_PaginatedSchema):
filter: Optional[MetricFilter] = Field(default=None)
sort: Optional[MetricListSort] = Field(default=MetricListSort())
shared_only: bool = Field(default=False)
mine_only: bool = Field(default=False)
class _HeatMapSearchEventRaw(SessionSearchEventSchema2): class _HeatMapSearchEventRaw(SessionSearchEventSchema2):
type: Literal[EventType.LOCATION] = Field(...) type: Literal[EventType.LOCATION] = Field(...)

View file

@ -19,16 +19,14 @@ const EVENTS_DEFINITION = {
} }
}; };
EVENTS_DEFINITION.emit = { EVENTS_DEFINITION.emit = {
NEW_AGENT: "NEW_AGENT", NEW_AGENT: "NEW_AGENT",
NO_AGENTS: "NO_AGENT", NO_AGENTS: "NO_AGENT",
AGENT_DISCONNECT: "AGENT_DISCONNECTED", AGENT_DISCONNECT: "AGENT_DISCONNECTED",
AGENTS_CONNECTED: "AGENTS_CONNECTED", AGENTS_CONNECTED: "AGENTS_CONNECTED",
AGENTS_INFO_CONNECTED: "AGENTS_INFO_CONNECTED", NO_SESSIONS: "SESSION_DISCONNECTED",
NO_SESSIONS: "SESSION_DISCONNECTED", SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED",
SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED", SESSION_RECONNECTED: "SESSION_RECONNECTED",
SESSION_RECONNECTED: "SESSION_RECONNECTED", UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT
UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT,
WEBRTC_CONFIG: "WEBRTC_CONFIG",
}; };
const BASE_sessionInfo = { const BASE_sessionInfo = {

View file

@ -27,14 +27,9 @@ const respond = function (req, res, data) {
res.setHeader('Content-Type', 'application/json'); res.setHeader('Content-Type', 'application/json');
res.end(JSON.stringify(result)); res.end(JSON.stringify(result));
} else { } else {
if (!res.aborted) { res.cork(() => {
res.cork(() => { res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result));
res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result)); });
});
} else {
logger.debug("response aborted");
return;
}
} }
const duration = performance.now() - req.startTs; const duration = performance.now() - req.startTs;
IncreaseTotalRequests(); IncreaseTotalRequests();

View file

@ -42,7 +42,7 @@ const findSessionSocketId = async (io, roomId, tabId) => {
}; };
async function getRoomData(io, roomID) { async function getRoomData(io, roomID) {
let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [], config = null, agentInfos = []; let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [];
const connected_sockets = await io.in(roomID).fetchSockets(); const connected_sockets = await io.in(roomID).fetchSockets();
if (connected_sockets.length > 0) { if (connected_sockets.length > 0) {
for (let socket of connected_sockets) { for (let socket of connected_sockets) {
@ -52,19 +52,13 @@ async function getRoomData(io, roomID) {
} else { } else {
agentsCount++; agentsCount++;
agentIDs.push(socket.id); agentIDs.push(socket.id);
agentInfos.push({ ...socket.handshake.query.agentInfo, socketId: socket.id });
if (socket.handshake.query.config !== undefined) {
config = socket.handshake.query.config;
}
} }
} }
} else { } else {
tabsCount = -1; tabsCount = -1;
agentsCount = -1; agentsCount = -1;
agentInfos = [];
agentIDs = [];
} }
return {tabsCount, agentsCount, tabIDs, agentIDs, config, agentInfos}; return {tabsCount, agentsCount, tabIDs, agentIDs};
} }
function processNewSocket(socket) { function processNewSocket(socket) {
@ -84,7 +78,7 @@ async function onConnect(socket) {
IncreaseOnlineConnections(socket.handshake.query.identity); IncreaseOnlineConnections(socket.handshake.query.identity);
const io = getServer(); const io = getServer();
const {tabsCount, agentsCount, tabIDs, agentInfos, agentIDs, config} = await getRoomData(io, socket.handshake.query.roomId); const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(io, socket.handshake.query.roomId);
if (socket.handshake.query.identity === IDENTITIES.session) { if (socket.handshake.query.identity === IDENTITIES.session) {
// Check if session with the same tabID already connected, if so, refuse new connexion // Check if session with the same tabID already connected, if so, refuse new connexion
@ -106,9 +100,7 @@ async function onConnect(socket) {
// Inform all connected agents about reconnected session // Inform all connected agents about reconnected session
if (agentsCount > 0) { if (agentsCount > 0) {
logger.debug(`notifying new session about agent-existence`); logger.debug(`notifying new session about agent-existence`);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.WEBRTC_CONFIG, config);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs); io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs);
io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_INFO_CONNECTED, agentInfos);
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id); socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
} }
} else if (tabsCount <= 0) { } else if (tabsCount <= 0) {
@ -126,8 +118,7 @@ async function onConnect(socket) {
// Stats // Stats
startAssist(socket, socket.handshake.query.agentID); startAssist(socket, socket.handshake.query.agentID);
} }
io.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.WEBRTC_CONFIG, socket.handshake.query.config); socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo);
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, { ...socket.handshake.query.agentInfo });
} }
// Set disconnect handler // Set disconnect handler

View file

@ -114,9 +114,9 @@ ENV TZ=UTC \
RUN if [ "$SERVICE_NAME" = "http" ]; then \ RUN if [ "$SERVICE_NAME" = "http" ]; then \
wget https://raw.githubusercontent.com/ua-parser/uap-core/master/regexes.yaml -O "$UAPARSER_FILE" &&\ wget https://raw.githubusercontent.com/ua-parser/uap-core/master/regexes.yaml -O "$UAPARSER_FILE" &&\
wget https://static.openreplay.com/geoip/GeoLite2-City.mmdb -O "$MAXMINDDB_FILE"; \ wget https://static.openreplay.com/geoip/GeoLite2-City.mmdb -O "$MAXMINDDB_FILE"; \
elif [ "$SERVICE_NAME" = "images" ]; then \ elif [ "$SERVICE_NAME" = "imagestorage" ]; then \
apk add --no-cache zstd; \ apk add --no-cache zstd; \
elif [ "$SERVICE_NAME" = "canvases" ]; then \ elif [ "$SERVICE_NAME" = "canvas-handler" ]; then \
apk add --no-cache zstd; \ apk add --no-cache zstd; \
elif [ "$SERVICE_NAME" = "spot" ]; then \ elif [ "$SERVICE_NAME" = "spot" ]; then \
apk add --no-cache ffmpeg; \ apk add --no-cache ffmpeg; \

View file

@ -8,7 +8,8 @@ import (
"openreplay/backend/pkg/db/postgres/pool" "openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics" "openreplay/backend/pkg/metrics"
"openreplay/backend/pkg/metrics/database" analyticsMetrics "openreplay/backend/pkg/metrics/analytics"
databaseMetrics "openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web" "openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/server" "openreplay/backend/pkg/server"
"openreplay/backend/pkg/server/api" "openreplay/backend/pkg/server/api"
@ -18,18 +19,16 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := analyticsConfig.New(log) cfg := analyticsConfig.New(log)
// Observability
webMetrics := web.New("analytics") webMetrics := web.New("analytics")
dbMetrics := database.New("analytics") metrics.New(log, append(webMetrics.List(), append(analyticsMetrics.List(), databaseMetrics.List()...)...))
metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
pgConn, err := pool.New(dbMetrics, cfg.Postgres.String()) pgConn, err := pool.New(cfg.Postgres.String())
if err != nil { if err != nil {
log.Fatal(ctx, "can't init postgres connection: %s", err) log.Fatal(ctx, "can't init postgres connection: %s", err)
} }
defer pgConn.Close() defer pgConn.Close()
builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn) builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, pgConn)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init services: %s", err) log.Fatal(ctx, "can't init services: %s", err)
} }
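
The analytics entrypoint now assembles its collectors from package-level helpers in a single nested append. A self-contained sketch of that slice-combining pattern, with plain strings standing in for whatever webMetrics.List(), analyticsMetrics.List() and databaseMetrics.List() actually return:

package main

import "fmt"

// Stand-ins for the per-package List() helpers; the real ones return metric
// collectors, strings keep the sketch runnable on its own.
func webList() []string       { return []string{"web_request_duration"} }
func analyticsList() []string { return []string{"analytics_events_total"} }
func databaseList() []string  { return []string{"db_batch_size"} }

func main() {
	// Mirrors metrics.New(log, append(webMetrics.List(),
	//         append(analyticsMetrics.List(), databaseMetrics.List()...)...))
	all := append(webList(), append(analyticsList(), databaseList()...)...)
	fmt.Println(all) // [web_request_duration analytics_events_total db_batch_size]
}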

View file

@ -22,15 +22,13 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := config.New(log) cfg := config.New(log)
// Observability metrics.New(log, assetsMetrics.List())
assetMetrics := assetsMetrics.New("assets")
metrics.New(log, assetMetrics.List())
objStore, err := store.NewStore(&cfg.ObjectsConfig) objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init object storage: %s", err) log.Fatal(ctx, "can't init object storage: %s", err)
} }
cacher, err := cacher.NewCacher(cfg, objStore, assetMetrics) cacher, err := cacher.NewCacher(cfg, objStore)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init cacher: %s", err) log.Fatal(ctx, "can't init cacher: %s", err)
} }
@ -39,7 +37,7 @@ func main() {
switch m := msg.(type) { switch m := msg.(type) {
case *messages.AssetCache: case *messages.AssetCache:
cacher.CacheURL(m.SessionID(), m.URL) cacher.CacheURL(m.SessionID(), m.URL)
assetMetrics.IncreaseProcessesSessions() assetsMetrics.IncreaseProcessesSessions()
case *messages.JSException: case *messages.JSException:
sourceList, err := assets.ExtractJSExceptionSources(&m.Payload) sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
if err != nil { if err != nil {
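
The consumer callback dispatches on the concrete message type with a type switch. A schematic version of that dispatch, using hypothetical stand-ins for *messages.AssetCache and *messages.JSException:

package main

import "fmt"

// Hypothetical message types; the real structs live in pkg/messages.
type AssetCache struct {
	SessionID uint64
	URL       string
}

type JSException struct {
	SessionID uint64
	Payload   string
}

func handle(msg interface{}) {
	switch m := msg.(type) {
	case *AssetCache:
		fmt.Printf("cache asset %s for session %d\n", m.URL, m.SessionID)
	case *JSException:
		fmt.Printf("extract JS sources for session %d\n", m.SessionID)
	default:
		// Other message types are ignored, as in the worker above.
	}
}

func main() {
	handle(&AssetCache{SessionID: 1, URL: "https://example.com/app.css"})
	handle(&JSException{SessionID: 1, Payload: "{}"})
}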

View file

@ -8,12 +8,12 @@ import (
"syscall" "syscall"
"time" "time"
"openreplay/backend/internal/canvases" "openreplay/backend/internal/canvas-handler"
config "openreplay/backend/internal/config/canvases" config "openreplay/backend/internal/config/canvas-handler"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics" "openreplay/backend/pkg/metrics"
canvasesMetrics "openreplay/backend/pkg/metrics/canvas" storageMetrics "openreplay/backend/pkg/metrics/imagestorage"
"openreplay/backend/pkg/objectstorage/store" "openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue"
) )
@ -22,28 +22,22 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := config.New(log) cfg := config.New(log)
// Observability metrics.New(log, storageMetrics.List())
canvasMetrics := canvasesMetrics.New("canvases")
metrics.New(log, canvasMetrics.List())
objStore, err := store.NewStore(&cfg.ObjectsConfig) objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init object storage: %s", err) log.Fatal(ctx, "can't init object storage: %s", err)
} }
producer := queue.NewProducer(cfg.MessageSizeLimit, true) srv, err := canvas_handler.New(cfg, log, objStore)
defer producer.Close(15000)
srv, err := canvases.New(cfg, log, objStore, producer, canvasMetrics)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init canvases service: %s", err) log.Fatal(ctx, "can't init canvas service: %s", err)
} }
canvasConsumer := queue.NewConsumer( canvasConsumer := queue.NewConsumer(
cfg.GroupCanvasImage, cfg.GroupCanvasImage,
[]string{ []string{
cfg.TopicCanvasImages, cfg.TopicCanvasImages,
cfg.TopicCanvasTrigger,
}, },
messages.NewImagesMessageIterator(func(data []byte, sessID uint64) { messages.NewImagesMessageIterator(func(data []byte, sessID uint64) {
isSessionEnd := func(data []byte) bool { isSessionEnd := func(data []byte) bool {
@ -61,34 +55,14 @@ func main() {
} }
return true return true
} }
isTriggerEvent := func(data []byte) (string, string, bool) {
reader := messages.NewBytesReader(data)
msgType, err := reader.ReadUint()
if err != nil {
return "", "", false
}
if msgType != messages.MsgCustomEvent {
return "", "", false
}
msg, err := messages.ReadMessage(msgType, reader)
if err != nil {
return "", "", false
}
customEvent := msg.(*messages.CustomEvent)
return customEvent.Payload, customEvent.Name, true
}
sessCtx := context.WithValue(context.Background(), "sessionID", sessID) sessCtx := context.WithValue(context.Background(), "sessionID", sessID)
if isSessionEnd(data) { if isSessionEnd(data) {
if err := srv.PrepareSessionCanvases(sessCtx, sessID); err != nil { if err := srv.PackSessionCanvases(sessCtx, sessID); err != nil {
if !strings.Contains(err.Error(), "no such file or directory") { if !strings.Contains(err.Error(), "no such file or directory") {
log.Error(sessCtx, "can't pack session's canvases: %s", err) log.Error(sessCtx, "can't pack session's canvases: %s", err)
} }
} }
} else if path, name, ok := isTriggerEvent(data); ok {
if err := srv.ProcessSessionCanvas(sessCtx, sessID, path, name); err != nil {
log.Error(sessCtx, "can't process session's canvas: %s", err)
}
} else { } else {
if err := srv.SaveCanvasToDisk(sessCtx, sessID, data); err != nil { if err := srv.SaveCanvasToDisk(sessCtx, sessID, data); err != nil {
log.Error(sessCtx, "can't process canvas image: %s", err) log.Error(sessCtx, "can't process canvas image: %s", err)
@ -99,7 +73,7 @@ func main() {
cfg.MessageSizeLimit, cfg.MessageSizeLimit,
) )
log.Info(ctx, "canvases service started") log.Info(ctx, "canvas handler service started")
sigchan := make(chan os.Signal, 1) sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
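
Both the kept isSessionEnd helper and the removed isTriggerEvent helper peek at the leading message-type id before deciding what to do with a raw payload. A minimal sketch of that idea; the real code goes through messages.NewBytesReader/ReadUint, so the varint prefix and the type id value below are assumptions made purely for illustration:

package main

import (
	"encoding/binary"
	"fmt"
)

const msgSessionEnd = 126 // hypothetical type id

// messageType reads the leading type id of a raw payload.
func messageType(data []byte) (uint64, bool) {
	id, n := binary.Uvarint(data)
	if n <= 0 {
		return 0, false
	}
	return id, true
}

func isSessionEnd(data []byte) bool {
	id, ok := messageType(data)
	return ok && id == msgSessionEnd
}

func main() {
	payload := binary.AppendUvarint(nil, msgSessionEnd)
	fmt.Println(isSessionEnd(payload)) // true
}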

View file

@ -14,7 +14,7 @@ import (
"openreplay/backend/pkg/memory" "openreplay/backend/pkg/memory"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics" "openreplay/backend/pkg/metrics"
"openreplay/backend/pkg/metrics/database" databaseMetrics "openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/projects" "openreplay/backend/pkg/projects"
"openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue"
"openreplay/backend/pkg/sessions" "openreplay/backend/pkg/sessions"
@ -26,24 +26,22 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := config.New(log) cfg := config.New(log)
// Observability metrics.New(log, databaseMetrics.List())
dbMetric := database.New("db")
metrics.New(log, dbMetric.List())
pgConn, err := pool.New(dbMetric, cfg.Postgres.String()) pgConn, err := pool.New(cfg.Postgres.String())
if err != nil { if err != nil {
log.Fatal(ctx, "can't init postgres connection: %s", err) log.Fatal(ctx, "can't init postgres connection: %s", err)
} }
defer pgConn.Close() defer pgConn.Close()
chConn := clickhouse.NewConnector(cfg.Clickhouse, dbMetric) chConn := clickhouse.NewConnector(cfg.Clickhouse)
if err := chConn.Prepare(); err != nil { if err := chConn.Prepare(); err != nil {
log.Fatal(ctx, "can't prepare clickhouse: %s", err) log.Fatal(ctx, "can't prepare clickhouse: %s", err)
} }
defer chConn.Stop() defer chConn.Stop()
// Init db proxy module (postgres + clickhouse + batches) // Init db proxy module (postgres + clickhouse + batches)
dbProxy := postgres.NewConn(log, pgConn, chConn, dbMetric) dbProxy := postgres.NewConn(log, pgConn, chConn)
defer dbProxy.Close() defer dbProxy.Close()
// Init redis connection // Init redis connection
@ -53,8 +51,8 @@ func main() {
} }
defer redisClient.Close() defer redisClient.Close()
projManager := projects.New(log, pgConn, redisClient, dbMetric) projManager := projects.New(log, pgConn, redisClient)
sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric) sessManager := sessions.New(log, pgConn, projManager, redisClient)
tagsManager := tags.New(log, pgConn) tagsManager := tags.New(log, pgConn)
// Init data saver // Init data saver
@ -66,7 +64,7 @@ func main() {
messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd, messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd,
messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr, messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr,
messages.MsgJSException, messages.MsgResourceTiming, messages.MsgCustomEvent, messages.MsgCustomIssue, messages.MsgJSException, messages.MsgResourceTiming, messages.MsgCustomEvent, messages.MsgCustomIssue,
messages.MsgNetworkRequest, messages.MsgGraphQL, messages.MsgStateAction, messages.MsgMouseClick, messages.MsgFetch, messages.MsgNetworkRequest, messages.MsgGraphQL, messages.MsgStateAction, messages.MsgMouseClick,
messages.MsgMouseClickDeprecated, messages.MsgSetPageLocation, messages.MsgSetPageLocationDeprecated, messages.MsgMouseClickDeprecated, messages.MsgSetPageLocation, messages.MsgSetPageLocationDeprecated,
messages.MsgPageLoadTiming, messages.MsgPageRenderTiming, messages.MsgPageLoadTiming, messages.MsgPageRenderTiming,
messages.MsgPageEvent, messages.MsgPageEventDeprecated, messages.MsgMouseThrashing, messages.MsgInputChange, messages.MsgPageEvent, messages.MsgPageEventDeprecated, messages.MsgMouseThrashing, messages.MsgInputChange,

View file

@ -19,7 +19,7 @@ import (
"openreplay/backend/pkg/memory" "openreplay/backend/pkg/memory"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics" "openreplay/backend/pkg/metrics"
"openreplay/backend/pkg/metrics/database" databaseMetrics "openreplay/backend/pkg/metrics/database"
enderMetrics "openreplay/backend/pkg/metrics/ender" enderMetrics "openreplay/backend/pkg/metrics/ender"
"openreplay/backend/pkg/projects" "openreplay/backend/pkg/projects"
"openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue"
@ -31,12 +31,9 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := ender.New(log) cfg := ender.New(log)
// Observability metrics.New(log, append(enderMetrics.List(), databaseMetrics.List()...))
dbMetric := database.New("ender")
enderMetric := enderMetrics.New("ender")
metrics.New(log, append(enderMetric.List(), dbMetric.List()...))
pgConn, err := pool.New(dbMetric, cfg.Postgres.String()) pgConn, err := pool.New(cfg.Postgres.String())
if err != nil { if err != nil {
log.Fatal(ctx, "can't init postgres connection: %s", err) log.Fatal(ctx, "can't init postgres connection: %s", err)
} }
@ -48,10 +45,10 @@ func main() {
} }
defer redisClient.Close() defer redisClient.Close()
projManager := projects.New(log, pgConn, redisClient, dbMetric) projManager := projects.New(log, pgConn, redisClient)
sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric) sessManager := sessions.New(log, pgConn, projManager, redisClient)
sessionEndGenerator, err := sessionender.New(enderMetric, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber) sessionEndGenerator, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init ender service: %s", err) log.Fatal(ctx, "can't init ender service: %s", err)
} }
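
sessionender.New now takes only the timeout and the partition count. Its core job, visible in the SessionEnder hunk further down, is to keep the highest user timestamp per session and report sessions that have gone quiet. A stripped-down sketch of that bookkeeping (field and method names simplified, not the actual implementation):

package main

import "fmt"

type ender struct {
	timeout  int64
	lastSeen map[uint64]int64 // sessionID -> last user timestamp (ms)
}

func newEnder(timeout int64) *ender {
	return &ender{timeout: timeout, lastSeen: map[uint64]int64{}}
}

// Update keeps the highest user timestamp for a correct session duration.
func (e *ender) Update(sessID uint64, ts int64) {
	if ts > e.lastSeen[sessID] {
		e.lastSeen[sessID] = ts
	}
}

// Ended returns and forgets sessions that were silent longer than the timeout.
func (e *ender) Ended(now int64) []uint64 {
	var out []uint64
	for id, ts := range e.lastSeen {
		if now-ts > e.timeout {
			out = append(out, id)
			delete(e.lastSeen, id)
		}
	}
	return out
}

func main() {
	e := newEnder(60_000)
	e.Update(42, 1_000)
	fmt.Println(e.Ended(100_000)) // [42]
}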

View file

@ -23,9 +23,7 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := config.New(log) cfg := config.New(log)
// Observability metrics.New(log, heuristicsMetrics.List())
heuristicsMetric := heuristicsMetrics.New("heuristics")
metrics.New(log, heuristicsMetric.List())
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message. // HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
handlersFabric := func() []handlers.MessageProcessor { handlersFabric := func() []handlers.MessageProcessor {
@ -64,7 +62,7 @@ func main() {
} }
// Run service and wait for TERM signal // Run service and wait for TERM signal
service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager, heuristicsMetric) service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager)
log.Info(ctx, "Heuristics service started") log.Info(ctx, "Heuristics service started")
terminator.Wait(log, service) terminator.Wait(log, service)
} }

View file

@ -9,7 +9,7 @@ import (
"openreplay/backend/pkg/db/redis" "openreplay/backend/pkg/db/redis"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics" "openreplay/backend/pkg/metrics"
"openreplay/backend/pkg/metrics/database" databaseMetrics "openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web" "openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue"
"openreplay/backend/pkg/server" "openreplay/backend/pkg/server"
@ -20,15 +20,13 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := http.New(log) cfg := http.New(log)
// Observability
webMetrics := web.New("http") webMetrics := web.New("http")
dbMetric := database.New("http") metrics.New(log, append(webMetrics.List(), databaseMetrics.List()...))
metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
producer := queue.NewProducer(cfg.MessageSizeLimit, true) producer := queue.NewProducer(cfg.MessageSizeLimit, true)
defer producer.Close(15000) defer producer.Close(15000)
pgConn, err := pool.New(dbMetric, cfg.Postgres.String()) pgConn, err := pool.New(cfg.Postgres.String())
if err != nil { if err != nil {
log.Fatal(ctx, "can't init postgres connection: %s", err) log.Fatal(ctx, "can't init postgres connection: %s", err)
} }
@ -40,7 +38,7 @@ func main() {
} }
defer redisClient.Close() defer redisClient.Close()
builder, err := services.New(log, cfg, webMetrics, dbMetric, producer, pgConn, redisClient) builder, err := services.New(log, cfg, webMetrics, producer, pgConn, redisClient)
if err != nil { if err != nil {
log.Fatal(ctx, "failed while creating services: %s", err) log.Fatal(ctx, "failed while creating services: %s", err)
} }

View file

@ -9,12 +9,12 @@ import (
"syscall" "syscall"
"time" "time"
config "openreplay/backend/internal/config/images" config "openreplay/backend/internal/config/imagestorage"
"openreplay/backend/internal/images" "openreplay/backend/internal/screenshot-handler"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics" "openreplay/backend/pkg/metrics"
imagesMetrics "openreplay/backend/pkg/metrics/images" storageMetrics "openreplay/backend/pkg/metrics/imagestorage"
"openreplay/backend/pkg/objectstorage/store" "openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue"
) )
@ -23,18 +23,16 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := config.New(log) cfg := config.New(log)
// Observability metrics.New(log, storageMetrics.List())
imageMetrics := imagesMetrics.New("images")
metrics.New(log, imageMetrics.List())
objStore, err := store.NewStore(&cfg.ObjectsConfig) objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init object storage: %s", err) log.Fatal(ctx, "can't init object storage: %s", err)
} }
srv, err := images.New(cfg, log, objStore, imageMetrics) srv, err := screenshot_handler.New(cfg, log, objStore)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init images service: %s", err) log.Fatal(ctx, "can't init storage service: %s", err)
} }
workDir := cfg.FSDir workDir := cfg.FSDir
@ -76,7 +74,7 @@ func main() {
cfg.MessageSizeLimit, cfg.MessageSizeLimit,
) )
log.Info(ctx, "Images service started") log.Info(ctx, "Image storage service started")
sigchan := make(chan os.Signal, 1) sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM) signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
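
Like the other workers touched by this change, the renamed screenshot-handler entrypoint blocks on SIGINT/SIGTERM before draining its pools. A minimal sketch of that shutdown pattern:

package main

import (
	"fmt"
	"os"
	"os/signal"
	"syscall"
)

func main() {
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	// The consumer loop would run in background goroutines here.

	sig := <-sigchan // block until the process is asked to stop
	fmt.Println("caught signal, draining workers:", sig)
	// srv.Wait() and consumer.Close() would follow in the real service.
}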

View file

@ -18,18 +18,16 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := config.New(log) cfg := config.New(log)
// Observability
webMetrics := web.New("integrations") webMetrics := web.New("integrations")
dbMetric := database.New("integrations") metrics.New(log, append(webMetrics.List(), database.List()...))
metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
pgConn, err := pool.New(dbMetric, cfg.Postgres.String()) pgConn, err := pool.New(cfg.Postgres.String())
if err != nil { if err != nil {
log.Fatal(ctx, "can't init postgres connection: %s", err) log.Fatal(ctx, "can't init postgres connection: %s", err)
} }
defer pgConn.Close() defer pgConn.Close()
builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, dbMetric, pgConn) builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, pgConn)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init services: %s", err) log.Fatal(ctx, "can't init services: %s", err)
} }

View file

@ -9,14 +9,14 @@ import (
"syscall" "syscall"
"time" "time"
config "openreplay/backend/internal/config/sink" "openreplay/backend/internal/config/sink"
"openreplay/backend/internal/sink/assetscache" "openreplay/backend/internal/sink/assetscache"
"openreplay/backend/internal/sink/sessionwriter" "openreplay/backend/internal/sink/sessionwriter"
"openreplay/backend/internal/storage" "openreplay/backend/internal/storage"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics" "openreplay/backend/pkg/metrics"
"openreplay/backend/pkg/metrics/sink" sinkMetrics "openreplay/backend/pkg/metrics/sink"
"openreplay/backend/pkg/queue" "openreplay/backend/pkg/queue"
"openreplay/backend/pkg/url/assets" "openreplay/backend/pkg/url/assets"
) )
@ -24,9 +24,7 @@ import (
func main() { func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := config.New(log) cfg := sink.New(log)
// Observability
sinkMetrics := sink.New("sink")
metrics.New(log, sinkMetrics.List()) metrics.New(log, sinkMetrics.List())
if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) { if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
@ -41,7 +39,7 @@ func main() {
if err != nil { if err != nil {
log.Fatal(ctx, "can't init rewriter: %s", err) log.Fatal(ctx, "can't init rewriter: %s", err)
} }
assetMessageHandler := assetscache.New(log, cfg, rewriter, producer, sinkMetrics) assetMessageHandler := assetscache.New(log, cfg, rewriter, producer)
counter := storage.NewLogCounter() counter := storage.NewLogCounter()
var ( var (
@ -100,6 +98,7 @@ func main() {
// Process assets // Process assets
if msg.TypeID() == messages.MsgSetNodeAttributeURLBased || if msg.TypeID() == messages.MsgSetNodeAttributeURLBased ||
msg.TypeID() == messages.MsgSetCSSDataURLBased || msg.TypeID() == messages.MsgSetCSSDataURLBased ||
msg.TypeID() == messages.MsgCSSInsertRuleURLBased ||
msg.TypeID() == messages.MsgAdoptedSSReplaceURLBased || msg.TypeID() == messages.MsgAdoptedSSReplaceURLBased ||
msg.TypeID() == messages.MsgAdoptedSSInsertRuleURLBased { msg.TypeID() == messages.MsgAdoptedSSInsertRuleURLBased {
m := msg.Decode() m := msg.Decode()
@ -193,7 +192,7 @@ func main() {
cfg.TopicRawWeb, cfg.TopicRawWeb,
cfg.TopicRawMobile, cfg.TopicRawMobile,
}, },
messages.NewSinkMessageIterator(log, msgHandler, nil, false, sinkMetrics), messages.NewSinkMessageIterator(log, msgHandler, nil, false),
false, false,
cfg.MessageSizeLimit, cfg.MessageSizeLimit,
) )
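
The hunk above adds MsgCSSInsertRuleURLBased to the chain of TypeID comparisons that routes messages through the asset rewriter. When such a list keeps growing, an equivalent formulation is a lookup set; a sketch with hypothetical numeric ids standing in for the Msg* constants:

package main

import "fmt"

// Hypothetical ids for MsgSetNodeAttributeURLBased, MsgSetCSSDataURLBased,
// MsgCSSInsertRuleURLBased, MsgAdoptedSSReplaceURLBased and
// MsgAdoptedSSInsertRuleURLBased.
var assetMessageTypes = map[uint64]bool{
	12: true,
	13: true,
	37: true,
	71: true,
	73: true,
}

func isAssetMessage(typeID uint64) bool {
	return assetMessageTypes[typeID]
}

func main() {
	fmt.Println(isAssetMessage(37)) // true
	fmt.Println(isAssetMessage(99)) // false
}

Whether a set or the explicit || chain reads better is a style call; the diff keeps the chain.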

View file

@ -19,20 +19,16 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := spotConfig.New(log) cfg := spotConfig.New(log)
// Observability
webMetrics := web.New("spot") webMetrics := web.New("spot")
spotMetric := spotMetrics.New("spot") metrics.New(log, append(webMetrics.List(), append(spotMetrics.List(), databaseMetrics.List()...)...))
dbMetric := databaseMetrics.New("spot")
metrics.New(log, append(webMetrics.List(), append(spotMetric.List(), dbMetric.List()...)...))
pgConn, err := pool.New(dbMetric, cfg.Postgres.String()) pgConn, err := pool.New(cfg.Postgres.String())
if err != nil { if err != nil {
log.Fatal(ctx, "can't init postgres connection: %s", err) log.Fatal(ctx, "can't init postgres connection: %s", err)
} }
defer pgConn.Close() defer pgConn.Close()
prefix := api.NoPrefix builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, pgConn)
builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, spotMetric, dbMetric, pgConn, prefix)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init services: %s", err) log.Fatal(ctx, "can't init services: %s", err)
} }
@ -41,7 +37,7 @@ func main() {
if err != nil { if err != nil {
log.Fatal(ctx, "failed while creating router: %s", err) log.Fatal(ctx, "failed while creating router: %s", err)
} }
router.AddHandlers(prefix, builder.SpotsAPI) router.AddHandlers(api.NoPrefix, builder.SpotsAPI)
router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware) router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
server.Run(ctx, log, &cfg.HTTP, router) server.Run(ctx, log, &cfg.HTTP, router)
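
AddHandlers mounts the Spots API without a prefix and AddMiddlewares wraps it with auth, rate limiting and audit logging. A generic net/http sketch of that wrapping; the real openreplay router API differs, and the logging middleware below is only a placeholder:

package main

import (
	"fmt"
	"net/http"
)

type middleware func(http.Handler) http.Handler

// chain wraps h with the given middlewares, outermost first.
func chain(h http.Handler, mws ...middleware) http.Handler {
	for i := len(mws) - 1; i >= 0; i-- {
		h = mws[i](h)
	}
	return h
}

func logging(next http.Handler) http.Handler {
	return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		fmt.Println("request:", r.URL.Path)
		next.ServeHTTP(w, r)
	})
}

func main() {
	api := http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
		_, _ = w.Write([]byte("ok"))
	})
	mux := http.NewServeMux()
	// Auth, rate-limit and audit middlewares would slot in like logging does.
	mux.Handle("/spot/", chain(api, logging))
	_ = http.ListenAndServe(":8080", mux)
}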

View file

@ -23,15 +23,13 @@ func main() {
ctx := context.Background() ctx := context.Background()
log := logger.New() log := logger.New()
cfg := config.New(log) cfg := config.New(log)
// Observability metrics.New(log, storageMetrics.List())
storageMetric := storageMetrics.New("storage")
metrics.New(log, storageMetric.List())
objStore, err := store.NewStore(&cfg.ObjectsConfig) objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init object storage: %s", err) log.Fatal(ctx, "can't init object storage: %s", err)
} }
srv, err := storage.New(cfg, log, objStore, storageMetric) srv, err := storage.New(cfg, log, objStore)
if err != nil { if err != nil {
log.Fatal(ctx, "can't init storage service: %s", err) log.Fatal(ctx, "can't init storage service: %s", err)
} }

View file

@ -27,7 +27,6 @@ type cacher struct {
objStorage objectstorage.ObjectStorage // AWS Docs: "These clients are safe to use concurrently." objStorage objectstorage.ObjectStorage // AWS Docs: "These clients are safe to use concurrently."
httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines." httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
rewriter *assets.Rewriter // Read only rewriter *assets.Rewriter // Read only
metrics metrics.Assets
Errors chan error Errors chan error
sizeLimit int sizeLimit int
requestHeaders map[string]string requestHeaders map[string]string
@ -38,7 +37,7 @@ func (c *cacher) CanCache() bool {
return c.workers.CanAddTask() return c.workers.CanAddTask()
} }
func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage, metrics metrics.Assets) (*cacher, error) { func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher, error) {
switch { switch {
case cfg == nil: case cfg == nil:
return nil, errors.New("config is nil") return nil, errors.New("config is nil")
@ -94,7 +93,6 @@ func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage, metrics me
Errors: make(chan error), Errors: make(chan error),
sizeLimit: cfg.AssetsSizeLimit, sizeLimit: cfg.AssetsSizeLimit,
requestHeaders: cfg.AssetsRequestHeaders, requestHeaders: cfg.AssetsRequestHeaders,
metrics: metrics,
} }
c.workers = NewPool(64, c.CacheFile) c.workers = NewPool(64, c.CacheFile)
return c, nil return c, nil
@ -117,7 +115,7 @@ func (c *cacher) cacheURL(t *Task) {
c.Errors <- errors.Wrap(err, t.urlContext) c.Errors <- errors.Wrap(err, t.urlContext)
return return
} }
c.metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode) metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
defer res.Body.Close() defer res.Body.Close()
if res.StatusCode >= 400 { if res.StatusCode >= 400 {
printErr := true printErr := true
@ -164,12 +162,12 @@ func (c *cacher) cacheURL(t *Task) {
start = time.Now() start = time.Now()
err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, contentEncoding, objectstorage.NoCompression) err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, contentEncoding, objectstorage.NoCompression)
if err != nil { if err != nil {
c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true) metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
c.Errors <- errors.Wrap(err, t.urlContext) c.Errors <- errors.Wrap(err, t.urlContext)
return return
} }
c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false) metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
c.metrics.IncreaseSavedSessions() metrics.IncreaseSavedSessions()
if isCSS { if isCSS {
if t.depth > 0 { if t.depth > 0 {
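
After this change the cacher records durations through package-level functions instead of an injected metrics field. A self-contained sketch of the measurement itself, with a stub recorder standing in for the package-level metrics call:

package main

import (
	"fmt"
	"net/http"
	"time"
)

// Stub recorder; the real package-level function feeds the metrics backend.
func RecordDownloadDuration(ms float64, code int) {
	fmt.Printf("download took %.0fms, status %d\n", ms, code)
}

func fetch(url string) error {
	start := time.Now()
	res, err := http.Get(url)
	if err != nil {
		return err
	}
	defer res.Body.Close()
	// Same measurement the cacher makes around its download
	// (time.Since(start) is equivalent to time.Now().Sub(start)).
	RecordDownloadDuration(float64(time.Since(start).Milliseconds()), res.StatusCode)
	return nil
}

func main() {
	if err := fetch("https://example.com"); err != nil {
		fmt.Println(err)
	}
}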

View file

@ -0,0 +1,166 @@
package canvas_handler
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"strings"
"time"
config "openreplay/backend/internal/config/canvas-handler"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool"
)
type ImageStorage struct {
cfg *config.Config
log logger.Logger
basePath string
saverPool pool.WorkerPool
uploaderPool pool.WorkerPool
objStorage objectstorage.ObjectStorage
}
type saveTask struct {
ctx context.Context
sessionID uint64
name string
image *bytes.Buffer
}
type uploadTask struct {
ctx context.Context
path string
name string
}
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
}
path := cfg.FSDir + "/"
if cfg.CanvasDir != "" {
path += cfg.CanvasDir + "/"
}
s := &ImageStorage{
cfg: cfg,
log: log,
basePath: path,
objStorage: objStorage,
}
s.saverPool = pool.NewPool(4, 8, s.writeToDisk)
s.uploaderPool = pool.NewPool(4, 8, s.sendToS3)
return s, nil
}
func (v *ImageStorage) Wait() {
v.saverPool.Pause()
v.uploaderPool.Pause()
}
func (v *ImageStorage) SaveCanvasToDisk(ctx context.Context, sessID uint64, data []byte) error {
type canvasData struct {
Name string
Data []byte
}
var msg = &canvasData{}
if err := json.Unmarshal(data, msg); err != nil {
return fmt.Errorf("can't parse canvas message, err: %s", err)
}
v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, name: msg.Name, image: bytes.NewBuffer(msg.Data)})
return nil
}
func (v *ImageStorage) writeToDisk(payload interface{}) {
task := payload.(*saveTask)
path := fmt.Sprintf("%s%d/", v.basePath, task.sessionID)
// Ensure the directory exists
if err := os.MkdirAll(path, 0755); err != nil {
v.log.Fatal(task.ctx, "can't create a dir, err: %s", err)
}
// Write images to disk
outFile, err := os.Create(path + task.name)
if err != nil {
v.log.Fatal(task.ctx, "can't create an image: %s", err)
}
if _, err := io.Copy(outFile, task.image); err != nil {
v.log.Fatal(task.ctx, "can't copy data to image: %s", err)
}
outFile.Close()
v.log.Debug(task.ctx, "canvas image saved, name: %s, size: %3.3f mb", task.name, float64(task.image.Len())/1024.0/1024.0)
return
}
func (v *ImageStorage) PackSessionCanvases(ctx context.Context, sessID uint64) error {
path := fmt.Sprintf("%s%d/", v.basePath, sessID)
// Check that the directory exists
files, err := os.ReadDir(path)
if err != nil {
return err
}
if len(files) == 0 {
return nil
}
names := make(map[string]bool)
// Build the list of canvas images sets
for _, file := range files {
// Skip already created archives
if strings.HasSuffix(file.Name(), ".tar.zst") {
continue
}
name := strings.Split(file.Name(), ".")
parts := strings.Split(name[0], "_")
if len(name) != 2 || len(parts) != 3 {
v.log.Warn(ctx, "unknown file name: %s, skipping", file.Name())
continue
}
canvasID := fmt.Sprintf("%s_%s", parts[0], parts[1])
names[canvasID] = true
}
sessionID := strconv.FormatUint(sessID, 10)
for name := range names {
// Save to archives
archPath := fmt.Sprintf("%s%s.tar.zst", path, name)
fullCmd := fmt.Sprintf("find %s -type f -name '%s*' ! -name '*.tar.zst' | tar -cf - --files-from=- | zstd -f -o %s",
path, name, archPath)
cmd := exec.Command("sh", "-c", fullCmd)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
return fmt.Errorf("failed to execute command, err: %s, stderr: %v", err, stderr.String())
}
v.uploaderPool.Submit(&uploadTask{ctx: ctx, path: archPath, name: sessionID + "/" + name + ".tar.zst"})
}
return nil
}
func (v *ImageStorage) sendToS3(payload interface{}) {
task := payload.(*uploadTask)
start := time.Now()
video, err := os.ReadFile(task.path)
if err != nil {
v.log.Fatal(task.ctx, "failed to read canvas archive: %s", err)
}
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.NoContentEncoding, objectstorage.Zstd); err != nil {
v.log.Fatal(task.ctx, "failed to upload canvas to storage: %s", err)
}
v.log.Info(task.ctx, "replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
return
}
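
PackSessionCanvases shells out to a find | tar | zstd pipeline to archive one canvas set at a time. A standalone sketch of that command construction and error handling; the paths in main are made up for illustration, and the pipeline needs zstd in the image, which is what the Dockerfile hunk earlier in this diff installs for this service:

package main

import (
	"bytes"
	"fmt"
	"os/exec"
)

// packFiles archives every file under dir whose name starts with prefix into
// a zstd-compressed tar, mirroring the pipeline used above.
func packFiles(dir, prefix, archPath string) error {
	cmd := fmt.Sprintf(
		"find %s -type f -name '%s*' ! -name '*.tar.zst' | tar -cf - --files-from=- | zstd -f -o %s",
		dir, prefix, archPath)
	c := exec.Command("sh", "-c", cmd)
	var stderr bytes.Buffer
	c.Stderr = &stderr
	if err := c.Run(); err != nil {
		return fmt.Errorf("pack failed: %w, stderr: %s", err, stderr.String())
	}
	return nil
}

func main() {
	if err := packFiles("/tmp/canvas/42/", "1_2", "/tmp/canvas/42/1_2.tar.zst"); err != nil {
		fmt.Println(err)
	}
}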

View file

@ -1,224 +0,0 @@
package canvases
import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"strings"
"time"
config "openreplay/backend/internal/config/canvases"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics/canvas"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool"
"openreplay/backend/pkg/queue/types"
)
type ImageStorage struct {
cfg *config.Config
log logger.Logger
basePath string
saverPool pool.WorkerPool
packerPool pool.WorkerPool
uploaderPool pool.WorkerPool
objStorage objectstorage.ObjectStorage
producer types.Producer
metrics canvas.Canvas
}
type saveTask struct {
ctx context.Context
sessionID uint64
name string
image *bytes.Buffer
}
type packTask struct {
ctx context.Context
sessionID uint64
path string
name string
}
type uploadTask struct {
ctx context.Context
path string
name string
}
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, producer types.Producer, metrics canvas.Canvas) (*ImageStorage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case log == nil:
return nil, fmt.Errorf("logger is empty")
case objStorage == nil:
return nil, fmt.Errorf("objectStorage is empty")
case producer == nil:
return nil, fmt.Errorf("producer is empty")
case metrics == nil:
return nil, fmt.Errorf("metrics is empty")
}
path := cfg.FSDir + "/"
if cfg.CanvasDir != "" {
path += cfg.CanvasDir + "/"
}
s := &ImageStorage{
cfg: cfg,
log: log,
basePath: path,
objStorage: objStorage,
producer: producer,
metrics: metrics,
}
s.saverPool = pool.NewPool(2, 2, s.writeToDisk)
s.packerPool = pool.NewPool(8, 16, s.packCanvas)
s.uploaderPool = pool.NewPool(8, 16, s.sendToS3)
return s, nil
}
func (v *ImageStorage) Wait() {
v.saverPool.Pause()
v.uploaderPool.Pause()
}
func (v *ImageStorage) SaveCanvasToDisk(ctx context.Context, sessID uint64, data []byte) error {
type canvasData struct {
Name string
Data []byte
}
var msg = &canvasData{}
if err := json.Unmarshal(data, msg); err != nil {
return fmt.Errorf("can't parse canvas message, err: %s", err)
}
v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, name: msg.Name, image: bytes.NewBuffer(msg.Data)})
return nil
}
func (v *ImageStorage) writeToDisk(payload interface{}) {
task := payload.(*saveTask)
path := fmt.Sprintf("%s%d/", v.basePath, task.sessionID)
// Ensure the directory exists
if err := os.MkdirAll(path, 0755); err != nil {
v.log.Fatal(task.ctx, "can't create a dir, err: %s", err)
}
// Write images to disk
outFile, err := os.Create(path + task.name)
if err != nil {
v.log.Fatal(task.ctx, "can't create an image: %s", err)
}
if _, err := io.Copy(outFile, task.image); err != nil {
v.log.Fatal(task.ctx, "can't copy data to image: %s", err)
}
if outFile != nil {
if err := outFile.Close(); err != nil {
v.log.Warn(task.ctx, "can't close out file: %s", err)
}
}
v.metrics.RecordCanvasImageSize(float64(task.image.Len()))
v.metrics.IncreaseTotalSavedImages()
v.log.Debug(task.ctx, "canvas image saved, name: %s, size: %3.3f mb", task.name, float64(task.image.Len())/1024.0/1024.0)
return
}
func (v *ImageStorage) PrepareSessionCanvases(ctx context.Context, sessID uint64) error {
start := time.Now()
path := fmt.Sprintf("%s%d/", v.basePath, sessID)
// Check that the directory exists
files, err := os.ReadDir(path)
if err != nil {
return err
}
if len(files) == 0 {
return nil
}
// Build the list of canvas images sets
names := make(map[string]int)
for _, file := range files {
if strings.HasSuffix(file.Name(), ".tar.zst") {
continue // Skip already created archives
}
name := strings.Split(file.Name(), ".")
parts := strings.Split(name[0], "_")
if len(name) != 2 || len(parts) != 3 {
v.log.Warn(ctx, "unknown file name: %s, skipping", file.Name())
continue
}
canvasID := fmt.Sprintf("%s_%s", parts[0], parts[1])
names[canvasID]++
}
for name, number := range names {
msg := &messages.CustomEvent{
Name: name,
Payload: path,
}
if err := v.producer.Produce(v.cfg.TopicCanvasTrigger, sessID, msg.Encode()); err != nil {
v.log.Error(ctx, "can't send canvas trigger: %s", err)
}
v.metrics.RecordImagesPerCanvas(float64(number))
}
v.metrics.RecordCanvasesPerSession(float64(len(names)))
v.metrics.RecordPreparingDuration(time.Since(start).Seconds())
v.log.Debug(ctx, "session canvases (%d) prepared in %.3fs, session: %d", len(names), time.Since(start).Seconds(), sessID)
return nil
}
func (v *ImageStorage) ProcessSessionCanvas(ctx context.Context, sessID uint64, path, name string) error {
v.packerPool.Submit(&packTask{ctx: ctx, sessionID: sessID, path: path, name: name})
return nil
}
func (v *ImageStorage) packCanvas(payload interface{}) {
task := payload.(*packTask)
start := time.Now()
sessionID := strconv.FormatUint(task.sessionID, 10)
// Save to archives
archPath := fmt.Sprintf("%s%s.tar.zst", task.path, task.name)
fullCmd := fmt.Sprintf("find %s -type f -name '%s*' ! -name '*.tar.zst' | tar -cf - --files-from=- | zstd -f -o %s",
task.path, task.name, archPath)
cmd := exec.Command("sh", "-c", fullCmd)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr
err := cmd.Run()
if err != nil {
v.log.Fatal(task.ctx, "failed to execute command, err: %s, stderr: %v", err, stderr.String())
}
v.metrics.RecordArchivingDuration(time.Since(start).Seconds())
v.metrics.IncreaseTotalCreatedArchives()
v.log.Debug(task.ctx, "canvas packed successfully in %.3fs, session: %d", time.Since(start).Seconds(), task.sessionID)
v.uploaderPool.Submit(&uploadTask{ctx: task.ctx, path: archPath, name: sessionID + "/" + task.name + ".tar.zst"})
}
func (v *ImageStorage) sendToS3(payload interface{}) {
task := payload.(*uploadTask)
start := time.Now()
video, err := os.ReadFile(task.path)
if err != nil {
v.log.Fatal(task.ctx, "failed to read canvas archive: %s", err)
}
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.NoContentEncoding, objectstorage.Zstd); err != nil {
v.log.Fatal(task.ctx, "failed to upload canvas to storage: %s", err)
}
v.metrics.RecordUploadingDuration(time.Since(start).Seconds())
v.metrics.RecordArchiveSize(float64(len(video)))
v.log.Debug(task.ctx, "replay file (size: %d) uploaded successfully in %.3fs", len(video), time.Since(start).Seconds())
}

View file

@ -1,4 +1,4 @@
package canvases package canvas_handler
import ( import (
"openreplay/backend/internal/config/common" "openreplay/backend/internal/config/common"
@ -12,8 +12,8 @@ type Config struct {
objectstorage.ObjectsConfig objectstorage.ObjectsConfig
FSDir string `env:"FS_DIR,required"` FSDir string `env:"FS_DIR,required"`
CanvasDir string `env:"CANVAS_DIR,default=canvas"` CanvasDir string `env:"CANVAS_DIR,default=canvas"`
TopicCanvasImages string `env:"TOPIC_CANVAS_IMAGES,required"` // For canvas images and sessionEnd events from ender TopicCanvasImages string `env:"TOPIC_CANVAS_IMAGES,required"`
TopicCanvasTrigger string `env:"TOPIC_CANVAS_TRIGGER,required"` // For trigger events to start processing (archive and upload) TopicCanvasTrigger string `env:"TOPIC_CANVAS_TRIGGER,required"`
GroupCanvasImage string `env:"GROUP_CANVAS_IMAGE,required"` GroupCanvasImage string `env:"GROUP_CANVAS_IMAGE,required"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"` UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
} }

View file

@ -1,4 +1,4 @@
package images package imagestorage
import ( import (
"openreplay/backend/internal/config/common" "openreplay/backend/internal/config/common"

View file

@ -2,12 +2,11 @@ package datasaver
import ( import (
"context" "context"
"encoding/json"
"openreplay/backend/pkg/db/types"
"openreplay/backend/internal/config/db" "openreplay/backend/internal/config/db"
"openreplay/backend/pkg/db/clickhouse" "openreplay/backend/pkg/db/clickhouse"
"openreplay/backend/pkg/db/postgres" "openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/db/types"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
. "openreplay/backend/pkg/messages" . "openreplay/backend/pkg/messages"
queue "openreplay/backend/pkg/queue/types" queue "openreplay/backend/pkg/queue/types"
@ -51,6 +50,10 @@ func New(log logger.Logger, cfg *db.Config, pg *postgres.Conn, ch clickhouse.Con
} }
func (s *saverImpl) Handle(msg Message) { func (s *saverImpl) Handle(msg Message) {
if msg.TypeID() == MsgCustomEvent {
defer s.Handle(types.WrapCustomEvent(msg.(*CustomEvent)))
}
var ( var (
sessCtx = context.WithValue(context.Background(), "sessionID", msg.SessionID()) sessCtx = context.WithValue(context.Background(), "sessionID", msg.SessionID())
session *sessions.Session session *sessions.Session
@ -66,23 +69,6 @@ func (s *saverImpl) Handle(msg Message) {
return return
} }
if msg.TypeID() == MsgCustomEvent {
m := msg.(*CustomEvent)
// Try to parse custom event payload to JSON and extract or_payload field
type CustomEventPayload struct {
CustomTimestamp uint64 `json:"or_timestamp"`
}
customPayload := &CustomEventPayload{}
if err := json.Unmarshal([]byte(m.Payload), customPayload); err == nil {
if customPayload.CustomTimestamp >= session.Timestamp {
s.log.Info(sessCtx, "custom event timestamp received: %v", m.Timestamp)
msg.Meta().Timestamp = customPayload.CustomTimestamp
s.log.Info(sessCtx, "custom event timestamp updated: %v", m.Timestamp)
}
}
defer s.Handle(types.WrapCustomEvent(m))
}
if IsMobileType(msg.TypeID()) { if IsMobileType(msg.TypeID()) {
if err := s.handleMobileMessage(sessCtx, session, msg); err != nil { if err := s.handleMobileMessage(sessCtx, session, msg); err != nil {
if !postgres.IsPkeyViolation(err) { if !postgres.IsPkeyViolation(err) {
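
The moved block re-handles every custom event in wrapped form, and defer guarantees that happens only after the raw message has been processed. A small sketch of that defer-based re-dispatch with a toy event type (not the real saverImpl or types.WrapCustomEvent):

package main

import "fmt"

type event struct {
	kind    string
	wrapped bool
}

// handle mimics saverImpl.Handle: a custom event is handled again in wrapped
// form once the original has been processed.
func handle(e event) {
	if e.kind == "custom" && !e.wrapped {
		defer handle(event{kind: "custom", wrapped: true})
	}
	fmt.Printf("processing %s (wrapped=%v)\n", e.kind, e.wrapped)
}

func main() {
	handle(event{kind: "custom"})
	// prints the unwrapped event first, then the wrapped one
}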

View file

@ -11,7 +11,7 @@ import (
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/memory" "openreplay/backend/pkg/memory"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
heuristicMetrics "openreplay/backend/pkg/metrics/heuristics" metrics "openreplay/backend/pkg/metrics/heuristics"
"openreplay/backend/pkg/queue/types" "openreplay/backend/pkg/queue/types"
) )
@ -23,12 +23,11 @@ type heuristicsImpl struct {
consumer types.Consumer consumer types.Consumer
events builders.EventBuilder events builders.EventBuilder
mm memory.Manager mm memory.Manager
metrics heuristicMetrics.Heuristics
done chan struct{} done chan struct{}
finished chan struct{} finished chan struct{}
} }
func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager, metrics heuristicMetrics.Heuristics) service.Interface { func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager) service.Interface {
s := &heuristicsImpl{ s := &heuristicsImpl{
log: log, log: log,
ctx: context.Background(), ctx: context.Background(),
@ -37,7 +36,6 @@ func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Co
consumer: c, consumer: c,
events: e, events: e,
mm: mm, mm: mm,
metrics: metrics,
done: make(chan struct{}), done: make(chan struct{}),
finished: make(chan struct{}), finished: make(chan struct{}),
} }
@ -53,7 +51,7 @@ func (h *heuristicsImpl) run() {
if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil { if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
h.log.Error(h.ctx, "can't send new event to queue: %s", err) h.log.Error(h.ctx, "can't send new event to queue: %s", err)
} else { } else {
h.metrics.IncreaseTotalEvents(messageTypeName(evt)) metrics.IncreaseTotalEvents(messageTypeName(evt))
} }
case <-tick: case <-tick:
h.producer.Flush(h.cfg.ProducerTimeout) h.producer.Flush(h.cfg.ProducerTimeout)

View file

@ -12,7 +12,6 @@ import (
featureflagsAPI "openreplay/backend/pkg/featureflags/api" featureflagsAPI "openreplay/backend/pkg/featureflags/api"
"openreplay/backend/pkg/flakeid" "openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web" "openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/objectstorage/store" "openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/projects" "openreplay/backend/pkg/projects"
@ -37,8 +36,8 @@ type ServicesBuilder struct {
UxTestsAPI api.Handlers UxTestsAPI api.Handlers
} }
func New(log logger.Logger, cfg *http.Config, webMetrics web.Web, dbMetrics database.Database, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) { func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
projs := projects.New(log, pgconn, redis, dbMetrics) projs := projects.New(log, pgconn, redis)
objStore, err := store.NewStore(&cfg.ObjectsConfig) objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil { if err != nil {
return nil, err return nil, err
@ -54,11 +53,11 @@ func New(log logger.Logger, cfg *http.Config, webMetrics web.Web, dbMetrics data
tokenizer := token.NewTokenizer(cfg.TokenSecret) tokenizer := token.NewTokenizer(cfg.TokenSecret)
conditions := conditions.New(pgconn) conditions := conditions.New(pgconn)
flaker := flakeid.NewFlaker(cfg.WorkerID) flaker := flakeid.NewFlaker(cfg.WorkerID)
sessions := sessions.New(log, pgconn, projs, redis, dbMetrics) sessions := sessions.New(log, pgconn, projs, redis)
featureFlags := featureflags.New(pgconn) featureFlags := featureflags.New(pgconn)
tags := tags.New(log, pgconn) tags := tags.New(log, pgconn)
uxTesting := uxtesting.New(pgconn) uxTesting := uxtesting.New(pgconn)
responser := api.NewResponser(webMetrics) responser := api.NewResponser(metrics)
builder := &ServicesBuilder{} builder := &ServicesBuilder{}
if builder.WebAPI, err = websessions.NewHandlers(cfg, log, responser, producer, projs, sessions, uaModule, geoModule, tokenizer, conditions, flaker); err != nil { if builder.WebAPI, err = websessions.NewHandlers(cfg, log, responser, producer, projs, sessions, uaModule, geoModule, tokenizer, conditions, flaker); err != nil {
return nil, err return nil, err

View file

@ -1,4 +1,4 @@
package images package screenshot_handler
import ( import (
"archive/tar" "archive/tar"
@ -6,18 +6,16 @@ import (
"context" "context"
"fmt" "fmt"
"io" "io"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool"
"os" "os"
"os/exec" "os/exec"
"strconv" "strconv"
"time" "time"
gzip "github.com/klauspost/pgzip" gzip "github.com/klauspost/pgzip"
config "openreplay/backend/internal/config/imagestorage"
config "openreplay/backend/internal/config/images"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics/images"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool"
) )
type saveTask struct { type saveTask struct {
@ -39,28 +37,20 @@ type ImageStorage struct {
objStorage objectstorage.ObjectStorage objStorage objectstorage.ObjectStorage
saverPool pool.WorkerPool saverPool pool.WorkerPool
uploaderPool pool.WorkerPool uploaderPool pool.WorkerPool
metrics images.Images
} }
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, metrics images.Images) (*ImageStorage, error) { func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
switch { switch {
case cfg == nil: case cfg == nil:
return nil, fmt.Errorf("config is empty") return nil, fmt.Errorf("config is empty")
case log == nil:
return nil, fmt.Errorf("logger is empty")
case objStorage == nil:
return nil, fmt.Errorf("objStorage is empty")
case metrics == nil:
return nil, fmt.Errorf("metrics is empty")
} }
s := &ImageStorage{ s := &ImageStorage{
cfg: cfg, cfg: cfg,
log: log, log: log,
objStorage: objStorage, objStorage: objStorage,
metrics: metrics,
} }
s.saverPool = pool.NewPool(4, 8, s.writeToDisk) s.saverPool = pool.NewPool(4, 8, s.writeToDisk)
s.uploaderPool = pool.NewPool(8, 8, s.sendToS3) s.uploaderPool = pool.NewPool(4, 4, s.sendToS3)
return s, nil return s, nil
} }
@ -97,11 +87,8 @@ func (v *ImageStorage) Process(ctx context.Context, sessID uint64, data []byte)
v.log.Error(ctx, "ExtractTarGz: unknown type: %d in %s", header.Typeflag, header.Name) v.log.Error(ctx, "ExtractTarGz: unknown type: %d in %s", header.Typeflag, header.Name)
} }
} }
v.metrics.RecordOriginalArchiveExtractionDuration(time.Since(start).Seconds())
v.metrics.RecordOriginalArchiveSize(float64(len(images)))
v.metrics.IncreaseTotalSavedArchives()
v.log.Debug(ctx, "arch size: %d, extracted archive in: %s", len(data), time.Since(start)) v.log.Info(ctx, "arch size: %d, extracted archive in: %s", len(data), time.Since(start))
v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, images: images}) v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, images: images})
return nil return nil
} }
@ -113,17 +100,17 @@ func (v *ImageStorage) writeToDisk(payload interface{}) {
if v.cfg.ScreenshotsDir != "" { if v.cfg.ScreenshotsDir != "" {
path += v.cfg.ScreenshotsDir + "/" path += v.cfg.ScreenshotsDir + "/"
} }
path += strconv.FormatUint(task.sessionID, 10) + "/" path += strconv.FormatUint(task.sessionID, 10) + "/"
// Ensure the directory exists // Ensure the directory exists
if err := os.MkdirAll(path, 0755); err != nil { if err := os.MkdirAll(path, 0755); err != nil {
v.log.Fatal(task.ctx, "error creating directories: %v", err) v.log.Fatal(task.ctx, "Error creating directories: %v", err)
} }
// Write images to disk // Write images to disk
saved := 0 saved := 0
for name, img := range task.images { for name, img := range task.images {
start := time.Now()
outFile, err := os.Create(path + name) // or open file in rewrite mode outFile, err := os.Create(path + name) // or open file in rewrite mode
if err != nil { if err != nil {
v.log.Error(task.ctx, "can't create file: %s", err.Error()) v.log.Error(task.ctx, "can't create file: %s", err.Error())
@ -131,21 +118,18 @@ func (v *ImageStorage) writeToDisk(payload interface{}) {
if _, err := io.Copy(outFile, img); err != nil { if _, err := io.Copy(outFile, img); err != nil {
v.log.Error(task.ctx, "can't copy file: %s", err.Error()) v.log.Error(task.ctx, "can't copy file: %s", err.Error())
} }
if outFile == nil { outFile.Close()
continue
}
if err := outFile.Close(); err != nil {
v.log.Warn(task.ctx, "can't close file: %s", err.Error())
}
v.metrics.RecordSavingImageDuration(time.Since(start).Seconds())
v.metrics.IncreaseTotalSavedImages()
saved++ saved++
} }
v.log.Debug(task.ctx, "saved %d images to disk", saved) v.log.Info(task.ctx, "saved %d images to disk", saved)
return return
} }
func (v *ImageStorage) PackScreenshots(ctx context.Context, sessID uint64, filesPath string) error { func (v *ImageStorage) PackScreenshots(ctx context.Context, sessID uint64, filesPath string) error {
// Temporarily disabled for tests
if v.objStorage == nil {
return fmt.Errorf("object storage is empty")
}
start := time.Now() start := time.Now()
sessionID := strconv.FormatUint(sessID, 10) sessionID := strconv.FormatUint(sessID, 10)
selector := fmt.Sprintf("%s*.jpeg", filesPath) selector := fmt.Sprintf("%s*.jpeg", filesPath)
@ -162,10 +146,8 @@ func (v *ImageStorage) PackScreenshots(ctx context.Context, sessID uint64, files
if err != nil { if err != nil {
return fmt.Errorf("failed to execute command: %v, stderr: %v", err, stderr.String()) return fmt.Errorf("failed to execute command: %v, stderr: %v", err, stderr.String())
} }
v.metrics.RecordArchivingDuration(time.Since(start).Seconds()) v.log.Info(ctx, "packed replay in %v", time.Since(start))
v.metrics.IncreaseTotalCreatedArchives()
v.log.Debug(ctx, "packed replay in %v", time.Since(start))
v.uploaderPool.Submit(&uploadTask{ctx: ctx, sessionID: sessionID, path: archPath, name: sessionID + "/replay.tar.zst"}) v.uploaderPool.Submit(&uploadTask{ctx: ctx, sessionID: sessionID, path: archPath, name: sessionID + "/replay.tar.zst"})
return nil return nil
} }
@ -180,9 +162,6 @@ func (v *ImageStorage) sendToS3(payload interface{}) {
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.NoContentEncoding, objectstorage.Zstd); err != nil { if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.NoContentEncoding, objectstorage.Zstd); err != nil {
v.log.Fatal(task.ctx, "failed to upload replay file: %s", err) v.log.Fatal(task.ctx, "failed to upload replay file: %s", err)
} }
v.metrics.RecordUploadingDuration(time.Since(start).Seconds()) v.log.Info(task.ctx, "replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
v.metrics.RecordArchiveSize(float64(len(video)))
v.log.Debug(task.ctx, "replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
return return
} }
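
The image storage hunks above drop the injected metrics recorder and shrink the uploader pool from 8/8 to 4/4 workers, while keeping the two-stage flow: a saver pool writes extracted screenshots to disk and an uploader pool pushes the packed replay to object storage. The pool package is internal to OpenReplay, so the sketch below is only a generic worker pool with a similar Submit-style surface; the WorkerPool name and signatures are illustrative, not the project's actual API.

package main

import (
	"fmt"
	"sync"
)

// WorkerPool is an illustrative fixed-size pool: a few goroutines drain a
// buffered channel and hand every payload to a single handler function.
type WorkerPool struct {
	tasks   chan interface{}
	wg      sync.WaitGroup
	handler func(payload interface{})
}

func NewWorkerPool(workers, queue int, handler func(interface{})) *WorkerPool {
	p := &WorkerPool{
		tasks:   make(chan interface{}, queue),
		handler: handler,
	}
	for i := 0; i < workers; i++ {
		p.wg.Add(1)
		go func() {
			defer p.wg.Done()
			for t := range p.tasks {
				p.handler(t)
			}
		}()
	}
	return p
}

// Submit enqueues a payload and blocks once the queue is full.
func (p *WorkerPool) Submit(payload interface{}) { p.tasks <- payload }

// Stop closes the queue and waits for in-flight tasks to finish.
func (p *WorkerPool) Stop() {
	close(p.tasks)
	p.wg.Wait()
}

func main() {
	saver := NewWorkerPool(4, 8, func(payload interface{}) { fmt.Println("saving", payload) })
	for i := 0; i < 3; i++ {
		saver.Submit(i)
	}
	saver.Stop()
}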

View file

@ -21,7 +21,6 @@ type session struct {
// SessionEnder updates timestamp of last message for each session // SessionEnder updates timestamp of last message for each session
type SessionEnder struct { type SessionEnder struct {
metrics ender.Ender
timeout int64 timeout int64
sessions map[uint64]*session // map[sessionID]session sessions map[uint64]*session // map[sessionID]session
timeCtrl *timeController timeCtrl *timeController
@ -29,9 +28,8 @@ type SessionEnder struct {
enabled bool enabled bool
} }
func New(metrics ender.Ender, timeout int64, parts int) (*SessionEnder, error) { func New(timeout int64, parts int) (*SessionEnder, error) {
return &SessionEnder{ return &SessionEnder{
metrics: metrics,
timeout: timeout, timeout: timeout,
sessions: make(map[uint64]*session), sessions: make(map[uint64]*session),
timeCtrl: NewTimeController(parts), timeCtrl: NewTimeController(parts),
@ -58,7 +56,7 @@ func (se *SessionEnder) ActivePartitions(parts []uint64) {
for sessID, _ := range se.sessions { for sessID, _ := range se.sessions {
if !activeParts[sessID%se.parts] { if !activeParts[sessID%se.parts] {
delete(se.sessions, sessID) delete(se.sessions, sessID)
se.metrics.DecreaseActiveSessions() ender.DecreaseActiveSessions()
removedSessions++ removedSessions++
} else { } else {
activeSessions++ activeSessions++
@ -91,8 +89,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
isEnded: false, isEnded: false,
isMobile: messages.IsMobileType(msg.TypeID()), isMobile: messages.IsMobileType(msg.TypeID()),
} }
se.metrics.IncreaseActiveSessions() ender.IncreaseActiveSessions()
se.metrics.IncreaseTotalSessions() ender.IncreaseTotalSessions()
return return
} }
// Keep the highest user's timestamp for correct session duration value // Keep the highest user's timestamp for correct session duration value
@ -141,8 +139,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
sess.isEnded = true sess.isEnded = true
if res, _ := handler(sessID, sess.lastUserTime); res { if res, _ := handler(sessID, sess.lastUserTime); res {
delete(se.sessions, sessID) delete(se.sessions, sessID)
se.metrics.DecreaseActiveSessions() ender.DecreaseActiveSessions()
se.metrics.IncreaseClosedSessions() ender.IncreaseClosedSessions()
removedSessions++ removedSessions++
if endCase == 2 { if endCase == 2 {
brokerTime[1]++ brokerTime[1]++
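
In the session ender, the injected ender.Ender metrics dependency is replaced by package-level calls (ender.IncreaseActiveSessions and friends), so New no longer takes a metrics argument. A minimal sketch of that package-level style, using atomic counters instead of whatever the real metrics/ender package wraps (most likely Prometheus collectors):

package ender

import "sync/atomic"

// Package-level counters; the real package presumably registers Prometheus
// collectors, these plain atomics are only a stand-in for the sketch.
var (
	activeSessions int64
	totalSessions  int64
	closedSessions int64
)

func IncreaseActiveSessions() { atomic.AddInt64(&activeSessions, 1) }
func DecreaseActiveSessions() { atomic.AddInt64(&activeSessions, -1) }
func IncreaseTotalSessions()  { atomic.AddInt64(&totalSessions, 1) }
func IncreaseClosedSessions() { atomic.AddInt64(&closedSessions, 1) }

// ActiveSessions exposes the current value, e.g. for a metrics endpoint.
func ActiveSessions() int64 { return atomic.LoadInt64(&activeSessions) }

The trade-off is less constructor wiring at the price of a hidden global, which appears acceptable here since each process runs a single SessionEnder.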

View file

@ -12,7 +12,7 @@ import (
"openreplay/backend/internal/config/sink" "openreplay/backend/internal/config/sink"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
sinkMetrics "openreplay/backend/pkg/metrics/sink" metrics "openreplay/backend/pkg/metrics/sink"
"openreplay/backend/pkg/queue/types" "openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/url/assets" "openreplay/backend/pkg/url/assets"
) )
@ -30,10 +30,9 @@ type AssetsCache struct {
producer types.Producer producer types.Producer
cache map[string]*CachedAsset cache map[string]*CachedAsset
blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
metrics sinkMetrics.Sink
} }
func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics sinkMetrics.Sink) *AssetsCache { func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
assetsCache := &AssetsCache{ assetsCache := &AssetsCache{
log: log, log: log,
cfg: cfg, cfg: cfg,
@ -41,7 +40,6 @@ func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, produce
producer: producer, producer: producer,
cache: make(map[string]*CachedAsset, 64), cache: make(map[string]*CachedAsset, 64),
blackList: make([]string, 0), blackList: make([]string, 0),
metrics: metrics,
} }
// Parse black list for cache layer // Parse black list for cache layer
if len(cfg.CacheBlackList) > 0 { if len(cfg.CacheBlackList) > 0 {
@ -78,7 +76,7 @@ func (e *AssetsCache) clearCache() {
if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration { if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
deleted++ deleted++
delete(e.cache, id) delete(e.cache, id)
e.metrics.DecreaseCachedAssets() metrics.DecreaseCachedAssets()
} }
} }
e.log.Info(context.Background(), "cache cleaner: deleted %d/%d assets", deleted, cacheSize) e.log.Info(context.Background(), "cache cleaner: deleted %d/%d assets", deleted, cacheSize)
@ -133,6 +131,17 @@ func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
} }
newMsg.SetMeta(msg.Meta()) newMsg.SetMeta(msg.Meta())
return newMsg return newMsg
case *messages.CSSInsertRuleURLBased:
if e.shouldSkipAsset(m.BaseURL) {
return msg
}
newMsg := &messages.CSSInsertRule{
ID: m.ID,
Index: m.Index,
Rule: e.handleCSS(m.SessionID(), m.BaseURL, m.Rule),
}
newMsg.SetMeta(msg.Meta())
return newMsg
case *messages.AdoptedSSReplaceURLBased: case *messages.AdoptedSSReplaceURLBased:
if e.shouldSkipAsset(m.BaseURL) { if e.shouldSkipAsset(m.BaseURL) {
return msg return msg
@ -196,7 +205,7 @@ func parseHost(baseURL string) (string, error) {
} }
func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string { func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
e.metrics.IncreaseTotalAssets() metrics.IncreaseTotalAssets()
// Try to find asset in cache // Try to find asset in cache
h := md5.New() h := md5.New()
// Cut first part of url (scheme + host) // Cut first part of url (scheme + host)
@ -219,7 +228,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
e.mutex.RUnlock() e.mutex.RUnlock()
if ok { if ok {
if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration { if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
e.metrics.IncreaseSkippedAssets() metrics.IncreaseSkippedAssets()
return cachedAsset.msg return cachedAsset.msg
} }
} }
@ -231,8 +240,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
start := time.Now() start := time.Now()
res := e.getRewrittenCSS(sessionID, baseURL, css) res := e.getRewrittenCSS(sessionID, baseURL, css)
duration := time.Now().Sub(start).Milliseconds() duration := time.Now().Sub(start).Milliseconds()
e.metrics.RecordAssetSize(float64(len(res))) metrics.RecordAssetSize(float64(len(res)))
e.metrics.RecordProcessAssetDuration(float64(duration)) metrics.RecordProcessAssetDuration(float64(duration))
// Save asset to cache if we spent more than threshold // Save asset to cache if we spent more than threshold
if duration > e.cfg.CacheThreshold { if duration > e.cfg.CacheThreshold {
e.mutex.Lock() e.mutex.Lock()
@ -241,7 +250,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
ts: time.Now(), ts: time.Now(),
} }
e.mutex.Unlock() e.mutex.Unlock()
e.metrics.IncreaseCachedAssets() metrics.IncreaseCachedAssets()
} }
// Return rewritten asset // Return rewritten asset
return res return res
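
The assets cache hunks switch the sink metrics to package-level calls and add a CSSInsertRuleURLBased branch that rewrites an inserted rule the same way full stylesheets are rewritten. The caching scheme itself, an md5 key over host plus CSS with an expiration measured in minutes, can be sketched roughly as follows (type and field names are illustrative, not the real AssetsCache):

package main

import (
	"crypto/md5"
	"encoding/hex"
	"fmt"
	"sync"
	"time"
)

type cachedAsset struct {
	msg string
	ts  time.Time
}

// assetCache is an illustrative md5-keyed cache with a TTL check, similar in
// spirit to the cache/mutex pair used by handleCSS.
type assetCache struct {
	mu         sync.RWMutex
	items      map[string]*cachedAsset
	expiration time.Duration // cfg.CacheExpiration is in minutes in the hunk
}

func (c *assetCache) key(host, css string) string {
	h := md5.New()
	h.Write([]byte(host))
	h.Write([]byte(css))
	return hex.EncodeToString(h.Sum(nil))
}

// get returns a cached rewrite if it exists and has not expired yet.
func (c *assetCache) get(host, css string) (string, bool) {
	c.mu.RLock()
	defer c.mu.RUnlock()
	if a, ok := c.items[c.key(host, css)]; ok && time.Since(a.ts) < c.expiration {
		return a.msg, true
	}
	return "", false
}

func (c *assetCache) put(host, css, rewritten string) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.items[c.key(host, css)] = &cachedAsset{msg: rewritten, ts: time.Now()}
}

func main() {
	c := &assetCache{items: map[string]*cachedAsset{}, expiration: 30 * time.Minute}
	c.put("example.com", "body { background: url(/a.png) }", "body { background: url(https://assets.example.com/a.png) }")
	fmt.Println(c.get("example.com", "body { background: url(/a.png) }"))
}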

View file

@ -18,7 +18,7 @@ import (
config "openreplay/backend/internal/config/storage" config "openreplay/backend/internal/config/storage"
"openreplay/backend/pkg/logger" "openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
storageMetrics "openreplay/backend/pkg/metrics/storage" metrics "openreplay/backend/pkg/metrics/storage"
"openreplay/backend/pkg/objectstorage" "openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool" "openreplay/backend/pkg/pool"
) )
@ -77,10 +77,9 @@ type Storage struct {
splitTime uint64 splitTime uint64
processorPool pool.WorkerPool processorPool pool.WorkerPool
uploaderPool pool.WorkerPool uploaderPool pool.WorkerPool
metrics storageMetrics.Storage
} }
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, metrics storageMetrics.Storage) (*Storage, error) { func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*Storage, error) {
switch { switch {
case cfg == nil: case cfg == nil:
return nil, fmt.Errorf("config is empty") return nil, fmt.Errorf("config is empty")
@ -93,7 +92,6 @@ func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectS
objStorage: objStorage, objStorage: objStorage,
startBytes: make([]byte, cfg.FileSplitSize), startBytes: make([]byte, cfg.FileSplitSize),
splitTime: parseSplitTime(cfg.FileSplitTime), splitTime: parseSplitTime(cfg.FileSplitTime),
metrics: metrics,
} }
s.processorPool = pool.NewPool(1, 1, s.doCompression) s.processorPool = pool.NewPool(1, 1, s.doCompression)
s.uploaderPool = pool.NewPool(1, 1, s.uploadSession) s.uploaderPool = pool.NewPool(1, 1, s.uploadSession)
@ -143,7 +141,7 @@ func (s *Storage) Process(ctx context.Context, msg *messages.SessionEnd) (err er
if err != nil { if err != nil {
if strings.Contains(err.Error(), "big file") { if strings.Contains(err.Error(), "big file") {
s.log.Warn(ctx, "can't process session: %s", err) s.log.Warn(ctx, "can't process session: %s", err)
s.metrics.IncreaseStorageTotalSkippedSessions() metrics.IncreaseStorageTotalSkippedSessions()
return nil return nil
} }
return err return err
@ -161,8 +159,8 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
return err return err
} }
s.metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String()) metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
s.metrics.RecordSessionSize(float64(len(mob)), tp.String()) metrics.RecordSessionSize(float64(len(mob)), tp.String())
// Put opened session file into task struct // Put opened session file into task struct
task.SetMob(mob, index, tp) task.SetMob(mob, index, tp)
@ -176,7 +174,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
// Check file size before download into memory // Check file size before download into memory
info, err := os.Stat(filePath) info, err := os.Stat(filePath)
if err == nil && info.Size() > s.cfg.MaxFileSize { if err == nil && info.Size() > s.cfg.MaxFileSize {
s.metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String()) metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
return nil, -1, fmt.Errorf("big file, size: %d", info.Size()) return nil, -1, fmt.Errorf("big file, size: %d", info.Size())
} }
// Read file into memory // Read file into memory
@ -192,7 +190,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
if err != nil { if err != nil {
return nil, -1, fmt.Errorf("can't sort session, err: %s", err) return nil, -1, fmt.Errorf("can't sort session, err: %s", err)
} }
s.metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
return mob, index, nil return mob, index, nil
} }
@ -236,12 +234,12 @@ func (s *Storage) packSession(task *Task, tp FileType) {
// Compression // Compression
start := time.Now() start := time.Now()
data := s.compress(task.ctx, mob, task.compression) data := s.compress(task.ctx, mob, task.compression)
s.metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
// Encryption // Encryption
start = time.Now() start = time.Now()
result := s.encryptSession(task.ctx, data.Bytes(), task.key) result := s.encryptSession(task.ctx, data.Bytes(), task.key)
s.metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String()) metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
if tp == DOM { if tp == DOM {
task.doms = bytes.NewBuffer(result) task.doms = bytes.NewBuffer(result)
@ -298,8 +296,8 @@ func (s *Storage) packSession(task *Task, tp FileType) {
wg.Wait() wg.Wait()
// Record metrics // Record metrics
s.metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String()) metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
s.metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String()) metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
} }
func (s *Storage) encryptSession(ctx context.Context, data []byte, encryptionKey string) []byte { func (s *Storage) encryptSession(ctx context.Context, data []byte, encryptionKey string) []byte {
@ -384,7 +382,7 @@ func (s *Storage) uploadSession(payload interface{}) {
go func() { go func() {
if task.doms != nil { if task.doms != nil {
// Record compression ratio // Record compression ratio
s.metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String()) metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
// Upload session to s3 // Upload session to s3
start := time.Now() start := time.Now()
if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil { if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@ -397,7 +395,7 @@ func (s *Storage) uploadSession(payload interface{}) {
go func() { go func() {
if task.dome != nil { if task.dome != nil {
// Record compression ratio // Record compression ratio
s.metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String()) metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
// Upload session to s3 // Upload session to s3
start := time.Now() start := time.Now()
if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil { if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@ -410,7 +408,7 @@ func (s *Storage) uploadSession(payload interface{}) {
go func() { go func() {
if task.dev != nil { if task.dev != nil {
// Record compression ratio // Record compression ratio
s.metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String()) metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
// Upload session to s3 // Upload session to s3
start := time.Now() start := time.Now()
if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil { if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
@ -421,9 +419,9 @@ func (s *Storage) uploadSession(payload interface{}) {
wg.Done() wg.Done()
}() }()
wg.Wait() wg.Wait()
s.metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String()) metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
s.metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String()) metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
s.metrics.IncreaseStorageTotalSessions() metrics.IncreaseStorageTotalSessions()
} }
func (s *Storage) doCompression(payload interface{}) { func (s *Storage) doCompression(payload interface{}) {
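
The storage hunks keep the existing pipeline (read, sort, compress, encrypt, then upload the DOM and devtools parts in parallel) and only move metric recording to package-level functions. A rough sketch of the parallel-upload step, with a small uploader interface standing in for objectstorage.ObjectStorage (the interface, the fake store and uploadAll are assumptions for the sketch, not the real types):

package main

import (
	"bytes"
	"fmt"
	"sync"
	"time"
)

// uploader stands in for objectstorage.ObjectStorage in this sketch.
type uploader interface {
	Upload(name string, data *bytes.Buffer) error
}

type fakeStore struct{}

func (fakeStore) Upload(name string, data *bytes.Buffer) error {
	time.Sleep(10 * time.Millisecond) // pretend network latency
	fmt.Printf("uploaded %s (%d bytes)\n", name, data.Len())
	return nil
}

// uploadAll pushes the session parts concurrently, as uploadSession does, and
// returns the per-part durations so the caller can record metrics afterwards.
func uploadAll(store uploader, parts map[string]*bytes.Buffer) map[string]time.Duration {
	var (
		mu        sync.Mutex
		wg        sync.WaitGroup
		durations = make(map[string]time.Duration, len(parts))
	)
	for name, buf := range parts {
		if buf == nil {
			continue
		}
		wg.Add(1)
		go func(name string, buf *bytes.Buffer) {
			defer wg.Done()
			start := time.Now()
			if err := store.Upload(name, buf); err != nil {
				fmt.Println("upload failed:", err)
				return
			}
			mu.Lock()
			durations[name] = time.Since(start)
			mu.Unlock()
		}(name, buf)
	}
	wg.Wait()
	return durations
}

func main() {
	parts := map[string]*bytes.Buffer{
		"1/doms":     bytes.NewBufferString("dom, first part"),
		"1/dome":     bytes.NewBufferString("dom, second part"),
		"1/devtools": bytes.NewBufferString("devtools"),
	}
	fmt.Println(uploadAll(fakeStore{}, parts))
}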

View file

@ -3,7 +3,6 @@ package analytics
import ( import (
"github.com/go-playground/validator/v10" "github.com/go-playground/validator/v10"
"openreplay/backend/pkg/analytics/charts" "openreplay/backend/pkg/analytics/charts"
"openreplay/backend/pkg/metrics/database"
"time" "time"
"openreplay/backend/internal/config/analytics" "openreplay/backend/internal/config/analytics"
@ -27,9 +26,9 @@ type ServicesBuilder struct {
ChartsAPI api.Handlers ChartsAPI api.Handlers
} }
func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServicesBuilder, error) { func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, pgconn pool.Pool) (*ServicesBuilder, error) {
responser := api.NewResponser(webMetrics) responser := api.NewResponser(webMetrics)
audiTrail, err := tracer.NewTracer(log, pgconn, dbMetrics) audiTrail, err := tracer.NewTracer(log, pgconn)
if err != nil { if err != nil {
return nil, err return nil, err
} }
@ -59,7 +58,7 @@ func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.
return nil, err return nil, err
} }
return &ServicesBuilder{ return &ServicesBuilder{
Auth: auth.NewAuth(log, cfg.JWTSecret, cfg.JWTSpotSecret, pgconn, nil, api.NoPrefix), Auth: auth.NewAuth(log, cfg.JWTSecret, cfg.JWTSpotSecret, pgconn, nil),
RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute), RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
AuditTrail: audiTrail, AuditTrail: audiTrail,
CardsAPI: cardsHandlers, CardsAPI: cardsHandlers,
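
The analytics service builder wires auth, a per-user rate limiter, the audit trail and the handler groups; the change here removes the injected database metrics from the tracer and drops the api.NoPrefix argument from NewAuth. The limiter call's arguments (10, 30, 1 minute, 5 minutes) suggest a per-user token bucket with periodic cleanup; a hypothetical equivalent on top of golang.org/x/time/rate could look like this (the real limiter package is internal and may differ):

package main

import (
	"fmt"
	"sync"
	"time"

	"golang.org/x/time/rate"
)

type entry struct {
	lim      *rate.Limiter
	lastSeen time.Time
}

// userRateLimiter keeps one token bucket per user and evicts idle entries.
// The numbers mirror the call in the hunk (rate 10, burst 30, sweep 1m,
// idle 5m), but the real openreplay limiter may define them differently.
type userRateLimiter struct {
	mu      sync.Mutex
	users   map[string]*entry
	r       rate.Limit
	burst   int
	maxIdle time.Duration
}

func newUserRateLimiter(r rate.Limit, burst int, sweep, maxIdle time.Duration) *userRateLimiter {
	u := &userRateLimiter{users: map[string]*entry{}, r: r, burst: burst, maxIdle: maxIdle}
	go func() {
		for range time.Tick(sweep) { // ticker leaks at shutdown; fine for a sketch
			u.mu.Lock()
			for id, e := range u.users {
				if time.Since(e.lastSeen) > u.maxIdle {
					delete(u.users, id)
				}
			}
			u.mu.Unlock()
		}
	}()
	return u
}

// Allow reports whether the given user may perform one more request right now.
func (u *userRateLimiter) Allow(userID string) bool {
	u.mu.Lock()
	defer u.mu.Unlock()
	e, ok := u.users[userID]
	if !ok {
		e = &entry{lim: rate.NewLimiter(u.r, u.burst)}
		u.users[userID] = e
	}
	e.lastSeen = time.Now()
	return e.lim.Allow()
}

func main() {
	l := newUserRateLimiter(10, 30, time.Minute, 5*time.Minute)
	fmt.Println(l.Allow("user-1")) // true until the burst of 30 is exhausted
}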

View file

@ -18,14 +18,13 @@ type Bulk interface {
} }
type bulkImpl struct { type bulkImpl struct {
conn driver.Conn conn driver.Conn
metrics database.Database table string
table string query string
query string values [][]interface{}
values [][]interface{}
} }
func NewBulk(conn driver.Conn, metrics database.Database, table, query string) (Bulk, error) { func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
switch { switch {
case conn == nil: case conn == nil:
return nil, errors.New("clickhouse connection is empty") return nil, errors.New("clickhouse connection is empty")
@ -35,11 +34,10 @@ func NewBulk(conn driver.Conn, metrics database.Database, table, query string) (
return nil, errors.New("query is empty") return nil, errors.New("query is empty")
} }
return &bulkImpl{ return &bulkImpl{
conn: conn, conn: conn,
metrics: metrics, table: table,
table: table, query: query,
query: query, values: make([][]interface{}, 0),
values: make([][]interface{}, 0),
}, nil }, nil
} }
@ -62,8 +60,8 @@ func (b *bulkImpl) Send() error {
} }
err = batch.Send() err = batch.Send()
// Save bulk metrics // Save bulk metrics
b.metrics.RecordBulkElements(float64(len(b.values)), "ch", b.table) database.RecordBulkElements(float64(len(b.values)), "ch", b.table)
b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table) database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
// Prepare values slice for a new data // Prepare values slice for a new data
b.values = make([][]interface{}, 0) b.values = make([][]interface{}, 0)
return err return err
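
The ClickHouse bulk loses its injected metrics field and records element counts and insert duration through the package-level database helpers instead; functionally it still buffers rows and flushes them as a single batch. A rough sketch of such a flush on top of clickhouse-go v2 follows; the buffering and reset mirror the hunk, while the exact driver calls inside the real Send are an assumption.

package chbulk

import (
	"context"
	"time"

	"github.com/ClickHouse/clickhouse-go/v2/lib/driver"
)

// Bulk buffers rows for one INSERT statement and flushes them as a single batch.
type Bulk struct {
	conn   driver.Conn
	table  string
	query  string
	values [][]interface{}
}

// Append buffers one row; nothing is written until Send is called.
func (b *Bulk) Append(row ...interface{}) {
	b.values = append(b.values, row)
}

// Send flushes the buffered rows in one batch. The metric helpers are left as
// comments because they live in openreplay's internal metrics/database package.
func (b *Bulk) Send(ctx context.Context) error {
	start := time.Now()
	batch, err := b.conn.PrepareBatch(ctx, b.query)
	if err != nil {
		return err
	}
	for _, row := range b.values {
		if err := batch.Append(row...); err != nil {
			return err
		}
	}
	err = batch.Send()
	_ = time.Since(start)   // e.g. database.RecordBulkInsertDuration(..., "ch", b.table)
	b.values = b.values[:0] // keep the backing array, start a fresh batch
	return err
}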

View file

@ -18,7 +18,6 @@ import (
"openreplay/backend/pkg/db/types" "openreplay/backend/pkg/db/types"
"openreplay/backend/pkg/hashid" "openreplay/backend/pkg/hashid"
"openreplay/backend/pkg/messages" "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/sessions" "openreplay/backend/pkg/sessions"
"openreplay/backend/pkg/url" "openreplay/backend/pkg/url"
) )
@ -58,14 +57,13 @@ func NewTask() *task {
type connectorImpl struct { type connectorImpl struct {
conn driver.Conn conn driver.Conn
metrics database.Database
batches map[string]Bulk //driver.Batch batches map[string]Bulk //driver.Batch
workerTask chan *task workerTask chan *task
done chan struct{} done chan struct{}
finished chan struct{} finished chan struct{}
} }
func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector { func NewConnector(cfg common.Clickhouse) Connector {
conn, err := clickhouse.Open(&clickhouse.Options{ conn, err := clickhouse.Open(&clickhouse.Options{
Addr: []string{cfg.GetTrimmedURL()}, Addr: []string{cfg.GetTrimmedURL()},
Auth: clickhouse.Auth{ Auth: clickhouse.Auth{
@ -86,7 +84,6 @@ func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector {
c := &connectorImpl{ c := &connectorImpl{
conn: conn, conn: conn,
metrics: metrics,
batches: make(map[string]Bulk, 20), batches: make(map[string]Bulk, 20),
workerTask: make(chan *task, 1), workerTask: make(chan *task, 1),
done: make(chan struct{}), done: make(chan struct{}),
@ -97,7 +94,7 @@ func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector {
} }
func (c *connectorImpl) newBatch(name, query string) error { func (c *connectorImpl) newBatch(name, query string) error {
batch, err := NewBulk(c.conn, c.metrics, name, query) batch, err := NewBulk(c.conn, name, query)
if err != nil { if err != nil {
return fmt.Errorf("can't create new batch: %s", err) return fmt.Errorf("can't create new batch: %s", err)
} }
@ -106,25 +103,25 @@ func (c *connectorImpl) newBatch(name, query string) error {
} }
var batches = map[string]string{ var batches = map[string]string{
"sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?)", "sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?)",
"autocompletes": "INSERT INTO experimental.autocomplete (project_id, type, value) VALUES (?, ?, SUBSTR(?, 1, 8000))", "autocompletes": "INSERT INTO experimental.autocomplete (project_id, type, value) VALUES (?, ?, SUBSTR(?, 1, 8000))",
"pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", error_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"issues": "INSERT INTO experimental.issues (project_id, issue_id, type, context_string) VALUES (?, ?, ?, ?)", "issues": "INSERT INTO experimental.issues (project_id, issue_id, type, context_string) VALUES (?, ?, ?, ?)",
"mobile_sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?)", "mobile_sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?)",
"mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"mobile_clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "mobile_clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"mobile_swipes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "mobile_swipes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"mobile_inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "mobile_inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"mobile_requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "mobile_requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
"mobile_crashes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`, "mobile_crashes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
} }
func (c *connectorImpl) Prepare() error { func (c *connectorImpl) Prepare() error {
@ -215,7 +212,6 @@ func (c *connectorImpl) InsertWebSession(session *sessions.Session) error {
session.Metadata8, session.Metadata8,
session.Metadata9, session.Metadata9,
session.Metadata10, session.Metadata10,
"web",
session.Timezone, session.Timezone,
session.UtmSource, session.UtmSource,
session.UtmMedium, session.UtmMedium,
@ -247,10 +243,8 @@ func (c *connectorImpl) InsertWebInputDuration(session *sessions.Session, msg *m
return nil return nil
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"label": msg.Label, "label": msg.Label,
"hesitation_time": nullableUint32(uint32(msg.HesitationTime)), "hesitation_time": nullableUint32(uint32(msg.HesitationTime)),
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal input event: %s", err) return fmt.Errorf("can't marshal input event: %s", err)
@ -265,8 +259,6 @@ func (c *connectorImpl) InsertWebInputDuration(session *sessions.Session, msg *m
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
nullableUint16(uint16(msg.InputDuration)), nullableUint16(uint16(msg.InputDuration)),
jsonString, jsonString,
); err != nil { ); err != nil {
@ -283,14 +275,12 @@ func (c *connectorImpl) InsertMouseThrashing(session *sessions.Session, msg *mes
return fmt.Errorf("can't extract url parts: %s", err) return fmt.Errorf("can't extract url parts: %s", err)
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"issue_id": issueID, "issue_id": issueID,
"issue_type": "mouse_thrashing", "issue_type": "mouse_thrashing",
"url": cropString(msg.Url), "url": cropString(msg.Url),
"url_host": host, "url_host": host,
"url_path": path, "url_path": path,
"url_hostpath": hostpath, "url_hostpath": hostpath,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal issue event: %s", err) return fmt.Errorf("can't marshal issue event: %s", err)
@ -305,8 +295,6 @@ func (c *connectorImpl) InsertMouseThrashing(session *sessions.Session, msg *mes
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
"mouse_thrashing", "mouse_thrashing",
issueID, issueID,
jsonString, jsonString,
@ -339,14 +327,12 @@ func (c *connectorImpl) InsertIssue(session *sessions.Session, msg *messages.Iss
return fmt.Errorf("can't extract url parts: %s", err) return fmt.Errorf("can't extract url parts: %s", err)
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"issue_id": issueID, "issue_id": issueID,
"issue_type": msg.Type, "issue_type": msg.Type,
"url": cropString(msg.Url), "url": cropString(msg.Url),
"url_host": host, "url_host": host,
"url_path": path, "url_path": path,
"url_hostpath": hostpath, "url_hostpath": hostpath,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal issue event: %s", err) return fmt.Errorf("can't marshal issue event: %s", err)
@ -361,8 +347,6 @@ func (c *connectorImpl) InsertIssue(session *sessions.Session, msg *messages.Iss
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
msg.Type, msg.Type,
issueID, issueID,
jsonString, jsonString,
@ -434,8 +418,6 @@ func (c *connectorImpl) InsertWebPageEvent(session *sessions.Session, msg *messa
"dom_building_time": domBuildingTime, "dom_building_time": domBuildingTime,
"dom_content_loaded_event_time": domContentLoadedEventTime, "dom_content_loaded_event_time": domContentLoadedEventTime,
"load_event_time": loadEventTime, "load_event_time": loadEventTime,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal page event: %s", err) return fmt.Errorf("can't marshal page event: %s", err)
@ -450,8 +432,6 @@ func (c *connectorImpl) InsertWebPageEvent(session *sessions.Session, msg *messa
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
cropString(msg.URL), cropString(msg.URL),
jsonString, jsonString,
); err != nil { ); err != nil {
@ -485,17 +465,15 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
return fmt.Errorf("can't extract url parts: %s", err) return fmt.Errorf("can't extract url parts: %s", err)
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"label": msg.Label, "label": msg.Label,
"hesitation_time": nullableUint32(uint32(msg.HesitationTime)), "hesitation_time": nullableUint32(uint32(msg.HesitationTime)),
"selector": msg.Selector, "selector": msg.Selector,
"normalized_x": nX, "normalized_x": nX,
"normalized_y": nY, "normalized_y": nY,
"url": cropString(msg.Url), "url": cropString(msg.Url),
"url_host": host, "url_host": host,
"url_path": path, "url_path": path,
"url_hostpath": hostpath, "url_hostpath": hostpath,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal click event: %s", err) return fmt.Errorf("can't marshal click event: %s", err)
@ -510,8 +488,6 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
cropString(msg.Url), cropString(msg.Url),
jsonString, jsonString,
); err != nil { ); err != nil {
@ -522,6 +498,11 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
} }
func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *types.ErrorEvent) error { func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *types.ErrorEvent) error {
keys, values := make([]string, 0, len(msg.Tags)), make([]*string, 0, len(msg.Tags))
for k, v := range msg.Tags {
keys = append(keys, k)
values = append(values, v)
}
// Check error source before insert to avoid panic from clickhouse lib // Check error source before insert to avoid panic from clickhouse lib
switch msg.Source { switch msg.Source {
case "js_exception", "bugsnag", "cloudwatch", "datadog", "elasticsearch", "newrelic", "rollbar", "sentry", "stackdriver", "sumologic": case "js_exception", "bugsnag", "cloudwatch", "datadog", "elasticsearch", "newrelic", "rollbar", "sentry", "stackdriver", "sumologic":
@ -530,11 +511,12 @@ func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *type
} }
msgID, _ := msg.ID(session.ProjectID) msgID, _ := msg.ID(session.ProjectID)
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"source": msg.Source, "source": msg.Source,
"name": nullableString(msg.Name), "name": nullableString(msg.Name),
"message": msg.Message, "message": msg.Message,
"user_device": session.UserDevice, "error_id": msgID,
"user_device_type": session.UserDeviceType, "error_tags_keys": keys,
"error_tags_values": values,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal error event: %s", err) return fmt.Errorf("can't marshal error event: %s", err)
@ -549,9 +531,6 @@ func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *type
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
msgID,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("errors", err) c.checkError("errors", err)
@ -583,8 +562,6 @@ func (c *connectorImpl) InsertWebPerformanceTrackAggr(session *sessions.Session,
"min_used_js_heap_size": msg.MinUsedJSHeapSize, "min_used_js_heap_size": msg.MinUsedJSHeapSize,
"avg_used_js_heap_size": msg.AvgUsedJSHeapSize, "avg_used_js_heap_size": msg.AvgUsedJSHeapSize,
"max_used_js_heap_size": msg.MaxUsedJSHeapSize, "max_used_js_heap_size": msg.MaxUsedJSHeapSize,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal performance event: %s", err) return fmt.Errorf("can't marshal performance event: %s", err)
@ -599,8 +576,6 @@ func (c *connectorImpl) InsertWebPerformanceTrackAggr(session *sessions.Session,
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("performance", err) c.checkError("performance", err)
@ -624,18 +599,16 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
return fmt.Errorf("can't extract url parts: %s", err) return fmt.Errorf("can't extract url parts: %s", err)
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"request_body": request, "request_body": request,
"response_body": response, "response_body": response,
"status": uint16(msg.Status), "status": uint16(msg.Status),
"method": url.EnsureMethod(msg.Method), "method": url.EnsureMethod(msg.Method),
"success": msg.Status < 400, "success": msg.Status < 400,
"transfer_size": uint32(msg.TransferredBodySize), "transfer_size": uint32(msg.TransferredBodySize),
"url": cropString(msg.URL), "url": cropString(msg.URL),
"url_host": host, "url_host": host,
"url_path": path, "url_path": path,
"url_hostpath": hostpath, "url_hostpath": hostpath,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal request event: %s", err) return fmt.Errorf("can't marshal request event: %s", err)
@ -650,8 +623,6 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
nullableUint16(uint16(msg.Duration)), nullableUint16(uint16(msg.Duration)),
jsonString, jsonString,
); err != nil { ); err != nil {
@ -663,10 +634,8 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.CustomEvent) error { func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.CustomEvent) error {
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"name": msg.Name, "name": msg.Name,
"payload": msg.Payload, "payload": msg.Payload,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal custom event: %s", err) return fmt.Errorf("can't marshal custom event: %s", err)
@ -681,8 +650,6 @@ func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.Cu
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("custom", err) c.checkError("custom", err)
@ -693,11 +660,9 @@ func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.Cu
func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.GraphQL) error { func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.GraphQL) error {
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"name": msg.OperationName, "name": msg.OperationName,
"request_body": nullableString(msg.Variables), "request_body": nullableString(msg.Variables),
"response_body": nullableString(msg.Response), "response_body": nullableString(msg.Response),
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal graphql event: %s", err) return fmt.Errorf("can't marshal graphql event: %s", err)
@ -712,8 +677,6 @@ func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.G
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("graphql", err) c.checkError("graphql", err)
@ -761,7 +724,7 @@ func (c *connectorImpl) InsertMobileSession(session *sessions.Session) error {
session.Metadata8, session.Metadata8,
session.Metadata9, session.Metadata9,
session.Metadata10, session.Metadata10,
"mobile", "ios",
session.Timezone, session.Timezone,
); err != nil { ); err != nil {
c.checkError("mobile_sessions", err) c.checkError("mobile_sessions", err)
@ -772,10 +735,8 @@ func (c *connectorImpl) InsertMobileSession(session *sessions.Session) error {
func (c *connectorImpl) InsertMobileCustom(session *sessions.Session, msg *messages.MobileEvent) error { func (c *connectorImpl) InsertMobileCustom(session *sessions.Session, msg *messages.MobileEvent) error {
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"name": msg.Name, "name": msg.Name,
"payload": msg.Payload, "payload": msg.Payload,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal mobile custom event: %s", err) return fmt.Errorf("can't marshal mobile custom event: %s", err)
@ -790,8 +751,6 @@ func (c *connectorImpl) InsertMobileCustom(session *sessions.Session, msg *messa
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("mobile_custom", err) c.checkError("mobile_custom", err)
@ -805,9 +764,7 @@ func (c *connectorImpl) InsertMobileClick(session *sessions.Session, msg *messag
return nil return nil
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"label": msg.Label, "label": msg.Label,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal mobile clicks event: %s", err) return fmt.Errorf("can't marshal mobile clicks event: %s", err)
@ -822,8 +779,6 @@ func (c *connectorImpl) InsertMobileClick(session *sessions.Session, msg *messag
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("mobile_clicks", err) c.checkError("mobile_clicks", err)
@ -837,10 +792,8 @@ func (c *connectorImpl) InsertMobileSwipe(session *sessions.Session, msg *messag
return nil return nil
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"label": msg.Label, "label": msg.Label,
"direction": nullableString(msg.Direction), "direction": nullableString(msg.Direction),
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal mobile swipe event: %s", err) return fmt.Errorf("can't marshal mobile swipe event: %s", err)
@ -855,8 +808,6 @@ func (c *connectorImpl) InsertMobileSwipe(session *sessions.Session, msg *messag
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("mobile_swipes", err) c.checkError("mobile_swipes", err)
@ -870,9 +821,7 @@ func (c *connectorImpl) InsertMobileInput(session *sessions.Session, msg *messag
return nil return nil
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"label": msg.Label, "label": msg.Label,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal mobile input event: %s", err) return fmt.Errorf("can't marshal mobile input event: %s", err)
@ -887,8 +836,6 @@ func (c *connectorImpl) InsertMobileInput(session *sessions.Session, msg *messag
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("mobile_inputs", err) c.checkError("mobile_inputs", err)
@ -908,15 +855,13 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess
response = &msg.Response response = &msg.Response
} }
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"url": cropString(msg.URL), "url": cropString(msg.URL),
"request_body": request, "request_body": request,
"response_body": response, "response_body": response,
"status": uint16(msg.Status), "status": uint16(msg.Status),
"method": url.EnsureMethod(msg.Method), "method": url.EnsureMethod(msg.Method),
"duration": uint16(msg.Duration), "duration": uint16(msg.Duration),
"success": msg.Status < 400, "success": msg.Status < 400,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal mobile request event: %s", err) return fmt.Errorf("can't marshal mobile request event: %s", err)
@ -931,8 +876,6 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("mobile_requests", err) c.checkError("mobile_requests", err)
@ -943,11 +886,9 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess
func (c *connectorImpl) InsertMobileCrash(session *sessions.Session, msg *messages.MobileCrash) error { func (c *connectorImpl) InsertMobileCrash(session *sessions.Session, msg *messages.MobileCrash) error {
jsonString, err := json.Marshal(map[string]interface{}{ jsonString, err := json.Marshal(map[string]interface{}{
"name": msg.Name, "name": msg.Name,
"reason": msg.Reason, "reason": msg.Reason,
"stacktrace": msg.Stacktrace, "stacktrace": msg.Stacktrace,
"user_device": session.UserDevice,
"user_device_type": session.UserDeviceType,
}) })
if err != nil { if err != nil {
return fmt.Errorf("can't marshal mobile crash event: %s", err) return fmt.Errorf("can't marshal mobile crash event: %s", err)
@ -962,8 +903,6 @@ func (c *connectorImpl) InsertMobileCrash(session *sessions.Session, msg *messag
eventTime.Unix(), eventTime.Unix(),
session.UserUUID, session.UserUUID,
true, true,
session.Platform,
session.UserOSVersion,
jsonString, jsonString,
); err != nil { ); err != nil {
c.checkError("mobile_crashes", err) c.checkError("mobile_crashes", err)

View file

@ -52,7 +52,6 @@ func NewBatchesTask(size int) *batchesTask {
type BatchSet struct { type BatchSet struct {
log logger.Logger log logger.Logger
c pool.Pool c pool.Pool
metrics database.Database
ctx context.Context ctx context.Context
batches map[uint64]*SessionBatch batches map[uint64]*SessionBatch
workerTask chan *batchesTask workerTask chan *batchesTask
@ -60,11 +59,10 @@ type BatchSet struct {
finished chan struct{} finished chan struct{}
} }
func NewBatchSet(log logger.Logger, c pool.Pool, metrics database.Database) *BatchSet { func NewBatchSet(log logger.Logger, c pool.Pool) *BatchSet {
bs := &BatchSet{ bs := &BatchSet{
log: log, log: log,
c: c, c: c,
metrics: metrics,
ctx: context.Background(), ctx: context.Background(),
batches: make(map[uint64]*SessionBatch), batches: make(map[uint64]*SessionBatch),
workerTask: make(chan *batchesTask, 1), workerTask: make(chan *batchesTask, 1),
@ -106,7 +104,7 @@ func (conn *BatchSet) Stop() {
func (conn *BatchSet) sendBatches(t *batchesTask) { func (conn *BatchSet) sendBatches(t *batchesTask) {
for _, batch := range t.batches { for _, batch := range t.batches {
// Record batch size // Record batch size
conn.metrics.RecordBatchElements(float64(batch.Len())) database.RecordBatchElements(float64(batch.Len()))
start := time.Now() start := time.Now()
@ -122,7 +120,7 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
} }
} }
br.Close() // returns err br.Close() // returns err
conn.metrics.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds())) database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
} }
} }
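
The Postgres batch set follows the same pattern: the injected metrics struct goes away and the package-level database.RecordBatch* helpers are called around each flush. Assuming pgx sits underneath OpenReplay's pool.Pool wrapper (the br.Close() call suggests pgx batch results), a sendBatches-style round trip could look roughly like this; the table and columns are hypothetical placeholders.

package main

import (
	"context"
	"fmt"
	"time"

	"github.com/jackc/pgx/v5"
	"github.com/jackc/pgx/v5/pgxpool"
)

// sendBatch queues a set of statements and sends them in one round trip,
// roughly the shape of sendBatches for a single SessionBatch.
func sendBatch(ctx context.Context, db *pgxpool.Pool, rows [][2]interface{}) error {
	batch := &pgx.Batch{}
	for _, r := range rows {
		batch.Queue("INSERT INTO some_events (session_id, payload) VALUES ($1, $2)", r[0], r[1])
	}
	start := time.Now()
	br := db.SendBatch(ctx, batch)
	defer br.Close() // closing the results releases the pooled connection
	for range rows {
		if _, err := br.Exec(); err != nil {
			return err
		}
	}
	// e.g. database.RecordBatchElements / RecordBatchInsertDuration go here
	fmt.Println("sent", len(rows), "statements in", time.Since(start))
	return nil
}

func main() {
	// Connecting is omitted; the sketch only shows the batch round-trip shape.
}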

View file

@ -24,7 +24,6 @@ type Bulk interface {
type bulkImpl struct { type bulkImpl struct {
conn pool.Pool conn pool.Pool
metrics database.Database
table string table string
columns string columns string
template string template string
@ -76,12 +75,12 @@ func (b *bulkImpl) send() error {
return fmt.Errorf("send bulk err: %s", err) return fmt.Errorf("send bulk err: %s", err)
} }
// Save bulk metrics // Save bulk metrics
b.metrics.RecordBulkElements(float64(size), "pg", b.table) database.RecordBulkElements(float64(size), "pg", b.table)
b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table) database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
return nil return nil
} }
func NewBulk(conn pool.Pool, metrics database.Database, table, columns, template string, setSize, sizeLimit int) (Bulk, error) { func NewBulk(conn pool.Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
switch { switch {
case conn == nil: case conn == nil:
return nil, errors.New("db conn is empty") return nil, errors.New("db conn is empty")
@ -98,7 +97,6 @@ func NewBulk(conn pool.Pool, metrics database.Database, table, columns, template
} }
return &bulkImpl{ return &bulkImpl{
conn: conn, conn: conn,
metrics: metrics,
table: table, table: table,
columns: columns, columns: columns,
template: template, template: template,
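
The Postgres bulk keeps its (table, columns, template, setSize, sizeLimit) shape and only loses the metrics field. A per-row template such as "($%d, $%d, $%d)" implies the bulk expands rows into one multi-value INSERT with sequential placeholder numbers; a small sketch of that expansion (buildBulkInsert is an illustrative name, not the package's real function):

package main

import (
	"fmt"
	"strings"
)

// buildBulkInsert expands a per-row placeholder template like "($%d, $%d, $%d)"
// into a multi-row INSERT, numbering the positional arguments sequentially,
// which is presumably what the pg Bulk does when it flushes.
func buildBulkInsert(table, columns, template string, rows int) string {
	perRow := strings.Count(template, "%d")
	parts := make([]string, 0, rows)
	arg := 1
	for i := 0; i < rows; i++ {
		nums := make([]interface{}, perRow)
		for j := range nums {
			nums[j] = arg
			arg++
		}
		parts = append(parts, fmt.Sprintf(template, nums...))
	}
	return fmt.Sprintf("INSERT INTO %s %s VALUES %s", table, columns, strings.Join(parts, ", "))
}

func main() {
	fmt.Println(buildBulkInsert("autocomplete", "(value, type, project_id)", "($%d, $%d, $%d)", 2))
	// INSERT INTO autocomplete (value, type, project_id) VALUES ($1, $2, $3), ($4, $5, $6)
}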

View file

@@ -2,7 +2,6 @@ package postgres
import (
"context"
- "openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/db/postgres/pool"
"openreplay/backend/pkg/logger"
@@ -22,7 +21,6 @@ type BulkSet struct {
log logger.Logger
c pool.Pool
ctx context.Context
- metrics database.Database
autocompletes Bulk
requests Bulk
customEvents Bulk
@@ -45,11 +43,10 @@ type BulkSet struct {
finished chan struct{}
}
- func NewBulkSet(log logger.Logger, c pool.Pool, metrics database.Database) *BulkSet {
+ func NewBulkSet(log logger.Logger, c pool.Pool) *BulkSet {
bs := &BulkSet{
log: log,
c: c,
- metrics: metrics,
ctx: context.Background(),
workerTask: make(chan *bulksTask, 1),
done: make(chan struct{}),
@@ -103,7 +100,7 @@ func (conn *BulkSet) Get(name string) Bulk {
func (conn *BulkSet) initBulks() {
var err error
- conn.autocompletes, err = NewBulk(conn.c, conn.metrics,
+ conn.autocompletes, err = NewBulk(conn.c,
"autocomplete",
"(value, type, project_id)",
"($%d, $%d, $%d)",
@@ -111,7 +108,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create autocomplete bulk: %s", err)
}
- conn.requests, err = NewBulk(conn.c, conn.metrics,
+ conn.requests, err = NewBulk(conn.c,
"events_common.requests",
"(session_id, timestamp, seq_index, url, duration, success)",
"($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
@@ -119,7 +116,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create requests bulk: %s", err)
}
- conn.customEvents, err = NewBulk(conn.c, conn.metrics,
+ conn.customEvents, err = NewBulk(conn.c,
"events_common.customs",
"(session_id, timestamp, seq_index, name, payload)",
"($%d, $%d, $%d, LEFT($%d, 2000), $%d)",
@@ -127,7 +124,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create customEvents bulk: %s", err)
}
- conn.webPageEvents, err = NewBulk(conn.c, conn.metrics,
+ conn.webPageEvents, err = NewBulk(conn.c,
"events.pages",
"(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
"load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
@@ -139,7 +136,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webPageEvents bulk: %s", err)
}
- conn.webInputDurations, err = NewBulk(conn.c, conn.metrics,
+ conn.webInputDurations, err = NewBulk(conn.c,
"events.inputs",
"(session_id, message_id, timestamp, label, hesitation, duration)",
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''), $%d, $%d)",
@@ -147,7 +144,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webInputDurations bulk: %s", err)
}
- conn.webGraphQL, err = NewBulk(conn.c, conn.metrics,
+ conn.webGraphQL, err = NewBulk(conn.c,
"events.graphql",
"(session_id, timestamp, message_id, name, request_body, response_body)",
"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@@ -155,7 +152,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webGraphQL bulk: %s", err)
}
- conn.webErrors, err = NewBulk(conn.c, conn.metrics,
+ conn.webErrors, err = NewBulk(conn.c,
"errors",
"(error_id, project_id, source, name, message, payload)",
"($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
@@ -163,7 +160,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webErrors bulk: %s", err)
}
- conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics,
+ conn.webErrorEvents, err = NewBulk(conn.c,
"events.errors",
"(session_id, message_id, timestamp, error_id)",
"($%d, $%d, $%d, $%d)",
@@ -171,7 +168,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webErrorEvents bulk: %s", err)
}
- conn.webErrorTags, err = NewBulk(conn.c, conn.metrics,
+ conn.webErrorTags, err = NewBulk(conn.c,
"public.errors_tags",
"(session_id, message_id, error_id, key, value)",
"($%d, $%d, $%d, $%d, $%d)",
@@ -179,7 +176,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webErrorTags bulk: %s", err)
}
- conn.webIssues, err = NewBulk(conn.c, conn.metrics,
+ conn.webIssues, err = NewBulk(conn.c,
"issues",
"(project_id, issue_id, type, context_string)",
"($%d, $%d, $%d, $%d)",
@@ -187,7 +184,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webIssues bulk: %s", err)
}
- conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics,
+ conn.webIssueEvents, err = NewBulk(conn.c,
"events_common.issues",
"(session_id, issue_id, timestamp, seq_index, payload)",
"($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
@@ -195,7 +192,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webIssueEvents bulk: %s", err)
}
- conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics,
+ conn.webCustomEvents, err = NewBulk(conn.c,
"events_common.customs",
"(session_id, seq_index, timestamp, name, payload, level)",
"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@@ -203,7 +200,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webCustomEvents bulk: %s", err)
}
- conn.webClickEvents, err = NewBulk(conn.c, conn.metrics,
+ conn.webClickEvents, err = NewBulk(conn.c,
"events.clicks",
"(session_id, message_id, timestamp, label, selector, url, path, hesitation)",
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)",
@@ -211,7 +208,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
}
- conn.webClickXYEvents, err = NewBulk(conn.c, conn.metrics,
+ conn.webClickXYEvents, err = NewBulk(conn.c,
"events.clicks",
"(session_id, message_id, timestamp, label, selector, url, path, hesitation, normalized_x, normalized_y)",
"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d, $%d, $%d)",
@@ -219,7 +216,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
}
- conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics,
+ conn.webNetworkRequest, err = NewBulk(conn.c,
"events_common.requests",
"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success, transfer_size)",
"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d, $%d)",
@@ -227,7 +224,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webNetworkRequest bulk: %s", err)
}
- conn.webCanvasNodes, err = NewBulk(conn.c, conn.metrics,
+ conn.webCanvasNodes, err = NewBulk(conn.c,
"events.canvas_recordings",
"(session_id, recording_id, timestamp)",
"($%d, $%d, $%d)",
@@ -235,7 +232,7 @@ func (conn *BulkSet) initBulks() {
if err != nil {
conn.log.Fatal(conn.ctx, "can't create webCanvasNodes bulk: %s", err)
}
- conn.webTagTriggers, err = NewBulk(conn.c, conn.metrics,
+ conn.webTagTriggers, err = NewBulk(conn.c,
"events.tags",
"(session_id, timestamp, seq_index, tag_id)",
"($%d, $%d, $%d, $%d)",

View file

@@ -2,7 +2,6 @@ package postgres
import (
"context"
- "openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/db/postgres/batch"
"openreplay/backend/pkg/db/postgres/pool"
@@ -23,7 +22,7 @@ type Conn struct {
chConn CH
}
- func NewConn(log logger.Logger, pool pool.Pool, ch CH, metrics database.Database) *Conn {
+ func NewConn(log logger.Logger, pool pool.Pool, ch CH) *Conn {
if pool == nil {
log.Fatal(context.Background(), "pg pool is empty")
}
@@ -31,8 +30,8 @@ func NewConn(log logger.Logger, pool pool.Pool, ch CH, metrics database.Database
log: log,
Pool: pool,
chConn: ch,
- bulks: NewBulkSet(log, pool, metrics),
- batches: batch.NewBatchSet(log, pool, metrics),
+ bulks: NewBulkSet(log, pool),
+ batches: batch.NewBatchSet(log, pool),
}
}
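
A hedged wiring sketch of the trimmed constructors as seen from a service's setup code; how the ClickHouse handle is obtained is not shown in this diff and is assumed here:

func buildPgConn(log logger.Logger, pgURI string, ch postgres.CH) (*postgres.Conn, error) {
	// pool.New now takes only the connection URL (no metrics handle).
	pgPool, err := pool.New(pgURI)
	if err != nil {
		return nil, err
	}
	// NewConn likewise lost its metrics parameter.
	return postgres.NewConn(log, pgPool, ch), nil
}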

View file

@@ -181,6 +181,11 @@ func (conn *Conn) InsertWebErrorEvent(sess *sessions.Session, e *types.ErrorEven
if err := conn.bulks.Get("webErrorEvents").Append(sess.SessionID, truncSqIdx(e.MessageID), e.Timestamp, errorID); err != nil {
conn.log.Error(sessCtx, "insert web error event err: %s", err)
}
+ for key, value := range e.Tags {
+ if err := conn.bulks.Get("webErrorTags").Append(sess.SessionID, truncSqIdx(e.MessageID), errorID, key, value); err != nil {
+ conn.log.Error(sessCtx, "insert web error token err: %s", err)
+ }
+ }
return nil
}
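
For illustration, an e.Tags map with two entries would add two rows to public.errors_tags through the webErrorTags bulk declared earlier; the values are made up, and the NULL handling of a nil pointer is an assumption:

func exampleErrorTags() {
	env := "prod"
	e := &types.ErrorEvent{
		MessageID: 42,
		Timestamp: 1695000000000,
		Tags:      map[string]*string{"env": &env, "release": nil},
	}
	// The loop above would conceptually append:
	//   (sess.SessionID, truncSqIdx(42), errorID, "env", "prod")
	//   (sess.SessionID, truncSqIdx(42), errorID, "release", NULL)
	_ = e
}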

View file

@@ -23,12 +23,58 @@ type Pool interface {
}
type poolImpl struct {
url string
conn *pgxpool.Pool
- metrics database.Database
}
- func New(metrics database.Database, url string) (Pool, error) {
+ func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
+ start := time.Now()
+ res, err := p.conn.Query(getTimeoutContext(), sql, args...)
+ method, table := methodName(sql)
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+ database.IncreaseTotalRequests(method, table)
+ return res, err
+ }
+ func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
+ start := time.Now()
+ res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
+ method, table := methodName(sql)
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+ database.IncreaseTotalRequests(method, table)
+ return res
+ }
+ func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
+ start := time.Now()
+ _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
+ method, table := methodName(sql)
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+ database.IncreaseTotalRequests(method, table)
+ return err
+ }
+ func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
+ start := time.Now()
+ res := p.conn.SendBatch(getTimeoutContext(), b)
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
+ database.IncreaseTotalRequests("sendBatch", "")
+ return res
+ }
+ func (p *poolImpl) Begin() (*Tx, error) {
+ start := time.Now()
+ tx, err := p.conn.Begin(context.Background())
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
+ database.IncreaseTotalRequests("begin", "")
+ return &Tx{tx}, err
+ }
+ func (p *poolImpl) Close() {
+ p.conn.Close()
+ }
+ func New(url string) (Pool, error) {
if url == "" {
return nil, errors.New("pg connection url is empty")
}
@@ -37,73 +83,24 @@ func New(metrics database.Database, url string) (Pool, error) {
return nil, fmt.Errorf("pgxpool.Connect error: %v", err)
}
res := &poolImpl{
url: url,
conn: conn,
- metrics: metrics,
}
return res, nil
}
- func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
- start := time.Now()
- res, err := p.conn.Query(getTimeoutContext(), sql, args...)
- method, table := methodName(sql)
- p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
- p.metrics.IncreaseTotalRequests(method, table)
- return res, err
- }
- func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
- start := time.Now()
- res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
- method, table := methodName(sql)
- p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
- p.metrics.IncreaseTotalRequests(method, table)
- return res
- }
- func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
- start := time.Now()
- _, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
- method, table := methodName(sql)
- p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
- p.metrics.IncreaseTotalRequests(method, table)
- return err
- }
- func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
- start := time.Now()
- res := p.conn.SendBatch(getTimeoutContext(), b)
- p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
- p.metrics.IncreaseTotalRequests("sendBatch", "")
- return res
- }
- func (p *poolImpl) Begin() (*Tx, error) {
- start := time.Now()
- tx, err := p.conn.Begin(context.Background())
- p.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
- p.metrics.IncreaseTotalRequests("begin", "")
- return &Tx{tx, p.metrics}, err
- }
- func (p *poolImpl) Close() {
- p.conn.Close()
- }
// TX - start
type Tx struct {
pgx.Tx
- metrics database.Database
}
func (tx *Tx) TxExec(sql string, args ...interface{}) error {
start := time.Now()
_, err := tx.Exec(context.Background(), sql, args...)
method, table := methodName(sql)
- tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
- tx.metrics.IncreaseTotalRequests(method, table)
+ database.IncreaseTotalRequests(method, table)
return err
}
@@ -111,24 +108,24 @@ func (tx *Tx) TxQueryRow(sql string, args ...interface{}) pgx.Row {
start := time.Now()
res := tx.QueryRow(context.Background(), sql, args...)
method, table := methodName(sql)
- tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
- tx.metrics.IncreaseTotalRequests(method, table)
+ database.IncreaseTotalRequests(method, table)
return res
}
func (tx *Tx) TxRollback() error {
start := time.Now()
err := tx.Rollback(context.Background())
- tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
- tx.metrics.IncreaseTotalRequests("rollback", "")
+ database.IncreaseTotalRequests("rollback", "")
return err
}
func (tx *Tx) TxCommit() error {
start := time.Now()
err := tx.Commit(context.Background())
- tx.metrics.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
+ database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
- tx.metrics.IncreaseTotalRequests("commit", "")
+ database.IncreaseTotalRequests("commit", "")
return err
}
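
A hedged usage sketch of the instrumented transaction helpers above, assuming Begin is exposed on the Pool interface; the table and columns are illustrative:

func updateDuration(pgPool pool.Pool, sessionID, duration uint64) error {
	tx, err := pgPool.Begin()
	if err != nil {
		return err
	}
	// TxExec records duration and request counters via the package-level recorders.
	if err := tx.TxExec("UPDATE sessions SET duration = $1 WHERE session_id = $2", duration, sessionID); err != nil {
		_ = tx.TxRollback()
		return err
	}
	return tx.TxCommit()
}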

View file

@@ -61,6 +61,7 @@ func parseTags(tagsJSON string) (tags map[string]*string, err error) {
}
func WrapJSException(m *JSException) (*ErrorEvent, error) {
+ meta, err := parseTags(m.Metadata)
return &ErrorEvent{
MessageID: m.Meta().Index,
Timestamp: m.Meta().Timestamp,
@@ -68,8 +69,9 @@ func WrapJSException(m *JSException) (*ErrorEvent, error) {
Name: m.Name,
Message: m.Message,
Payload: m.Payload,
+ Tags: meta,
OriginType: m.TypeID(),
- }, nil
+ }, err
}
func WrapIntegrationEvent(m *IntegrationEvent) *ErrorEvent {
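
An illustrative input for the new Tags flow; the value is made up and the exact handling of JSON null inside parseTags is an assumption:

func exampleParseTags() {
	tags, err := parseTags(`{"env":"prod","build":null}`)
	// Expected (assumption): tags["env"] points at "prod", tags["build"] is nil, err is nil.
	// WrapJSException now returns this map in ErrorEvent.Tags and propagates the parse error.
	_, _ = tags, err
}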

View file

@@ -77,6 +77,8 @@ func (d *DeadClickDetector) Handle(message Message, timestamp uint64) Message {
*MoveNode,
*RemoveNode,
*SetCSSData,
+ *CSSInsertRule,
+ *CSSDeleteRule,
*SetInputValue,
*SetInputChecked:
return d.Build()

View file

@@ -2,7 +2,6 @@ package integrations
import (
"openreplay/backend/pkg/integrations/service"
- "openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/server/tracer"
"time"
@@ -24,7 +23,7 @@ type ServiceBuilder struct {
IntegrationsAPI api.Handlers
}
- func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServiceBuilder, error) {
+ func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics web.Web, pgconn pool.Pool) (*ServiceBuilder, error) {
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
return nil, err
@@ -38,12 +37,12 @@ func NewServiceBuilder(log logger.Logger, cfg *integrations.Config, webMetrics w
if err != nil {
return nil, err
}
- auditrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
+ auditrail, err := tracer.NewTracer(log, pgconn)
if err != nil {
return nil, err
}
builder := &ServiceBuilder{
- Auth: auth.NewAuth(log, cfg.JWTSecret, "", pgconn, nil, api.NoPrefix),
+ Auth: auth.NewAuth(log, cfg.JWTSecret, "", pgconn, nil),
RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
AuditTrail: auditrail,
IntegrationsAPI: handlers,

View file

@@ -2,7 +2,7 @@
package messages
func IsReplayerType(id int) bool {
- return 1 != id && 17 != id && 23 != id && 24 != id && 26 != id && 27 != id && 28 != id && 29 != id && 30 != id && 31 != id && 32 != id && 33 != id && 42 != id && 56 != id && 63 != id && 64 != id && 66 != id && 78 != id && 81 != id && 82 != id && 112 != id && 115 != id && 124 != id && 125 != id && 126 != id && 127 != id && 90 != id && 91 != id && 92 != id && 94 != id && 95 != id && 97 != id && 98 != id && 107 != id && 110 != id
+ return 1 != id && 3 != id && 17 != id && 23 != id && 24 != id && 25 != id && 26 != id && 27 != id && 28 != id && 29 != id && 30 != id && 31 != id && 32 != id && 33 != id && 42 != id && 56 != id && 62 != id && 63 != id && 64 != id && 66 != id && 78 != id && 80 != id && 81 != id && 82 != id && 112 != id && 115 != id && 124 != id && 125 != id && 126 != id && 127 != id && 90 != id && 91 != id && 92 != id && 94 != id && 95 != id && 97 != id && 98 != id && 107 != id && 110 != id
}
func IsMobileType(id int) bool {
@@ -10,5 +10,5 @@ func IsMobileType(id int) bool {
}
func IsDOMType(id int) bool {
- return 0 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 34 == id || 35 == id || 49 == id || 50 == id || 51 == id || 43 == id || 52 == id || 54 == id || 55 == id || 57 == id || 58 == id || 60 == id || 61 == id || 68 == id || 69 == id || 70 == id || 71 == id || 72 == id || 73 == id || 74 == id || 75 == id || 76 == id || 77 == id || 113 == id || 114 == id || 117 == id || 118 == id || 119 == id || 122 == id || 93 == id || 96 == id || 100 == id || 101 == id || 102 == id || 103 == id || 104 == id || 105 == id || 106 == id || 111 == id
+ return 0 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 34 == id || 35 == id || 37 == id || 38 == id || 49 == id || 50 == id || 51 == id || 43 == id || 52 == id || 54 == id || 55 == id || 57 == id || 58 == id || 59 == id || 60 == id || 61 == id || 67 == id || 68 == id || 69 == id || 70 == id || 71 == id || 72 == id || 73 == id || 74 == id || 75 == id || 76 == id || 77 == id || 113 == id || 114 == id || 117 == id || 118 == id || 119 == id || 122 == id || 93 == id || 96 == id || 100 == id || 101 == id || 102 == id || 103 == id || 104 == id || 105 == id || 106 == id || 111 == id
}
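
The added IDs are easier to spot in a set-based form. This is not the repository's implementation, only an equivalent sketch of the same membership check, with most IDs elided:

// Equivalent sketch (not the repo's code): the newly accepted DOM IDs 37, 38, 59 and 67
// stand out as map keys instead of being buried in a long boolean chain.
var domTypeIDs = map[int]struct{}{
	0: {}, 4: {}, 5: {}, 6: {}, 7: {}, 8: {}, 9: {}, 10: {}, 11: {}, 12: {},
	/* ... */ 37: {}, 38: {}, /* ... */ 59: {}, /* ... */ 67: {}, /* ... */ 111: {},
}

func isDOMType(id int) bool {
	_, ok := domTypeIDs[id]
	return ok
}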

View file

@@ -8,13 +8,11 @@ import (
type sinkIteratorImpl struct {
coreIterator MessageIterator
handler MessageHandler
- metrics sink.Sink
}
- func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool, metrics sink.Sink) MessageIterator {
+ func NewSinkMessageIterator(log logger.Logger, messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
iter := &sinkIteratorImpl{
handler: messageHandler,
- metrics: metrics,
}
iter.coreIterator = NewMessageIterator(log, iter.handle, messageFilter, autoDecode)
return iter
@@ -25,8 +23,8 @@ func (i *sinkIteratorImpl) handle(message Message) {
}
func (i *sinkIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
- i.metrics.RecordBatchSize(float64(len(batchData)))
+ sink.RecordBatchSize(float64(len(batchData)))
- i.metrics.IncreaseTotalBatches()
+ sink.IncreaseTotalBatches()
// Call core iterator
i.coreIterator.Iterate(batchData, batchInfo)
// Send batch end signal

View file

@@ -44,8 +44,9 @@ func NewMessageIterator(log logger.Logger, messageHandler MessageHandler, messag
iter.filter = filter
}
iter.preFilter = map[int]struct{}{
- MsgBatchMetadata: {}, MsgTimestamp: {}, MsgSessionStart: {},
- MsgSessionEnd: {}, MsgSetPageLocation: {}, MsgMobileBatchMeta: {},
+ MsgBatchMetadata: {}, MsgBatchMeta: {}, MsgTimestamp: {},
+ MsgSessionStart: {}, MsgSessionEnd: {}, MsgSetPageLocation: {},
+ MsgMobileBatchMeta: {},
}
return iter
}
@@ -151,6 +152,20 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
i.version = m.Version
i.batchInfo.version = m.Version
+ case *BatchMeta: // Is not required to be present in batch since Mobile doesn't have it (though we might change it)
+ if i.messageInfo.Index > 1 { // Might be several 0-0 BatchMeta in a row without an error though
+ return fmt.Errorf("batchMeta found at the end of the batch, info: %s", i.batchInfo.Info())
+ }
+ i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
+ i.messageInfo.Timestamp = uint64(m.Timestamp)
+ if m.Timestamp == 0 {
+ i.zeroTsLog("BatchMeta")
+ }
+ // Try to get saved session's page url
+ if savedURL := i.urls.Get(i.messageInfo.batch.sessionID); savedURL != "" {
+ i.messageInfo.Url = savedURL
+ }
case *Timestamp:
i.messageInfo.Timestamp = m.Timestamp
if m.Timestamp == 0 {
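
The packed index in the BatchMeta branch reserves the high 32 bits for the page number; a small worked example with made-up values:

func exampleBatchMetaIndex() {
	pageNo, firstIndex := uint64(3), uint64(17)
	index := pageNo<<32 + firstIndex // 3<<32 = 12884901888, so index = 12884901905
	// Messages that follow in the same page increment from this base, which keeps
	// indexes unique across pages, as the comment in the diff implies.
	_ = index
}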

View file

@@ -2,6 +2,34 @@ package messages
func transformDeprecated(msg Message) Message {
switch m := msg.(type) {
+ case *JSExceptionDeprecated:
+ return &JSException{
+ Name: m.Name,
+ Message: m.Message,
+ Payload: m.Payload,
+ Metadata: "{}",
+ }
+ case *Fetch:
+ return &NetworkRequest{
+ Type: "fetch",
+ Method: m.Method,
+ URL: m.URL,
+ Request: m.Request,
+ Response: m.Response,
+ Status: m.Status,
+ Timestamp: m.Timestamp,
+ Duration: m.Duration,
+ }
+ case *IssueEventDeprecated:
+ return &IssueEvent{
+ MessageID: m.MessageID,
+ Timestamp: m.Timestamp,
+ Type: m.Type,
+ ContextString: m.ContextString,
+ Context: m.Context,
+ Payload: m.Payload,
+ URL: "",
+ }
case *ResourceTimingDeprecated:
return &ResourceTiming{
Timestamp: m.Timestamp,
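
A hedged sketch of how the transform shields downstream handlers; where exactly transformDeprecated is invoked is not shown in this diff:

func handleTransformed(msg Message) {
	switch m := transformDeprecated(msg).(type) {
	case *NetworkRequest:
		// A legacy *Fetch arrives here with Type "fetch" and the original fields copied over.
		_ = m.URL
	case *JSException:
		// A legacy *JSExceptionDeprecated arrives with Metadata set to "{}", so parseTags yields no tags.
		_ = m.Name
	}
}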

Some files were not shown because too many files have changed in this diff.