Merge remote-tracking branch 'origin/api-v1.9.5' into api-ms-teams

# Conflicts:
#	ee/api/chalicelib/core/projects.py
#	ee/api/chalicelib/core/webhook.py
#	ee/scripts/schema/db/init_dbs/postgresql/init_schema.sql
#	scripts/schema/db/init_dbs/postgresql/init_schema.sql

commit 9e4c846fe8
1377 changed files with 22741 additions and 127511 deletions
.github/workflows/api-ee.yaml (37 changes, vendored)

@@ -1,6 +1,11 @@
 # This action will push the chalice changes to aws
 on:
   workflow_dispatch:
+    inputs:
+      skip_security_checks:
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
+        required: false
+        default: 'false'
   push:
     branches:
       - dev

@@ -43,11 +48,26 @@ jobs:
       id: build-image
       env:
         DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}-ee
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
         ENVIRONMENT: staging
       run: |
+        skip_security_checks=${{ github.event.inputs.skip_security_checks }}
         cd api
-        PUSH_IMAGE=1 bash build.sh ee
+        PUSH_IMAGE=0 bash -x ./build.sh ee
+        [[ "x$skip_security_checks" == "xtrue" ]] || {
+          curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+          images=("chalice" "alerts")
+          for image in ${images[*]};do
+            ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+          done
+          err_code=$?
+          [[ $err_code -ne 0 ]] && {
+            exit $err_code
+          }
+        } && {
+          echo "Skipping Security Checks"
+        }
+        PUSH_IMAGE=1 bash -x ./build.sh ee
     - name: Creating old image input
       run: |
         #

@@ -91,9 +111,20 @@ jobs:
       env:
         DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
         # We're not passing -ee flag, because helm will add that.
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging

+    - name: Alert slack
+      if: ${{ failure() }}
+      uses: rtCamp/action-slack-notify@v2
+      env:
+        SLACK_CHANNEL: ee
+        SLACK_TITLE: "Failed ${{ github.workflow }}"
+        SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
+        SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
+        SLACK_USERNAME: "OR Bot"
+        SLACK_MESSAGE: 'Build failed :bomb:'
+
     # - name: Debug Job
     # if: ${{ failure() }}
     # uses: mxschmitt/action-tmate@v3
.github/workflows/api.yaml (37 changes, vendored)

@@ -1,6 +1,11 @@
 # This action will push the chalice changes to aws
 on:
   workflow_dispatch:
+    inputs:
+      skip_security_checks:
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
+        required: false
+        default: 'false'
   push:
     branches:
       - dev

@@ -42,11 +47,26 @@ jobs:
       id: build-image
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging
       run: |
+        skip_security_checks=${{ github.event.inputs.skip_security_checks }}
         cd api
-        PUSH_IMAGE=1 bash build.sh
+        PUSH_IMAGE=0 bash -x ./build.sh
+        [[ "x$skip_security_checks" == "xtrue" ]] || {
+          curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+          images=("chalice" "alerts")
+          for image in ${images[*]};do
+            ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+          done
+          err_code=$?
+          [[ $err_code -ne 0 ]] && {
+            exit $err_code
+          }
+        } && {
+          echo "Skipping Security Checks"
+        }
+        PUSH_IMAGE=1 bash -x ./build.sh
     - name: Creating old image input
       run: |
         #

@@ -90,9 +110,20 @@ jobs:
         helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging

+    - name: Alert slack
+      if: ${{ failure() }}
+      uses: rtCamp/action-slack-notify@v2
+      env:
+        SLACK_CHANNEL: foss
+        SLACK_TITLE: "Failed ${{ github.workflow }}"
+        SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
+        SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
+        SLACK_USERNAME: "OR Bot"
+        SLACK_MESSAGE: 'Build failed :bomb:'
+
     # - name: Debug Job
     # if: ${{ failure() }}
     # uses: mxschmitt/action-tmate@v3
.github/workflows/frontend.yaml (6 changes, vendored)

@@ -41,7 +41,7 @@ jobs:
       id: build-image
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging
       run: |
         set -x

@@ -84,7 +84,7 @@ jobs:
         helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging

@@ -130,7 +130,7 @@ jobs:
       env:
         DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
         # We're not passing -ee flag, because helm will add that.
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging

       # - name: Debug Job
.github/workflows/sourcemaps-reader.yaml (new file, 103 lines, vendored)

@@ -0,0 +1,103 @@
+# This action will push the chalice changes to aws
+on:
+  workflow_dispatch:
+  push:
+    branches:
+      - dev
+    paths:
+      - sourcemap-reader/**
+
+name: Build and Deploy Chalice
+
+jobs:
+  deploy:
+    name: Deploy
+    runs-on: ubuntu-latest
+
+    steps:
+      - name: Checkout
+        uses: actions/checkout@v2
+        with:
+          # We need to diff with old commit
+          # to see which workers got changed.
+          fetch-depth: 2
+
+      - name: Docker login
+        run: |
+          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
+
+      - uses: azure/k8s-set-context@v1
+        with:
+          method: kubeconfig
+          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
+        id: setcontext
+
+      # Caching docker images
+      - uses: satackey/action-docker-layer-caching@v0.0.11
+        # Ignore the failure of a step and avoid terminating the job.
+        continue-on-error: true
+
+      - name: Building and Pusing sourcemaps-reader image
+        id: build-image
+        env:
+          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
+          ENVIRONMENT: staging
+        run: |
+          cd sourcemap-reader
+          PUSH_IMAGE=1 bash build.sh
+      - name: Creating old image input
+        run: |
+          #
+          # Create yaml with existing image tags
+          #
+          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
+          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
+
+          echo > /tmp/image_override.yaml
+
+          for line in `cat /tmp/image_tag.txt`;
+          do
+          image_array=($(echo "$line" | tr ':' '\n'))
+          cat <<EOF >> /tmp/image_override.yaml
+          ${image_array[0]}:
+            image:
+              tag: ${image_array[1]}
+          EOF
+          done
+
+      - name: Deploy to kubernetes
+        run: |
+          cd scripts/helmcharts/
+
+          ## Update secerts
+          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
+          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
+          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
+          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
+          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
+          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
+
+          # Update changed image tag
+          sed -i "/chalice/{n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
+
+          cat /tmp/image_override.yaml
+          # Deploy command
+          mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
+          rm -rf openreplay/charts/*
+          mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
+          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
+        env:
+          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
+          ENVIRONMENT: staging
+
+      # - name: Debug Job
+      # if: ${{ failure() }}
+      # uses: mxschmitt/action-tmate@v3
+      # env:
+      #   DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
+      #   IMAGE_TAG: ${{ github.sha }}
+      #   ENVIRONMENT: staging
+      #
.github/workflows/utilities.yaml (4 changes, vendored)

@@ -36,7 +36,7 @@ jobs:
       id: build-image
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging
       run: |
         cd utilities

@@ -53,7 +53,7 @@ jobs:
         bash kube-install.sh --app utilities
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging

       # - name: Debug Job
.github/workflows/workers-ee.yaml (34 changes, vendored)

@@ -7,6 +7,10 @@ on:
         description: 'Name of a single service to build(in small letters). "all" to build everything'
         required: false
         default: 'false'
+      skip_security_checks:
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
+        required: false
+        default: 'false'
   push:
     branches:
       - dev

@@ -49,7 +53,7 @@ jobs:
       id: build-image
       env:
         DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}-ee
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
         ENVIRONMENT: staging
       run: |
         #

@@ -61,6 +65,7 @@ jobs:
         #
         set -x
         touch /tmp/images_to_build.txt
+        skip_security_checks=${{ github.event.inputs.skip_security_checks }}
         tmp_param=${{ github.event.inputs.build_service }}
         build_param=${tmp_param:-'false'}
         case ${build_param} in

@@ -89,14 +94,25 @@ jobs:
         for image in $(cat /tmp/images_to_build.txt);
         do
           echo "Bulding $image"
-          PUSH_IMAGE=1 bash -x ./build.sh ee $image
+          PUSH_IMAGE=0 bash -x ./build.sh skip $image
+          [[ "x$skip_security_checks" == "xtrue" ]] || {
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            ./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+            err_code=$?
+            [[ $err_code -ne 0 ]] && {
+              exit $err_code
+            }
+          } && {
+            echo "Skipping Security Checks"
+          }
+          PUSH_IMAGE=1 bash -x ./build.sh skip $image
           echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
         done

     - name: Deploying to kuberntes
       env:
         # We're not passing -ee flag, because helm will add that.
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
       run: |
         #
         # Deploying image to environment.

@@ -140,6 +156,18 @@ jobs:
         # Deploy command
         helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -

+    - name: Alert slack
+      if: ${{ failure() }}
+      uses: rtCamp/action-slack-notify@v2
+      env:
+        SLACK_CHANNEL: ee
+        SLACK_TITLE: "Failed ${{ github.workflow }}"
+        SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
+        SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
+        SLACK_USERNAME: "OR Bot"
+        SLACK_MESSAGE: 'Build failed :bomb:'
+
+
     # - name: Debug Job
     # if: ${{ failure() }}
     # uses: mxschmitt/action-tmate@v3
.github/workflows/workers.yaml (32 changes, vendored)

@@ -7,6 +7,10 @@ on:
         description: 'Name of a single service to build(in small letters). "all" to build everything'
         required: false
         default: 'false'
+      skip_security_checks:
+        description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
+        required: false
+        default: 'false'
   push:
     branches:
       - dev

@@ -49,7 +53,7 @@ jobs:
       id: build-image
       env:
         DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
         ENVIRONMENT: staging
       run: |
         #

@@ -59,8 +63,9 @@ jobs:
         #
         # Getting the images to build
         #
-        set -x
+        set -xe
         touch /tmp/images_to_build.txt
+        skip_security_checks=${{ github.event.inputs.skip_security_checks }}
         tmp_param=${{ github.event.inputs.build_service }}
         build_param=${tmp_param:-'false'}
         case ${build_param} in

@@ -89,13 +94,24 @@ jobs:
         for image in $(cat /tmp/images_to_build.txt);
         do
           echo "Bulding $image"
+          PUSH_IMAGE=0 bash -x ./build.sh skip $image
+          [[ "x$skip_security_checks" == "xtrue" ]] || {
+            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
+            ./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
+            err_code=$?
+            [[ $err_code -ne 0 ]] && {
+              exit $err_code
+            }
+          } && {
+            echo "Skipping Security Checks"
+          }
           PUSH_IMAGE=1 bash -x ./build.sh skip $image
           echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
         done

     - name: Deploying to kuberntes
       env:
-        IMAGE_TAG: ${{ github.sha }}
+        IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
       run: |
         #
         # Deploying image to environment.

@@ -137,6 +153,16 @@ jobs:
         # Deploy command
         helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -

+    - name: Alert slack
+      if: ${{ failure() }}
+      uses: rtCamp/action-slack-notify@v2
+      env:
+        SLACK_CHANNEL: foss
+        SLACK_TITLE: "Failed ${{ github.workflow }}"
+        SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
+        SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
+        SLACK_USERNAME: "OR Bot"
+        SLACK_MESSAGE: 'Build failed :bomb:'
     # - name: Debug Job
     # if: ${{ failure() }}
     # uses: mxschmitt/action-tmate@v3
@@ -8,13 +8,13 @@ ARG envarg
 ENV SOURCE_MAP_VERSION=0.7.4 \
     APP_NAME=chalice \
     LISTEN_PORT=8000 \
     PRIVATE_ENDPOINTS=false \
     ENTERPRISE_BUILD=${envarg}

-WORKDIR /work_tmp
-COPY requirements.txt /work_tmp/requirements.txt
-RUN pip install --no-cache-dir --upgrade -r /work_tmp/requirements.txt
-
 WORKDIR /work
+COPY requirements.txt ./requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
 COPY . .
 RUN mv env.default .env
@@ -4,4 +4,8 @@
 **/build.sh
 **/build_*.sh
 **/*deploy.sh
 Dockerfile*
+
+app_alerts.py
+requirements-alerts.txt
+entrypoint_alerts.sh
@@ -7,12 +7,13 @@ ENV APP_NAME=alerts \
     PG_MINCONN=1 \
     PG_MAXCONN=10 \
     LISTEN_PORT=8000 \
     PRIVATE_ENDPOINTS=true \
     ENTERPRISE_BUILD=${envarg}

-COPY requirements.txt /work_tmp/requirements.txt
-RUN pip install --no-cache-dir --upgrade -r /work_tmp/requirements.txt
-
 WORKDIR /work
+COPY requirements-alerts.txt ./requirements.txt
+RUN pip install --no-cache-dir --upgrade -r requirements.txt
+
 COPY . .
 RUN mv env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh
@@ -7,5 +7,5 @@
 Dockerfile*

 app.py
-entrypoint_alerts.sh
+entrypoint.sh
 requirements.txt
api/build.sh (28 changes)

@@ -7,6 +7,15 @@

 # Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>

+# Helper function
+exit_err() {
+    err_code=$1
+    if [[ err_code != 0 ]]; then
+        exit $err_code
+    fi
+}
+
+environment=$1
 git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
 envarg="default-foss"
 check_prereq() {

@@ -18,8 +27,12 @@ check_prereq() {
 }

 function build_api(){
-    cp -R ../api ../_api
-    cd ../_api
+    destination="_api"
+    [[ $1 == "ee" ]] && {
+        destination="_api_ee"
+    }
+    cp -R ../api ../${destination}
+    cd ../${destination}
     tag=""
     # Copy enterprise code
     [[ $1 == "ee" ]] && {

@@ -27,9 +40,10 @@ function build_api(){
         envarg="default-ee"
         tag="ee-"
     }
+    mv Dockerfile.dockerignore .dockerignore
     docker build -f ./Dockerfile --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/chalice:${git_sha1} .
     cd ../api
-    rm -rf ../_api
+    rm -rf ../${destination}
     [[ $PUSH_IMAGE -eq 1 ]] && {
         docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1}
         docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest

@@ -39,11 +53,13 @@ function build_api(){
 }

 check_prereq
-build_api $1
+build_api $environment
 echo buil_complete
 IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1

-[[ $1 == "ee" ]] && {
+[[ $environment == "ee" ]] && {
     cp ../ee/api/build_crons.sh .
     IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_crons.sh $1
+    exit_err $?
     rm build_crons.sh
 } || true
@@ -16,9 +16,13 @@ check_prereq() {
     }
 }

-function build_api(){
-    cp -R ../api ../_alerts
-    cd ../_alerts
+function build_alerts(){
+    destination="_alerts"
+    [[ $1 == "ee" ]] && {
+        destination="_alerts_ee"
+    }
+    cp -R ../api ../${destination}
+    cd ../${destination}
     tag=""
     # Copy enterprise code
     [[ $1 == "ee" ]] && {

@@ -26,9 +30,10 @@ function build_api(){
         envarg="default-ee"
         tag="ee-"
     }
-    docker build -f ./Dockerfile.alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
+    mv Dockerfile_alerts.dockerignore .dockerignore
+    docker build -f ./Dockerfile_alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
     cd ../api
-    rm -rf ../_alerts
+    rm -rf ../${destination}
     [[ $PUSH_IMAGE -eq 1 ]] && {
         docker push ${DOCKER_REPO:-'local'}/alerts:${git_sha1}
         docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest

@@ -38,4 +43,4 @@ function build_api(){
 }

 check_prereq
-build_api $1
+build_alerts $1
@@ -103,6 +103,7 @@ def Build(a):
     params = {"project_id": a["projectId"], "now": now}
     full_args = {}
     j_s = True
+    main_table = ""
     if a["seriesId"] is not None:
         a["filter"]["sort"] = "session_id"
         a["filter"]["order"] = schemas.SortOrderType.desc

@@ -120,16 +121,16 @@ def Build(a):
                       WHERE project_id = %(project_id)s
                       {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
         j_s = colDef.get("joinSessions", True)
+        main_table = colDef["table"]
+    is_ss = main_table == "public.sessions"
     q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""

     if a["detectionMethod"] == schemas.AlertDetectionMethod.threshold:
         if a["seriesId"] is not None:
             q += f""" FROM ({subQ}) AS stat"""
         else:
-            q += f""" FROM ({subQ} AND timestamp>=%(startDate)s AND timestamp<=%(now)s
-                    {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
-                    {"AND sessions.start_ts <= %(now)s" if j_s else ""}) AS stat"""
+            q += f""" FROM ({subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
+                    {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}) AS stat"""
         params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
     else:
         if a["change"] == schemas.AlertDetectionType.change:

@@ -142,13 +143,11 @@ def Build(a):
                         "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
             else:
                 sub1 = f"""{subQ} AND timestamp>=%(startDate)s
-                            AND datetime<=toDateTime(%(now)s/1000)
-                            {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
-                            {"AND sessions.start_ts <= %(now)s" if j_s else ""}"""
+                            AND timestamp<=%(now)s
+                            {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
                 params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
-                sub2 = f"""{subQ} AND timestamp<%(startDate)s
-                            AND timestamp>=%(timestamp_sub2)s
-                            {"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
+                sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
+                            {"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
                 params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
                 sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
                 q += f" FROM ( {sub1} ) AS stat"

@@ -164,13 +163,11 @@ def Build(a):
                                 - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
                                 * 60 * 1000}
             else:
-                sub1 = f"""{subQ} AND timestamp>=%(startDate)s AND timestamp<=%(now)s
-                            {"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
-                            {"AND sessions.start_ts <= %(now)s" if j_s else ""}"""
+                sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
+                            {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
                 params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
-                sub2 = f"""{subQ} AND timestamp<%(startDate)s
-                            AND timestamp>=%(timestamp_sub2)s
-                            {"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
+                sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
+                            {"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
                 params["timestamp_sub2"] = TimeUTC.now() \
                                            - (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
                 sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"

@@ -185,21 +182,27 @@ def process():
     with pg_client.PostgresClient() as cur:
         for alert in all_alerts:
             if can_check(alert):
+                logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}")
                 query, params = Build(alert)
-                query = cur.mogrify(query, params)
+                try:
+                    query = cur.mogrify(query, params)
+                except Exception as e:
+                    logging.error(
+                        f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
+                    logging.error(e)
+                    continue
                 logging.debug(alert)
                 logging.debug(query)
                 try:
                     cur.execute(query)
                     result = cur.fetchone()
                     if result["valid"]:
-                        logging.info("Valid alert, notifying users")
+                        logging.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
                         notifications.append(generate_notification(alert, result))
                 except Exception as e:
-                    logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
-                    logging.error(str(e))
+                    logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
+                    logging.error(query)
+                    logging.error(e)
+                    cur = cur.recreate(rollback=True)
         if len(notifications) > 0:
             cur.execute(
                 cur.mogrify(f"""UPDATE public.Alerts
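Review note — the substantive change in `process()` is that one bad alert no longer aborts the whole sweep: query building and query execution each get their own try/except, and a failed statement rolls back and recreates the cursor. A minimal sketch of the pattern, assuming the project's `pg_client` wrapper from this commit (helper names other than `cur.mogrify`/`cur.recreate` are illustrative):

    import logging

    def run_alerts(all_alerts, pg_client, build_query, can_check):
        notifications = []
        with pg_client.PostgresClient() as cur:
            for alert in all_alerts:
                if not can_check(alert):
                    continue
                query, params = build_query(alert)
                try:
                    # binding params can fail independently of execution
                    query = cur.mogrify(query, params)
                except Exception as e:
                    logging.error(f"build failed, alertId:{alert['alertId']}: {e}")
                    continue  # skip only this alert
                try:
                    cur.execute(query)
                    result = cur.fetchone()
                    if result["valid"]:
                        notifications.append((alert, result))
                except Exception as e:
                    logging.error(f"query failed, alertId:{alert['alertId']}: {e}")
                    # a failed statement aborts the transaction; roll back and
                    # grab a fresh cursor so the remaining alerts still run
                    cur = cur.recreate(rollback=True)
        return notifications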
@@ -1,5 +1,5 @@
 from os import access, R_OK
-from os.path import exists as path_exists
+from os.path import exists as path_exists, getsize

 import jwt
 import requests

@@ -207,9 +207,11 @@ def get_raw_mob_by_id(project_id, session_id):
     path_to_file = efs_path + "/" + __get_mob_path(project_id=project_id, session_id=session_id)
     if path_exists(path_to_file):
         if not access(path_to_file, R_OK):
-            raise HTTPException(400, f"Replay file found under: {efs_path};"
-                                     f" but it is not readable, please check permissions")
+            raise HTTPException(400, f"Replay file found under: {efs_path};" +
+                                     f" but it is not readable, please check permissions")
+        # getsize return size in bytes, UNPROCESSED_MAX_SIZE is in Kb
+        if (getsize(path_to_file) / 1000) >= config("UNPROCESSED_MAX_SIZE", cast=int, default=200 * 1000):
+            raise HTTPException(413, "Replay file too large")
         return path_to_file

     return None
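Review note — the new guard rejects oversized raw replay files with HTTP 413 before streaming them. A self-contained sketch of the unit handling (the config name and default mirror the hunk; the exception type stands in for FastAPI's `HTTPException`):

    from os.path import exists as path_exists, getsize

    UNPROCESSED_MAX_SIZE_KB = 200 * 1000  # stand-in for config("UNPROCESSED_MAX_SIZE")

    def replay_file_if_usable(path_to_file):
        if not path_exists(path_to_file):
            return None
        # getsize returns bytes while the limit is configured in KB,
        # hence the division by 1000 before comparing
        if getsize(path_to_file) / 1000 >= UNPROCESSED_MAX_SIZE_KB:
            raise ValueError("Replay file too large")  # the API raises HTTP 413
        return path_to_file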
@@ -69,7 +69,8 @@ class Slack(BaseCollaboration):
                 url=integration["endpoint"],
                 json={"attachments": attachments[i:i + 100]})
             if r.status_code != 200:
-                print("!!!! something went wrong")
+                print("!!!! something went wrong while sending to:")
+                print(integration)
                 print(r)
                 print(r.text)
@@ -1,7 +1,6 @@
 # from elasticsearch import Elasticsearch, RequestsHttpConnection
 from elasticsearch import Elasticsearch
-from chalicelib.core import log_tools
 import base64
 import logging

 logging.getLogger('elasticsearch').level = logging.ERROR
@@ -1,7 +1,7 @@
 import re

 from chalicelib.core import projects
-from chalicelib.utils import pg_client, dev
+from chalicelib.utils import pg_client

 MAX_INDEXES = 10
@@ -2,7 +2,7 @@ import json

 import schemas
 from chalicelib.core import users
-from chalicelib.utils import pg_client, helper, dev
+from chalicelib.utils import pg_client, helper
 from chalicelib.utils.TimeUTC import TimeUTC
@@ -48,7 +48,7 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
     if gdpr:
         extra_projection += ',s.gdpr'
     if recorded:
-        extra_projection += """, COALESCE(nullif(EXTRACT(EPOCH FROM s.first_recorded_session_at) * 1000, NULL)::BIGINT,
+        extra_projection += """,COALESCE(nullif(EXTRACT(EPOCH FROM s.first_recorded_session_at) * 1000, NULL)::BIGINT,
                                 (SELECT MIN(sessions.start_ts)
                                  FROM public.sessions
                                  WHERE sessions.project_id = s.project_id
@@ -11,11 +11,7 @@ def reset(data: schemas.ForgetPasswordPayloadSchema):
     if not helper.has_smtp():
         return {"errors": ["no SMTP configuration found, you can ask your admin to reset your password"]}
     a_users = users.get_by_email_only(data.email)
-    if len(a_users) > 1:
-        print(f"multiple users found for [{data.email}] please contact our support")
-        return {"errors": ["multiple users, please contact our support"]}
-    elif len(a_users) == 1:
-        a_users = a_users[0]
+    if a_users:
         invitation_link = users.generate_new_invitation(user_id=a_users["id"])
         email_helper.send_forgot_password(recipient=data.email, invitation_link=invitation_link)
     else:
@@ -89,9 +89,9 @@ def get_by_id2_pg(project_id, session_id, context: schemas.CurrentContext, full_
         all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
         data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
         # to keep only the first stack
-        data['errors'] = [errors.format_first_stack_frame(e) for e in all_errors if
-                          # limit the number of errors to reduce the response-body size
-                          e['source'] == "js_exception"][:500]
+        # limit the number of errors to reduce the response-body size
+        data['errors'] = [errors.format_first_stack_frame(e) for e in all_errors
+                          if e['source'] == "js_exception"][:500]
         data['userEvents'] = events.get_customs_by_sessionId2_pg(project_id=project_id,
                                                                  session_id=session_id)
         data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id)

@@ -332,7 +332,15 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
         # print("--------------------")
         # print(main_query)
         # print("--------------------")
-        cur.execute(main_query)
+        try:
+            cur.execute(main_query)
+        except Exception as err:
+            print("--------- SESSIONS-SERIES QUERY EXCEPTION -----------")
+            print(main_query.decode('UTF-8'))
+            print("--------- PAYLOAD -----------")
+            print(data.json())
+            print("--------------------")
+            raise err
         if view_type == schemas.MetricTimeseriesViewType.line_chart:
             sessions = cur.fetchall()
         else:
@@ -23,7 +23,8 @@ def __get_saved_data(project_id, session_id, issue_id, tool):
         return helper.dict_to_camel_case(cur.fetchone())


-def create_new_assignment(tenant_id, project_id, session_id, creator_id, assignee, description, title, issue_type, integration_project_id):
+def create_new_assignment(tenant_id, project_id, session_id, creator_id, assignee, description, title, issue_type,
+                          integration_project_id):
     error, integration = integrations_manager.get_integration(tenant_id=tenant_id, user_id=creator_id)
     if error is not None:
         return error

@@ -40,7 +41,7 @@ def create_new_assignment(tenant_id, project_id, session_id, creator_id, assigne
                                              integration_project_id=integration_project_id)
     except integration_base_issue.RequestException as e:
         return integration_base_issue.proxy_issues_handler(e)
-    if issue is not None and "id" not in issue:
+    if issue is None or "id" not in issue:
         return {"errors": ["something went wrong while creating the issue"]}
     with pg_client.PostgresClient() as cur:
         query = cur.mogrify("""\
@@ -1,7 +1,6 @@
 from decouple import config

 from chalicelib.utils import s3
-from chalicelib.utils.s3 import client


 def __get_mob_keys(project_id, session_id):

@@ -18,7 +17,7 @@ def __get_mob_keys(project_id, session_id):
 def get_urls(project_id, session_id):
     results = []
     for k in __get_mob_keys(project_id=project_id, session_id=session_id):
-        results.append(client.generate_presigned_url(
+        results.append(s3.client.generate_presigned_url(
             'get_object',
             Params={'Bucket': config("sessions_bucket"), 'Key': k},
             ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)

@@ -28,7 +27,7 @@ def get_urls(project_id, session_id):

 def get_urls_depercated(session_id):
     return [
-        client.generate_presigned_url(
+        s3.client.generate_presigned_url(
             'get_object',
             Params={
                 'Bucket': config("sessions_bucket"),

@@ -36,7 +35,7 @@ def get_urls_depercated(session_id):
             },
             ExpiresIn=100000
         ),
-        client.generate_presigned_url(
+        s3.client.generate_presigned_url(
             'get_object',
             Params={
                 'Bucket': config("sessions_bucket"),

@@ -47,7 +46,7 @@ def get_urls_depercated(session_id):


 def get_ios(session_id):
-    return client.generate_presigned_url(
+    return s3.client.generate_presigned_url(
         'get_object',
         Params={
             'Bucket': config("ios_bucket"),
@@ -140,7 +140,7 @@ def share_to_slack(tenant_id, user_id, project_id, note_id, webhook_id):
     note = get_note(tenant_id=tenant_id, project_id=project_id, user_id=user_id, note_id=note_id, share=user_id)
     if note is None:
         return {"errors": ["Note not found"]}
-    session_url = urljoin(config('SITE_URL'), f"{note['projectId']}/sessions/{note['sessionId']}")
+    session_url = urljoin(config('SITE_URL'), f"{note['projectId']}/session/{note['sessionId']}")
     title = f"<{session_url}|Note for session {note['sessionId']}>"

     blocks = [{"type": "section",
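Review note — the shared link's path segment changes from `/sessions/` to `/session/`. Since the URL is built with `urljoin`, it is worth remembering that `urljoin` replaces the last path segment of the base unless the base ends in a slash. A self-contained illustration (the SITE_URL value is hypothetical; the `/session/` segment is the fix from the diff):

    from urllib.parse import urljoin

    site_url = "https://app.openreplay.example/"  # assumed to end with "/"
    project_id, session_id = 42, 7013

    print(urljoin(site_url, f"{project_id}/session/{session_id}"))
    # -> https://app.openreplay.example/42/session/7013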
@@ -3,7 +3,6 @@ __maintainer__ = "KRAIEM Taha Yassine"

 import schemas
 from chalicelib.core import events, metadata, sessions
-from chalicelib.utils import dev

 """
 todo: remove LIMIT from the query

@@ -200,14 +199,12 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
     n_stages_query += ") AS stages_t"

     n_stages_query = f"""
-    SELECT stages_and_issues_t.*,sessions.session_id, sessions.user_uuid FROM (
+    SELECT stages_and_issues_t.*, sessions.user_uuid FROM (
-        SELECT * FROM (
-            SELECT * FROM
+        SELECT * FROM
             {n_stages_query}
         LEFT JOIN LATERAL
         (
-            SELECT * FROM
-            (SELECT ISE.session_id,
+            ( SELECT ISE.session_id,
                 ISS.type as issue_type,
                 ISE.timestamp AS issue_timestamp,
                 ISS.context_string as issue_context,

@@ -216,21 +213,29 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
             WHERE ISE.timestamp >= stages_t.stage1_timestamp
               AND ISE.timestamp <= stages_t.stage{i + 1}_timestamp
               AND ISS.project_id=%(project_id)s
-              {"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}) AS base_t
-        ) AS issues_t
-        USING (session_id)) AS stages_and_issues_t
-        inner join sessions USING(session_id);
+              {"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
+        ) AS issues_t USING (session_id)
+    ) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
     """

     # LIMIT 10000
     params = {"project_id": project_id, "startTimestamp": filter_d["startDate"], "endTimestamp": filter_d["endDate"],
               "issueTypes": tuple(filter_issues), **values}
     with pg_client.PostgresClient() as cur:
+        query = cur.mogrify(n_stages_query, params)
         # print("---------------------------------------------------")
-        # print(cur.mogrify(n_stages_query, params))
+        # print(query)
         # print("---------------------------------------------------")
-        cur.execute(cur.mogrify(n_stages_query, params))
-        rows = cur.fetchall()
+        try:
+            cur.execute(query)
+            rows = cur.fetchall()
+        except Exception as err:
+            print("--------- FUNNEL SEARCH QUERY EXCEPTION -----------")
+            print(query.decode('UTF-8'))
+            print("--------- PAYLOAD -----------")
+            print(filter_d)
+            print("--------------------")
+            raise err
     return rows

@@ -559,8 +564,8 @@ def get_top_insights(filter_d, project_id):
                "dropDueToIssues": 0

                }]
-    counts = sessions.search_sessions(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d), project_id=project_id,
-                                      user_id=None, count_only=True)
+    counts = sessions.search_sessions(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d),
+                                      project_id=project_id, user_id=None, count_only=True)
     output[0]["sessionsCount"] = counts["countSessions"]
     output[0]["usersCount"] = counts["countUsers"]
     return output, 0
@@ -60,8 +60,8 @@ def create_step1(data: schemas.UserSignupSchema):
                         VALUES (%(email)s, 'owner', %(fullname)s,%(data)s)
                         RETURNING user_id,email,role,name
                     ),
-         au AS (INSERT
-                INTO public.basic_authentication (user_id, password)
+         au AS (
+             INSERT INTO public.basic_authentication (user_id, password)
                 VALUES ((SELECT user_id FROM u), crypt(%(password)s, gen_salt('bf', 12)))
             )
     INSERT INTO public.projects (name, active)

@@ -70,9 +70,9 @@ def create_step1(data: schemas.UserSignupSchema):

     with pg_client.PostgresClient() as cur:
         cur.execute(cur.mogrify(query, params))
-        cur = cur.fetchone()
-        project_id = cur["project_id"]
-        api_key = cur["api_key"]
+        data = cur.fetchone()
+        project_id = data["project_id"]
+        api_key = data["api_key"]
         telemetry.new_client()
         created_at = TimeUTC.now()
         r = users.authenticate(email, password)
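Review note — the rename here fixes a variable-shadowing bug: the old code rebound `cur` to the fetched row, so any later use of the cursor inside the same `with` block would fail. A self-contained illustration of the anti-pattern (using sqlite3 in place of the project's pg_client; the table is made up):

    import sqlite3

    conn = sqlite3.connect(":memory:")
    cur = conn.cursor()
    cur.execute("CREATE TABLE projects (project_id INTEGER, api_key TEXT)")
    cur.execute("INSERT INTO projects VALUES (1, 'key')")
    cur.execute("SELECT project_id, api_key FROM projects")

    data = cur.fetchone()       # GOOD: the cursor name stays bound to the cursor
    project_id, api_key = data

    # BAD (what the old code did): `cur = cur.fetchone()` rebinds `cur` to the
    # fetched row, so any later cur.execute(...) raises AttributeError.
    cur.execute("SELECT COUNT(*) FROM projects")  # still fine with the fix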
@@ -1,4 +1,3 @@
-import hashlib
 from urllib.parse import urlparse

 import requests

@@ -8,17 +7,11 @@ from chalicelib.core import sourcemaps_parser
 from chalicelib.utils import s3


-def __get_key(project_id, url):
-    u = urlparse(url)
-    new_url = u.scheme + "://" + u.netloc + u.path
-    return f"{project_id}/{hashlib.md5(new_url.encode()).hexdigest()}"
-
-
 def presign_share_urls(project_id, urls):
     results = []
     for u in urls:
         results.append(s3.get_presigned_url_for_sharing(bucket=config('sourcemaps_bucket'), expires_in=120,
-                                                        key=__get_key(project_id, u),
+                                                        key=s3.generate_file_key_from_url(project_id, u),
                                                         check_exists=True))
     return results

@@ -28,7 +21,7 @@ def presign_upload_urls(project_id, urls):
     for u in urls:
         results.append(s3.get_presigned_url_for_upload(bucket=config('sourcemaps_bucket'),
                                                        expires_in=1800,
-                                                       key=__get_key(project_id, u)))
+                                                       key=s3.generate_file_key_from_url(project_id, u)))
     return results


@@ -54,7 +47,8 @@ def __frame_is_valid(f):

 def __format_frame(f):
     f["context"] = []  # no context by default
-    if "source" in f: f.pop("source")
+    if "source" in f:
+        f.pop("source")
     url = f.pop("fileName")
     f["absPath"] = url
     f["filename"] = urlparse(url).path

@@ -74,8 +68,13 @@ def format_payload(p, truncate_to_first=False):


 def url_exists(url):
-    r = requests.head(url, allow_redirects=False)
-    return r.status_code == 200 and r.headers.get("Content-Type") != "text/html"
+    try:
+        r = requests.head(url, allow_redirects=False)
+        return r.status_code == 200 and r.headers.get("Content-Type") != "text/html"
+    except Exception as e:
+        print(f"!! Issue checking if URL exists: {url}")
+        print(e)
+        return False


 def get_traces_group(project_id, payload):

@@ -88,7 +87,7 @@ def get_traces_group(project_id, payload):
         file_exists_in_bucket = False
         file_exists_in_server = False
         file_url = u["absPath"]
-        key = __get_key(project_id, file_url)  # use filename instead?
+        key = s3.generate_file_key_from_url(project_id, file_url)  # use filename instead?
         params_idx = file_url.find("?")
         if file_url and len(file_url) > 0 \
                 and not (file_url[:params_idx] if params_idx > -1 else file_url).endswith(".js"):

@@ -97,8 +96,8 @@ def get_traces_group(project_id, payload):
             continue

         if key not in payloads:
-            file_exists_in_bucket = s3.exists(config('sourcemaps_bucket'), key)
-            if not file_exists_in_bucket:
+            file_exists_in_bucket = len(file_url) > 0 and s3.exists(config('sourcemaps_bucket'), key)
+            if len(file_url) > 0 and not file_exists_in_bucket:
                 print(f"{u['absPath']} sourcemap (key '{key}') doesn't exist in S3 looking in server")
                 if not file_url.endswith(".map"):
                     file_url += '.map'

@@ -148,7 +147,7 @@ MAX_COLUMN_OFFSET = 60
 def fetch_missed_contexts(frames):
     source_cache = {}
     for i in range(len(frames)):
-        if len(frames[i]["context"]) > 0:
+        if frames[i] and frames[i].get("context") and len(frames[i]["context"]) > 0:
             continue
         file_abs_path = frames[i]["frame"]["absPath"]
         if file_abs_path in source_cache:

@@ -180,7 +179,7 @@ def fetch_missed_contexts(frames):

             line = lines[l]
             offset = c - MAX_COLUMN_OFFSET
-            if offset < 0:  # if the line is shirt
+            if offset < 0:  # if the line is short
                 offset = 0
             frames[i]["context"].append([frames[i]["lineNo"], line[offset: c + MAX_COLUMN_OFFSET + 1]])
     return frames
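Review note — `url_exists` now treats network failures as "not found" instead of crashing the trace lookup. A self-contained sketch mirroring the hunk (the example URL is illustrative):

    import requests

    def url_exists(url: str) -> bool:
        try:
            r = requests.head(url, allow_redirects=False)
            # a 200 that serves text/html is usually an SPA fallback page,
            # not a real .js/.map asset, so it is treated as "missing"
            return r.status_code == 200 and r.headers.get("Content-Type") != "text/html"
        except Exception as e:  # DNS errors, timeouts, bad schemes, ...
            print(f"!! Issue checking if URL exists: {url}")
            print(e)
            return False

    print(url_exists("https://example.com/static/app.js.map"))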
@@ -43,8 +43,8 @@ def compute():
 def new_client():
     with pg_client.PostgresClient() as cur:
         cur.execute(
-            f"""SELECT *,
-                       (SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email
+            f"""SELECT *, openreplay_version() AS version_number,
+                       (SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email
                 FROM public.tenants
                 LIMIT 1;""")
         data = cur.fetchone()
@@ -7,7 +7,7 @@ from fastapi import BackgroundTasks
 import schemas
 from chalicelib.core import authorizers, metadata, projects
 from chalicelib.core import tenants, assist
-from chalicelib.utils import dev, email_helper
+from chalicelib.utils import email_helper
 from chalicelib.utils import helper
 from chalicelib.utils import pg_client
 from chalicelib.utils.TimeUTC import TimeUTC

@@ -194,7 +194,6 @@ def create_member(tenant_id, user_id, data, background_tasks: BackgroundTasks):
     new_member = create_new_member(email=data["email"], invitation_token=invitation_token,
                                    admin=data.get("admin", False), name=name)
     new_member["invitationLink"] = __get_invitation_link(new_member.pop("invitationToken"))
-
     background_tasks.add_task(email_helper.send_team_invitation, **{
         "recipient": data["email"],
         "invitation_link": new_member["invitationLink"],

@@ -339,15 +338,16 @@ def get_by_email_only(email):
                     users.name,
                     (CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
                     (CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
-                    (CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member
+                    (CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
+                    TRUE AS has_password
                 FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id
-                WHERE
-                 users.email = %(email)s
-                 AND users.deleted_at IS NULL;""",
+                WHERE users.email = %(email)s
+                  AND users.deleted_at IS NULL
+                LIMIT 1;""",
                 {"email": email})
         )
-        r = cur.fetchall()
-    return helper.list_to_camel_case(r)
+        r = cur.fetchone()
+    return helper.dict_to_camel_case(r)


 def get_by_email_reset(email, reset_token):

@@ -376,8 +376,9 @@ def get_by_email_reset(email, reset_token):

 def get_member(tenant_id, user_id):
     with pg_client.PostgresClient() as cur:
-        cur.execute(cur.mogrify(
-            f"""SELECT
+        cur.execute(
+            cur.mogrify(
+                f"""SELECT
                         users.user_id,
                         users.email,
                         users.role,

@@ -392,7 +393,8 @@ def get_member(tenant_id, user_id):
                         invitation_token
                     FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id
                     WHERE users.deleted_at IS NULL AND users.user_id=%(user_id)s
-                    ORDER BY name, user_id""", {"user_id": user_id})
+                    ORDER BY name, user_id""",
+                {"user_id": user_id})
         )
         u = helper.dict_to_camel_case(cur.fetchone())
         if u:

@@ -480,8 +482,8 @@ def change_password(tenant_id, user_id, email, old_password, new_password):
     changes = {"password": new_password}
     user = update(tenant_id=tenant_id, user_id=user_id, changes=changes)
     r = authenticate(user['email'], new_password)
-    tenant_id = r.pop("tenantId")

+    tenant_id = r.pop("tenantId")
     r["limits"] = {
         "teamMember": -1,
         "projects": -1,

@@ -508,8 +510,8 @@ def set_password_invitation(user_id, new_password):
                "changePwdExpireAt": None, "changePwdToken": None}
     user = update(tenant_id=-1, user_id=user_id, changes=changes)
     r = authenticate(user['email'], new_password)
-    tenant_id = r.pop("tenantId")

+    tenant_id = r.pop("tenantId")
     r["limits"] = {
         "teamMember": -1,
         "projects": -1,

@@ -608,6 +610,18 @@ def auth_exists(user_id, tenant_id, jwt_iat, jwt_aud):
     )


+def change_jwt_iat(user_id):
+    with pg_client.PostgresClient() as cur:
+        query = cur.mogrify(
+            f"""UPDATE public.users
+                SET jwt_iat = timezone('utc'::text, now())
+                WHERE user_id = %(user_id)s
+                RETURNING jwt_iat;""",
+            {"user_id": user_id})
+        cur.execute(query)
+        return cur.fetchone().get("jwt_iat")
+
+
 def authenticate(email, password, for_change_password=False):
     with pg_client.PostgresClient() as cur:
         query = cur.mogrify(

@@ -629,22 +643,16 @@ def authenticate(email, password, for_change_password=False):
         cur.execute(query)
         r = cur.fetchone()

-        if r is not None:
-            if for_change_password:
-                return True
-            r = helper.dict_to_camel_case(r)
-            query = cur.mogrify(
-                f"""UPDATE public.users
-                    SET jwt_iat = timezone('utc'::text, now())
-                    WHERE user_id = %(user_id)s
-                    RETURNING jwt_iat;""",
-                {"user_id": r["userId"]})
-            cur.execute(query)
-            return {
-                "jwt": authorizers.generate_jwt(r['userId'], r['tenantId'],
-                                                TimeUTC.datetime_to_timestamp(cur.fetchone()["jwt_iat"]),
-                                                aud=f"front:{helper.get_stage_name()}"),
-                "email": email,
-                **r
-            }
+    if r is not None:
+        if for_change_password:
+            return True
+        r = helper.dict_to_camel_case(r)
+        jwt_iat = change_jwt_iat(r['userId'])
+        return {
+            "jwt": authorizers.generate_jwt(r['userId'], r['tenantId'],
+                                            TimeUTC.datetime_to_timestamp(jwt_iat),
+                                            aud=f"front:{helper.get_stage_name()}"),
+            "email": email,
+            **r
+        }
     return None
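Review note — the jwt_iat rotation is extracted into `change_jwt_iat`, and the success branch of `authenticate` moves outside the `with` block, so the JWT is now built after the login cursor is released while `change_jwt_iat` opens its own short-lived connection (presumably so other flows can rotate the issued-at marker without re-running the login query). A condensed sketch of the composition, using only names from the hunk above:

    def login(email, password):
        r = fetch_user_row(email, password)        # stand-in for the login SELECT
        if r is None:
            return None
        r = helper.dict_to_camel_case(r)
        jwt_iat = change_jwt_iat(r['userId'])      # rotates and returns jwt_iat
        return {
            "jwt": authorizers.generate_jwt(r['userId'], r['tenantId'],
                                            TimeUTC.datetime_to_timestamp(jwt_iat),
                                            aud=f"front:{helper.get_stage_name()}"),
            "email": email,
            **r,
        }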
@@ -147,8 +147,8 @@ def __trigger(hook, data):

     r = requests.post(url=hook["endpoint"], json=data, headers=headers)
     if r.status_code != 200:
-        logging.error("=======> webhook: something went wrong")
-        logging.error(r)
+        logging.error("=======> webhook: something went wrong for:")
+        logging.error(hook)
+        logging.error(r.status_code)
         logging.error(r.text)
         return
@@ -136,13 +136,18 @@ class PostgresClient:
                 and not self.unlimited_query:
             postgreSQL_pool.putconn(self.connection)

-    def recreate_cursor(self):
+    def recreate_cursor(self, rollback=False):
+        if rollback:
+            try:
+                self.connection.rollback()
+            except Exception as error:
+                logging.error("Error while rollbacking connection for recreation", error)
         try:
             self.cursor.close()
         except Exception as error:
             logging.error("Error while closing cursor for recreation", error)
         self.cursor = None
-        self.__enter__()
+        return self.__enter__()


 async def init():
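Review note — two things changed: `recreate_cursor` can now roll back the poisoned transaction first, and it returns the re-entered client so callers can rebind their handle (the alerts worker in this same commit uses the result of the call). A usage sketch under those assumptions; the failing statement is illustrative:

    with pg_client.PostgresClient() as cur:
        try:
            cur.execute("SELECT 1/0;")  # any statement that errors out
        except Exception:
            # the aborted transaction is rolled back and a fresh cursor is
            # returned, which is why the method now ends with
            # `return self.__enter__()`
            cur = cur.recreate_cursor(rollback=True)
        cur.execute("SELECT 1;")  # the connection is usable again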
@@ -1,3 +1,6 @@
+import hashlib
+from urllib.parse import urlparse
+
 from botocore.exceptions import ClientError
 from decouple import config
 from datetime import datetime, timedelta

@@ -88,3 +91,13 @@ def schedule_for_deletion(bucket, key):
     s3_object.copy_from(CopySource={'Bucket': bucket, 'Key': key},
                         Expires=datetime.now() + timedelta(days=7),
                         MetadataDirective='REPLACE')
+
+
+def generate_file_key(project_id, key):
+    return f"{project_id}/{hashlib.md5(key.encode()).hexdigest()}"
+
+
+def generate_file_key_from_url(project_id, url):
+    u = urlparse(url)
+    new_url = u.scheme + "://" + u.netloc + u.path
+    return generate_file_key(project_id=project_id, key=new_url)
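Review note — this is the key scheme that sourcemaps.py previously kept private as `__get_key`, now shared via s3.py. The query string is stripped before hashing, so the same asset fetched with different cache-busting parameters maps to a single sourcemap key. A self-contained illustration (the URLs are made up):

    import hashlib
    from urllib.parse import urlparse

    def generate_file_key(project_id, key):
        return f"{project_id}/{hashlib.md5(key.encode()).hexdigest()}"

    def generate_file_key_from_url(project_id, url):
        u = urlparse(url)
        return generate_file_key(project_id, u.scheme + "://" + u.netloc + u.path)

    a = generate_file_key_from_url(1, "https://cdn.example.com/app.js?v=123")
    b = generate_file_key_from_url(1, "https://cdn.example.com/app.js?v=456")
    assert a == b  # query parameters do not change the key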
@@ -1,3 +0,0 @@
-BEGIN;
-CREATE INDEX pages_ttfb_idx ON events.pages (ttfb) WHERE ttfb > 0;
-COMMIT;
@@ -1,3 +1,3 @@
 #!/bin/sh
-
+export ASSIST_KEY=ignore
 uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload
@@ -42,11 +42,12 @@ sourcemaps_reader=http://sourcemaps-reader-openreplay.app.svc.cluster.local:9000
 STAGE=default-foss
 version_number=1.4.0
 FS_DIR=/mnt/efs
-EFS_SESSION_MOB_PATTERN=%(sessionId)s/dom.mob
-EFS_DEVTOOLS_MOB_PATTERN=%(sessionId)s/devtools.mob
+EFS_SESSION_MOB_PATTERN=%(sessionId)s
+EFS_DEVTOOLS_MOB_PATTERN=%(sessionId)sdevtools
 SESSION_MOB_PATTERN_S=%(sessionId)s/dom.mobs
 SESSION_MOB_PATTERN_E=%(sessionId)s/dom.mobe
 DEVTOOLS_MOB_PATTERN=%(sessionId)s/devtools.mobs
 PRESIGNED_URL_EXPIRATION=3600
 ASSIST_JWT_EXPIRATION=144000
 ASSIST_JWT_SECRET=
+PYTHONUNBUFFERED=1
@@ -1,15 +1,15 @@
 requests==2.28.1
 urllib3==1.26.12
-boto3==1.24.80
-pyjwt==2.5.0
-psycopg2-binary==2.9.3
-elasticsearch==8.4.2
+boto3==1.26.4
+pyjwt==2.6.0
+psycopg2-binary==2.9.5
+elasticsearch==8.5.0
 jira==3.4.1



-fastapi==0.85.0
-uvicorn[standard]==0.18.3
+fastapi==0.86.0
+uvicorn[standard]==0.19.0
 python-decouple==3.6
 pydantic[email]==1.10.2
 apscheduler==3.9.1
@@ -1,15 +1,15 @@
 requests==2.28.1
 urllib3==1.26.12
-boto3==1.24.80
-pyjwt==2.5.0
-psycopg2-binary==2.9.3
-elasticsearch==8.4.2
+boto3==1.26.4
+pyjwt==2.6.0
+psycopg2-binary==2.9.5
+elasticsearch==8.5.0
 jira==3.4.1



-fastapi==0.85.0
-uvicorn[standard]==0.18.3
+fastapi==0.86.0
+uvicorn[standard]==0.19.0
 python-decouple==3.6
 pydantic[email]==1.10.2
 apscheduler==3.9.1
@@ -50,6 +50,7 @@ def login(data: schemas.UserLoginSchema = Body(...)):


 @app.post('/{projectId}/sessions/search', tags=["sessions"])
+@app.post('/{projectId}/sessions/search2', tags=["sessions"])
 def sessions_search(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
                     context: schemas.CurrentContext = Depends(OR_context)):
     data = sessions.search_sessions(data=data, project_id=projectId, user_id=context.user_id)

@@ -445,7 +446,7 @@ def get_all_assignments(projectId: int, context: schemas.CurrentContext = Depend
     }


-@app.post('/{projectId}/sessions2/{sessionId}/assign/projects/{integrationProjectId}', tags=["assignment"])
+@app.post('/{projectId}/sessions/{sessionId}/assign/projects/{integrationProjectId}', tags=["assignment"])
 def create_issue_assignment(projectId: int, sessionId: int, integrationProjectId,
                             data: schemas.AssignmentSchema = Body(...),
                             context: schemas.CurrentContext = Depends(OR_context)):

@@ -778,7 +779,7 @@ def create_project(data: schemas.CreateProjectSchema = Body(...),
     return projects.create(tenant_id=context.tenant_id, user_id=context.user_id, data=data)


-@app.post('/projects/{projectId}', tags=['projects'])
+@app.put('/projects/{projectId}', tags=['projects'])
 def edit_project(projectId: int, data: schemas.CreateProjectSchema = Body(...),
                  context: schemas.CurrentContext = Depends(OR_context)):
     return projects.edit(tenant_id=context.tenant_id, user_id=context.user_id, data=data, project_id=projectId)

@@ -868,7 +869,7 @@ def delete_slack_integration(integrationId: int, context: schemas.CurrentContext
     return webhook.delete(context.tenant_id, integrationId)


-@app.post('/webhooks', tags=["webhooks"])
+@app.put('/webhooks', tags=["webhooks"])
 def add_edit_webhook(data: schemas.CreateEditWebhookSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": webhook.add_edit(tenant_id=context.tenant_id, data=data.dict(), replace_none=True)}
@ -132,7 +132,7 @@ def change_password_by_invitation(data: schemas.EditPasswordByInvitationSchema =
|
|||
return users.set_password_invitation(new_password=data.password, user_id=user["userId"])
|
||||
|
||||
|
||||
@app.post('/client/members/{memberId}', tags=["client"])
|
||||
@app.put('/client/members/{memberId}', tags=["client"])
|
||||
def edit_member(memberId: int, data: schemas.EditMemberSchema,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return users.edit_member(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data,
|
||||
|
|
@ -165,7 +165,6 @@ def get_projects(context: schemas.CurrentContext = Depends(OR_context)):
|
|||
|
||||
|
||||
@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
|
||||
@app.get('/{projectId}/sessions2/{sessionId}', tags=["sessions"])
|
||||
def get_session(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
if isinstance(sessionId, str):
|
||||
|
|
@ -183,7 +182,6 @@ def get_session(projectId: int, sessionId: Union[int, str], background_tasks: Ba
|
|||
|
||||
|
||||
@app.get('/{projectId}/sessions/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
|
||||
@app.get('/{projectId}/sessions2/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
|
||||
def get_error_trace(projectId: int, sessionId: int, errorId: str,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = errors.get_trace(project_id=projectId, error_id=errorId)
|
||||

@@ -323,7 +321,6 @@ def add_remove_favorite_session2(projectId: int, sessionId: int,

@app.get('/{projectId}/sessions/{sessionId}/assign', tags=["sessions"])
@app.get('/{projectId}/sessions2/{sessionId}/assign', tags=["sessions"])
def assign_session(projectId: int, sessionId, context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.get_by_session(project_id=projectId, session_id=sessionId,
                                               tenant_id=context.tenant_id,

@@ -336,7 +333,6 @@ def assign_session(projectId: int, sessionId, context: schemas.CurrentContext =

@app.get('/{projectId}/sessions/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"])
@app.get('/{projectId}/sessions2/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"])
def assign_session(projectId: int, sessionId: int, issueId: str,
                   context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.get(project_id=projectId, session_id=sessionId, assignment_id=issueId,

@@ -349,7 +345,6 @@ def assign_session(projectId: int, sessionId: int, issueId: str,

@app.post('/{projectId}/sessions/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
@app.post('/{projectId}/sessions2/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schemas.CommentAssignmentSchema = Body(...),
                       context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.comment(tenant_id=context.tenant_id, project_id=projectId,

@@ -7,6 +7,7 @@
# Example
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
set -e

git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
ee="false"

@@ -25,12 +26,17 @@ function build_service() {
    [[ $PUSH_IMAGE -eq 1 ]] && {
        docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
    }
    echo "Build completed for $image"
    return
}

function build_api(){
    cp -R ../backend ../_backend
    cd ../_backend
    destination="_backend"
    [[ $1 == "ee" ]] && {
        destination="_backend_ee"
    }
    cp -R ../backend ../${destination}
    cd ../${destination}
    # Copy enterprise code
    [[ $1 == "ee" ]] && {
        cp -r ../ee/backend/* ./

@@ -38,6 +44,8 @@ function build_api(){
    }
    [[ $2 != "" ]] && {
        build_service $2
        cd ../backend
        rm -rf ../${destination}
        return
    }
    for image in $(ls cmd);

@@ -46,7 +54,7 @@ function build_api(){
        echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
    done
    cd ../backend
    rm -rf ../_backend
    rm -rf ../${destination}
    echo "backend build completed"
}

@@ -1 +0,0 @@
GROUP_CACHE=from_file

@@ -3,7 +3,6 @@ package main
import (
    "context"
    "log"
    "openreplay/backend/pkg/queue/types"
    "os"
    "os/signal"
    "syscall"

@@ -31,40 +30,28 @@ func main() {
        log.Printf("can't create assets_total metric: %s", err)
    }

    consumer := queue.NewMessageConsumer(
    msgHandler := func(msg messages.Message) {
        switch m := msg.(type) {
        case *messages.AssetCache:
            cacher.CacheURL(m.SessionID(), m.URL)
            totalAssets.Add(context.Background(), 1)
        // TODO: connect to "raw" topic in order to listen for JSException
        case *messages.JSException:
            sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
            if err != nil {
                log.Printf("Error on source extraction: %v", err)
                return
            }
            for _, source := range sourceList {
                cacher.CacheJSFile(source)
            }
        }
    }

    msgConsumer := queue.NewConsumer(
        cfg.GroupCache,
        []string{cfg.TopicCache},
        func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
            for iter.Next() {
                if iter.Type() == messages.MsgAssetCache {
                    m := iter.Message().Decode()
                    if m == nil {
                        return
                    }
                    msg := m.(*messages.AssetCache)
                    cacher.CacheURL(sessionID, msg.URL)
                    totalAssets.Add(context.Background(), 1)
                } else if iter.Type() == messages.MsgErrorEvent {
                    m := iter.Message().Decode()
                    if m == nil {
                        return
                    }
                    msg := m.(*messages.ErrorEvent)
                    if msg.Source != "js_exception" {
                        continue
                    }
                    sourceList, err := assets.ExtractJSExceptionSources(&msg.Payload)
                    if err != nil {
                        log.Printf("Error on source extraction: %v", err)
                        continue
                    }
                    for _, source := range sourceList {
                        cacher.CacheJSFile(source)
                    }
                }
            }
            iter.Close()
        },
        messages.NewMessageIterator(msgHandler, []int{messages.MsgAssetCache, messages.MsgJSException}, true),
        true,
        cfg.MessageSizeLimit,
    )
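This rewrite recurs in every service below: queue.NewMessageConsumer with a hand-rolled per-batch callback becomes queue.NewConsumer fed by messages.NewMessageIterator(handler, filter, autoDecode), which walks a batch, decodes, filters by type ID, and hands single messages to the handler. A minimal standalone sketch of that iterator shape — the types here are simplified stand-ins, not the real openreplay/backend/pkg/messages API:

package main

import "fmt"

// Simplified stand-ins for the real types in openreplay/backend/pkg/messages.
type Message interface{ TypeID() int }

type AssetCache struct{ URL string }

func (AssetCache) TypeID() int { return 7 } // placeholder type ID

// messageIterator walks a decoded batch and hands matching messages to the
// handler, mirroring messages.NewMessageIterator(handler, []int{...}, true).
type messageIterator struct {
    handler func(Message)
    filter  map[int]struct{} // nil means "accept everything"
}

func newMessageIterator(h func(Message), typeIDs []int) *messageIterator {
    var f map[int]struct{}
    if typeIDs != nil {
        f = make(map[int]struct{}, len(typeIDs))
        for _, t := range typeIDs {
            f[t] = struct{}{}
        }
    }
    return &messageIterator{handler: h, filter: f}
}

func (mi *messageIterator) Iterate(batch []Message) {
    for _, m := range batch {
        if mi.filter != nil {
            if _, ok := mi.filter[m.TypeID()]; !ok {
                continue // skip types this service does not care about
            }
        }
        mi.handler(m)
    }
}

func main() {
    it := newMessageIterator(func(m Message) { fmt.Printf("handled %+v\n", m) }, []int{7})
    it.Iterate([]Message{AssetCache{URL: "https://site.example/app.css"}})
}

The payoff of the new shape is that batch walking, decoding, and filtering live in one place instead of being copy-pasted into every service's callback.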

@@ -79,15 +66,18 @@ func main() {
        select {
        case sig := <-sigchan:
            log.Printf("Caught signal %v: terminating\n", sig)
            consumer.Close()
            cacher.Stop()
            msgConsumer.Close()
            os.Exit(0)
        case err := <-cacher.Errors:
            log.Printf("Error while caching: %v", err)
            // TODO: notify user
        case <-tick:
            cacher.UpdateTimeouts()
        default:
            if err := consumer.ConsumeNext(); err != nil {
            if !cacher.CanCache() {
                continue
            }
            if err := msgConsumer.ConsumeNext(); err != nil {
                log.Fatalf("Error on consumption: %v", err)
            }
        }

@@ -3,6 +3,7 @@ package main
import (
    "errors"
    "log"
    types2 "openreplay/backend/pkg/db/types"
    "openreplay/backend/pkg/queue/types"
    "os"
    "os/signal"

@@ -30,7 +31,8 @@ func main() {
    cfg := db.New()

    // Init database
    pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
    pg := cache.NewPGCache(
        postgres.NewConn(cfg.Postgres, cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
    defer pg.Close()

    // HandlersFabric returns the list of message handlers we want to be applied to each incoming message.

@@ -45,10 +47,6 @@ func main() {
    // Create handler's aggregator
    builderMap := sessions.NewBuilderMap(handlersFabric)

    keepMessage := func(tp int) bool {
        return tp == messages.MsgMetadata || tp == messages.MsgIssueEvent || tp == messages.MsgSessionStart || tp == messages.MsgSessionEnd || tp == messages.MsgUserID || tp == messages.MsgUserAnonymousID || tp == messages.MsgCustomEvent || tp == messages.MsgClickEvent || tp == messages.MsgInputEvent || tp == messages.MsgPageEvent || tp == messages.MsgErrorEvent || tp == messages.MsgFetchEvent || tp == messages.MsgGraphQLEvent || tp == messages.MsgIntegrationEvent || tp == messages.MsgPerformanceTrackAggr || tp == messages.MsgResourceEvent || tp == messages.MsgLongTask || tp == messages.MsgJSException || tp == messages.MsgResourceTiming || tp == messages.MsgRawCustomEvent || tp == messages.MsgCustomIssue || tp == messages.MsgFetch || tp == messages.MsgGraphQL || tp == messages.MsgStateAction || tp == messages.MsgSetInputTarget || tp == messages.MsgSetInputValue || tp == messages.MsgCreateDocument || tp == messages.MsgMouseClick || tp == messages.MsgSetPageLocation || tp == messages.MsgPageLoadTiming || tp == messages.MsgPageRenderTiming
    }

    var producer types.Producer = nil
    if cfg.UseQuickwit {
        producer = queue.NewProducer(cfg.MessageSizeLimit, true)

@@ -60,69 +58,74 @@ func main() {
    saver.InitStats()
    statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)

    msgFilter := []int{messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd,
        messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgClickEvent,
        messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr,
        messages.MsgJSException, messages.MsgResourceTiming,
        messages.MsgRawCustomEvent, messages.MsgCustomIssue, messages.MsgFetch, messages.MsgGraphQL,
        messages.MsgStateAction, messages.MsgSetInputTarget, messages.MsgSetInputValue, messages.MsgCreateDocument,
        messages.MsgMouseClick, messages.MsgSetPageLocation, messages.MsgPageLoadTiming, messages.MsgPageRenderTiming}

    // Handler logic
    handler := func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
        statsLogger.Collect(sessionID, meta)
    msgHandler := func(msg messages.Message) {
        statsLogger.Collect(msg)

        for iter.Next() {
            if !keepMessage(iter.Type()) {
                continue
        // Just save session data into db without additional checks
        if err := saver.InsertMessage(msg); err != nil {
            if !postgres.IsPkeyViolation(err) {
                log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
            }
            msg := iter.Message().Decode()
            if msg == nil {
                return
            }

            // Just save session data into db without additional checks
            if err := saver.InsertMessage(sessionID, msg); err != nil {
                if !postgres.IsPkeyViolation(err) {
                    log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, sessionID, msg)
                }
                return
            }

            session, err := pg.GetSession(sessionID)
            if session == nil {
                if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
                    log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, sessionID, msg)
                }
                return
            }

            // Save statistics to db
            err = saver.InsertStats(session, msg)
            if err != nil {
                log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
            }

            // Handle heuristics and save to temporary queue in memory
            builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)

            // Process saved heuristics messages as usual messages above in the code
            builderMap.IterateSessionReadyMessages(sessionID, func(msg messages.Message) {
                if err := saver.InsertMessage(sessionID, msg); err != nil {
                    if !postgres.IsPkeyViolation(err) {
                        log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
                    }
                    return
                }

                if err := saver.InsertStats(session, msg); err != nil {
                    log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
                }
            })
            return
        }
        iter.Close()

        var (
            session *types2.Session
            err     error
        )
        if msg.TypeID() == messages.MsgSessionEnd {
            session, err = pg.GetSession(msg.SessionID())
        } else {
            session, err = pg.Cache.GetSession(msg.SessionID())
        }
        if session == nil {
            if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
                log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
            }
            return
        }

        // Save statistics to db
        err = saver.InsertStats(session, msg)
        if err != nil {
            log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
        }

        // Handle heuristics and save to temporary queue in memory
        builderMap.HandleMessage(msg)

        // Process saved heuristics messages as usual messages above in the code
        builderMap.IterateSessionReadyMessages(msg.SessionID(), func(msg messages.Message) {
            if err := saver.InsertMessage(msg); err != nil {
                if !postgres.IsPkeyViolation(err) {
                    log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
                }
                return
            }

            if err := saver.InsertStats(session, msg); err != nil {
                log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
            }
        })
    }

    // Init consumer
    consumer := queue.NewMessageConsumer(
    consumer := queue.NewConsumer(
        cfg.GroupDB,
        []string{
            cfg.TopicRawWeb,
            cfg.TopicAnalytics,
        },
        handler,
        messages.NewMessageIterator(msgHandler, msgFilter, true),
        false,
        cfg.MessageSizeLimit,
    )
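The msgFilter slice above replaces the old keepMessage chain of thirty-odd == comparisons; the iterator now performs the membership test. Done by hand, the idiomatic equivalent is a set with O(1) lookup — a small sketch with placeholder type IDs:

package main

import "fmt"

func main() {
    msgFilter := []int{80, 81, 1, 3, 125} // placeholder type IDs
    keep := make(map[int]struct{}, len(msgFilter))
    for _, tp := range msgFilter {
        keep[tp] = struct{}{}
    }
    for _, tp := range []int{1, 2, 125} {
        _, ok := keep[tp]
        fmt.Printf("type %d kept: %v\n", tp, ok)
    }
}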

@@ -133,33 +136,36 @@ func main() {
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

    commitTick := time.Tick(cfg.CommitBatchTimeout)

    // Send collected batches to db
    commitDBUpdates := func() {
        start := time.Now()
        pg.CommitBatches()
        pgDur := time.Now().Sub(start).Milliseconds()

        start = time.Now()
        if err := saver.CommitStats(); err != nil {
            log.Printf("Error on stats commit: %v", err)
        }
        chDur := time.Now().Sub(start).Milliseconds()
        log.Printf("commit duration(ms), pg: %d, ch: %d", pgDur, chDur)

        if err := consumer.Commit(); err != nil {
            log.Printf("Error on consumer commit: %v", err)
        }
    }
    for {
        select {
        case sig := <-sigchan:
            log.Printf("Caught signal %v: terminating\n", sig)
            log.Printf("Caught signal %s: terminating\n", sig.String())
            commitDBUpdates()
            consumer.Close()
            os.Exit(0)
        case <-commitTick:
            // Send collected batches to db
            start := time.Now()
            pg.CommitBatches()
            pgDur := time.Now().Sub(start).Milliseconds()

            start = time.Now()
            if err := saver.CommitStats(consumer.HasFirstPartition()); err != nil {
                log.Printf("Error on stats commit: %v", err)
            }
            chDur := time.Now().Sub(start).Milliseconds()
            log.Printf("commit duration(ms), pg: %d, ch: %d", pgDur, chDur)

            // TODO: use commit worker to save time each tick
            if err := consumer.Commit(); err != nil {
                log.Printf("Error on consumer commit: %v", err)
            }
            commitDBUpdates()
        default:
            // Handle new message from queue
            err := consumer.ConsumeNext()
            if err != nil {
            if err := consumer.ConsumeNext(); err != nil {
                log.Fatalf("Error on consumption: %v", err)
            }
        }
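The loop above flushes Postgres batches and stats on a timer and only then commits consumer offsets, so a crash replays at most one tick's worth of messages instead of losing them. A standalone sketch of the same tick-plus-default shape — the commit and consume bodies are stand-ins, and the loop is bounded so the example terminates:

package main

import (
    "log"
    "time"
)

func main() {
    commitTick := time.Tick(500 * time.Millisecond)
    buffered := 0

    commitDBUpdates := func() {
        log.Printf("flushing %d buffered messages, then committing offsets", buffered)
        buffered = 0
    }

    for i := 0; i < 20; i++ { // bounded so the example terminates
        select {
        case <-commitTick:
            commitDBUpdates()
        default:
            buffered++ // stand-in for consumer.ConsumeNext()
            time.Sleep(50 * time.Millisecond)
        }
    }
    commitDBUpdates() // final flush, like the signal branch above
}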

@@ -2,7 +2,7 @@ package main

import (
    "log"
    "openreplay/backend/pkg/queue/types"
    "openreplay/backend/internal/storage"
    "os"
    "os/signal"
    "syscall"

@@ -20,42 +20,27 @@ import (
)

func main() {
    metrics := monitoring.New("ender")

    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    // Load service configuration
    metrics := monitoring.New("ender")
    cfg := ender.New()

    pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
    defer pg.Close()

    // Init all modules
    statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
    sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
    sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber, logger.NewQueueStats(cfg.LoggerTimeout))
    if err != nil {
        log.Printf("can't init ender service: %s", err)
        return
    }

    producer := queue.NewProducer(cfg.MessageSizeLimit, true)
    consumer := queue.NewMessageConsumer(
    consumer := queue.NewConsumer(
        cfg.GroupEnder,
        []string{
            cfg.TopicRawWeb,
        },
        func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
            for iter.Next() {
                if iter.Type() == messages.MsgSessionStart || iter.Type() == messages.MsgSessionEnd {
                    continue
                }
                if iter.Message().Meta().Timestamp == 0 {
                    log.Printf("ZERO TS, sessID: %d, msgType: %d", sessionID, iter.Type())
                }
                statsLogger.Collect(sessionID, meta)
                sessions.UpdateSession(sessionID, meta.Timestamp, iter.Message().Meta().Timestamp)
            }
            iter.Close()
        },
        []string{cfg.TopicRawWeb},
        messages.NewMessageIterator(
            func(msg messages.Message) { sessions.UpdateSession(msg) },
            []int{messages.MsgTimestamp},
            false),
        false,
        cfg.MessageSizeLimit,
    )

@@ -94,7 +79,16 @@ func main() {
                currDuration, newDuration)
            return true
        }
        if err := producer.Produce(cfg.TopicRawWeb, sessionID, messages.Encode(msg)); err != nil {
        if cfg.UseEncryption {
            if key := storage.GenerateEncryptionKey(); key != nil {
                if err := pg.InsertSessionEncryptionKey(sessionID, key); err != nil {
                    log.Printf("can't save session encryption key: %s, session will not be encrypted", err)
                } else {
                    msg.EncryptionKey = string(key)
                }
            }
        }
        if err := producer.Produce(cfg.TopicRawWeb, sessionID, msg.Encode()); err != nil {
            log.Printf("can't send sessionEnd to topic: %s; sessID: %d", err, sessionID)
            return false
        }
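The encryption branch generates a per-session key, persists it, and attaches it to the SessionEnd message; if the key cannot be stored, the session is simply left unencrypted. The diff does not show GenerateEncryptionKey itself; a plausible sketch, assuming a random 256-bit key is what it produces:

package main

import (
    "crypto/rand"
    "encoding/base64"
    "fmt"
)

// generateEncryptionKey is an assumption about what the real
// storage.GenerateEncryptionKey might look like, not its actual body.
func generateEncryptionKey() []byte {
    key := make([]byte, 32) // 256-bit key, enough for AES-256
    if _, err := rand.Read(key); err != nil {
        return nil // callers treat nil as "skip encryption", like the diff above
    }
    out := make([]byte, base64.StdEncoding.EncodedLen(len(key)))
    base64.StdEncoding.Encode(out, key)
    return out
}

func main() {
    fmt.Printf("key: %s\n", generateEncryptionKey())
}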

@@ -2,7 +2,6 @@ package main

import (
    "log"
    "openreplay/backend/pkg/queue/types"
    "os"
    "os/signal"
    "syscall"

@@ -47,25 +46,18 @@ func main() {

    // Init producer and consumer for data bus
    producer := queue.NewProducer(cfg.MessageSizeLimit, true)
    consumer := queue.NewMessageConsumer(

    msgHandler := func(msg messages.Message) {
        statsLogger.Collect(msg)
        builderMap.HandleMessage(msg)
    }

    consumer := queue.NewConsumer(
        cfg.GroupHeuristics,
        []string{
            cfg.TopicRawWeb,
        },
        func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
            var lastMessageID uint64
            for iter.Next() {
                statsLogger.Collect(sessionID, meta)
                msg := iter.Message().Decode()
                if msg == nil {
                    log.Printf("failed batch, sess: %d, lastIndex: %d", sessionID, lastMessageID)
                    continue
                }
                lastMessageID = msg.Meta().Index
                builderMap.HandleMessage(sessionID, msg, iter.Message().Meta().Index)
            }
            iter.Close()
        },
        messages.NewMessageIterator(msgHandler, nil, true),
        false,
        cfg.MessageSizeLimit,
    )

@@ -86,7 +78,7 @@ func main() {
            os.Exit(0)
        case <-tick:
            builderMap.IterateReadyMessages(func(sessionID uint64, readyMsg messages.Message) {
                producer.Produce(cfg.TopicAnalytics, sessionID, messages.Encode(readyMsg))
                producer.Produce(cfg.TopicAnalytics, sessionID, readyMsg.Encode())
            })
            producer.Flush(cfg.ProducerTimeout)
            consumer.Commit()

@@ -13,12 +13,10 @@ import (

    "openreplay/backend/pkg/db/postgres"
    "openreplay/backend/pkg/intervals"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/queue"
    "openreplay/backend/pkg/token"
)

//
func main() {
    metrics := monitoring.New("integrations")

@@ -84,7 +82,7 @@ func main() {
            }
            sessionID = sessData.ID
        }
        producer.Produce(cfg.TopicAnalytics, sessionID, messages.Encode(event.IntegrationEvent))
        producer.Produce(cfg.TopicAnalytics, sessionID, event.IntegrationEvent.Encode())
    case err := <-manager.Errors:
        log.Printf("Integration error: %v\n", err)
    case i := <-manager.RequestDataUpdates:

@@ -3,9 +3,10 @@ package main

import (
    "context"
    "log"
    "openreplay/backend/pkg/queue/types"
    "openreplay/backend/pkg/pprof"
    "os"
    "os/signal"
    "strings"
    "syscall"
    "time"

@@ -13,13 +14,15 @@ import (
    "openreplay/backend/internal/sink/assetscache"
    "openreplay/backend/internal/sink/oswriter"
    "openreplay/backend/internal/storage"
    . "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/queue"
    "openreplay/backend/pkg/url/assets"
)

func main() {
    pprof.StartProfilingServer()

    metrics := monitoring.New("sink")

    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

@@ -35,9 +38,10 @@ func main() {
    producer := queue.NewProducer(cfg.MessageSizeLimit, true)
    defer producer.Close(cfg.ProducerCloseTimeout)
    rewriter := assets.NewRewriter(cfg.AssetsOrigin)
    assetMessageHandler := assetscache.New(cfg, rewriter, producer)
    assetMessageHandler := assetscache.New(cfg, rewriter, producer, metrics)

    counter := storage.NewLogCounter()
    // Session message metrics
    totalMessages, err := metrics.RegisterCounter("messages_total")
    if err != nil {
        log.Printf("can't create messages_total metric: %s", err)

@@ -51,64 +55,96 @@ func main() {
        log.Printf("can't create messages_size metric: %s", err)
    }

    consumer := queue.NewMessageConsumer(
    msgHandler := func(msg messages.Message) {
        // [METRICS] Increase the number of processed messages
        totalMessages.Add(context.Background(), 1)

        // Send SessionEnd trigger to storage service
        if msg.TypeID() == messages.MsgSessionEnd {
            if err := producer.Produce(cfg.TopicTrigger, msg.SessionID(), msg.Encode()); err != nil {
                log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, msg.SessionID())
            }
            return
        }

        // Process assets
        if msg.TypeID() == messages.MsgSetNodeAttributeURLBased ||
            msg.TypeID() == messages.MsgSetCSSDataURLBased ||
            msg.TypeID() == messages.MsgCSSInsertRuleURLBased ||
            msg.TypeID() == messages.MsgAdoptedSSReplaceURLBased ||
            msg.TypeID() == messages.MsgAdoptedSSInsertRuleURLBased {
            m := msg.Decode()
            if m == nil {
                log.Printf("assets decode err, info: %s", msg.Meta().Batch().Info())
                return
            }
            msg = assetMessageHandler.ParseAssets(m)
        }

        // Filter message
        if !messages.IsReplayerType(msg.TypeID()) {
            return
        }

        // If message timestamp is empty, use at least ts of session start
        ts := msg.Meta().Timestamp
        if ts == 0 {
            log.Printf("zero ts; sessID: %d, msgType: %d", msg.SessionID(), msg.TypeID())
        } else {
            // Log ts of last processed message
            counter.Update(msg.SessionID(), time.UnixMilli(ts))
        }

        // Write encoded message with index to session file
        data := msg.EncodeWithIndex()
        if data == nil {
            log.Printf("can't encode with index, err: %s", err)
            return
        }
        wasWritten := false // To avoid timestamp duplicates in original mob file
        if messages.IsDOMType(msg.TypeID()) {
            if err := writer.WriteDOM(msg.SessionID(), data); err != nil {
                if strings.Contains(err.Error(), "not a directory") {
                    // Trying to write data to mob file by original path
                    oldErr := writer.WriteMOB(msg.SessionID(), data)
                    if oldErr != nil {
                        log.Printf("MOB Writer error: %s, prev DOM error: %s, info: %s", oldErr, err, msg.Meta().Batch().Info())
                    } else {
                        wasWritten = true
                    }
                } else {
                    log.Printf("DOM Writer error: %s, info: %s", err, msg.Meta().Batch().Info())
                }
            }
        }
        if !messages.IsDOMType(msg.TypeID()) || msg.TypeID() == messages.MsgTimestamp {
            // TODO: write only necessary timestamps
            if err := writer.WriteDEV(msg.SessionID(), data); err != nil {
                if strings.Contains(err.Error(), "not a directory") {
                    if !wasWritten {
                        // Trying to write data to mob file by original path
                        oldErr := writer.WriteMOB(msg.SessionID(), data)
                        if oldErr != nil {
                            log.Printf("MOB Writer error: %s, prev DEV error: %s, info: %s", oldErr, err, msg.Meta().Batch().Info())
                        }
                    }
                } else {
                    log.Printf("Devtools Writer error: %s, info: %s", err, msg.Meta().Batch().Info())
                }
            }
        }

        // [METRICS] Increase the number of written to the files messages and the message size
        messageSize.Record(context.Background(), float64(len(data)))
        savedMessages.Add(context.Background(), 1)
    }

    consumer := queue.NewConsumer(
        cfg.GroupSink,
        []string{
            cfg.TopicRawWeb,
        },
        func(sessionID uint64, iter Iterator, meta *types.Meta) {
            for iter.Next() {
                // [METRICS] Increase the number of processed messages
                totalMessages.Add(context.Background(), 1)

                // Send SessionEnd trigger to storage service
                if iter.Type() == MsgSessionEnd {
                    if err := producer.Produce(cfg.TopicTrigger, sessionID, iter.Message().Encode()); err != nil {
                        log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, sessionID)
                    }
                    continue
                }

                msg := iter.Message()
                // Process assets
                if iter.Type() == MsgSetNodeAttributeURLBased ||
                    iter.Type() == MsgSetCSSDataURLBased ||
                    iter.Type() == MsgCSSInsertRuleURLBased ||
                    iter.Type() == MsgAdoptedSSReplaceURLBased ||
                    iter.Type() == MsgAdoptedSSInsertRuleURLBased {
                    m := msg.Decode()
                    if m == nil {
                        return
                    }
                    msg = assetMessageHandler.ParseAssets(sessionID, m) // TODO: filter type only once (use iterator inside or bring ParseAssets out here).
                }

                // Filter message
                if !IsReplayerType(msg.TypeID()) {
                    continue
                }

                // If message timestamp is empty, use at least ts of session start
                ts := msg.Meta().Timestamp
                if ts == 0 {
                    log.Printf("zero ts; sessID: %d, msgType: %d", sessionID, iter.Type())
                } else {
                    // Log ts of last processed message
                    counter.Update(sessionID, time.UnixMilli(ts))
                }

                // Write encoded message with index to session file
                data := msg.EncodeWithIndex()
                if err := writer.Write(sessionID, data); err != nil {
                    log.Printf("Writer error: %v\n", err)
                }

                // [METRICS] Increase the number of written to the files messages and the message size
                messageSize.Record(context.Background(), float64(len(data)))
                savedMessages.Add(context.Background(), 1)
            }
            iter.Close()
        },
        messages.NewMessageIterator(msgHandler, nil, false),
        false,
        cfg.MessageSizeLimit,
    )

@@ -122,6 +158,9 @@ func main() {
        select {
        case sig := <-sigchan:
            log.Printf("Caught signal %v: terminating\n", sig)
            if err := writer.CloseAll(); err != nil {
                log.Printf("closeAll error: %v\n", err)
            }
            if err := consumer.Commit(); err != nil {
                log.Printf("can't commit messages: %s", err)
            }

@@ -129,7 +168,7 @@ func main() {
            os.Exit(0)
        case <-tick:
            if err := writer.SyncAll(); err != nil {
                log.Fatalf("Sync error: %v\n", err)
                log.Fatalf("sync error: %v\n", err)
            }
            counter.Print()
            if err := consumer.Commit(); err != nil {

@@ -142,5 +181,4 @@ func main() {
        }
    }
}

}

@@ -2,10 +2,8 @@ package main

import (
    "log"
    "openreplay/backend/pkg/queue/types"
    "os"
    "os/signal"
    "strconv"
    "syscall"
    "time"

@@ -38,24 +36,24 @@ func main() {
        log.Fatalf("can't init sessionFinder module: %s", err)
    }

    consumer := queue.NewMessageConsumer(
    consumer := queue.NewConsumer(
        cfg.GroupStorage,
        []string{
            cfg.TopicTrigger,
        },
        func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
            for iter.Next() {
                if iter.Type() == messages.MsgSessionEnd {
                    msg := iter.Message().Decode().(*messages.SessionEnd)
                    if err := srv.UploadKey(strconv.FormatUint(sessionID, 10), 5); err != nil {
                        log.Printf("can't find session: %d", sessionID)
                        sessionFinder.Find(sessionID, msg.Timestamp)
                    }
                    // Log timestamp of last processed session
                    counter.Update(sessionID, time.UnixMilli(meta.Timestamp))
                }
            }
        },
        messages.NewMessageIterator(
            func(msg messages.Message) {
                sesEnd := msg.(*messages.SessionEnd)
                if err := srv.UploadSessionFiles(sesEnd); err != nil {
                    log.Printf("can't find session: %d", msg.SessionID())
                    sessionFinder.Find(msg.SessionID(), sesEnd.Timestamp)
                }
                // Log timestamp of last processed session
                counter.Update(msg.SessionID(), time.UnixMilli(msg.Meta().Batch().Timestamp()))
            },
            []int{messages.MsgSessionEnd},
            true,
        ),
        true,
        cfg.MessageSizeLimit,
    )

@@ -5,6 +5,7 @@ go 1.18
require (
    cloud.google.com/go/logging v1.4.2
    github.com/ClickHouse/clickhouse-go/v2 v2.2.0
    github.com/Masterminds/semver v1.5.0
    github.com/aws/aws-sdk-go v1.44.98
    github.com/btcsuite/btcutil v1.0.2
    github.com/elastic/go-elasticsearch/v7 v7.13.1

@@ -64,6 +64,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJerTHwaBxut1C0=
github.com/ClickHouse/clickhouse-go/v2 v2.2.0/go.mod h1:8f2XZUi7XoeU+uPIytSi1cvx8fmJxi7vIgqpvYTF1+o=
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=

@@ -33,6 +33,11 @@ type cacher struct {
    sizeLimit        int
    downloadedAssets syncfloat64.Counter
    requestHeaders   map[string]string
    workers          *WorkerPool
}

func (c *cacher) CanCache() bool {
    return c.workers.CanAddTask()
}

func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {

@@ -44,7 +49,7 @@ func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
    if err != nil {
        log.Printf("can't create downloaded_assets metric: %s", err)
    }
    return &cacher{
    c := &cacher{
        timeoutMap: newTimeoutMap(),
        s3:         storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets),
        httpClient: &http.Client{

@@ -60,47 +65,48 @@ func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
        downloadedAssets: downloadedAssets,
        requestHeaders:   cfg.AssetsRequestHeaders,
    }
    c.workers = NewPool(64, c.CacheFile)
    return c
}

func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, urlContext string, isJS bool) {
    var cachePath string
    if isJS {
        cachePath = assets.GetCachePathForJS(requestURL)
    } else {
        cachePath = assets.GetCachePathForAssets(sessionID, requestURL)
    }
    if c.timeoutMap.contains(cachePath) {
        return
    }
    c.timeoutMap.add(cachePath)
    crTime := c.s3.GetCreationTime(cachePath)
    if crTime != nil && crTime.After(time.Now().Add(-MAX_STORAGE_TIME)) { // recently uploaded
        return
    }
func (c *cacher) CacheFile(task *Task) {
    c.cacheURL(task)
}

    req, _ := http.NewRequest("GET", requestURL, nil)
    req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0")
func (c *cacher) cacheURL(t *Task) {
    t.retries--
    req, _ := http.NewRequest("GET", t.requestURL, nil)
    if t.retries%2 == 0 {
        req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0")
    }
    for k, v := range c.requestHeaders {
        req.Header.Set(k, v)
    }
    res, err := c.httpClient.Do(req)
    if err != nil {
        c.Errors <- errors.Wrap(err, urlContext)
        c.Errors <- errors.Wrap(err, t.urlContext)
        return
    }
    defer res.Body.Close()
    if res.StatusCode >= 400 {
        // TODO: retry
        c.Errors <- errors.Wrap(fmt.Errorf("Status code is %v, ", res.StatusCode), urlContext)
        printErr := true
        // Retry 403 error
        if res.StatusCode == 403 && t.retries > 0 {
            c.workers.AddTask(t)
            printErr = false
        }
        if printErr {
            c.Errors <- errors.Wrap(fmt.Errorf("Status code is %v, ", res.StatusCode), t.urlContext)
        }
        return
    }
    data, err := ioutil.ReadAll(io.LimitReader(res.Body, int64(c.sizeLimit+1)))
    if err != nil {
        c.Errors <- errors.Wrap(err, urlContext)
        c.Errors <- errors.Wrap(err, t.urlContext)
        return
    }
    if len(data) > c.sizeLimit {
        c.Errors <- errors.Wrap(errors.New("Maximum size exceeded"), urlContext)
        c.Errors <- errors.Wrap(errors.New("Maximum size exceeded"), t.urlContext)
        return
    }

@@ -112,44 +118,94 @@ func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, urlCo

    strData := string(data)
    if isCSS {
        strData = c.rewriter.RewriteCSS(sessionID, requestURL, strData) // TODO: one method for rewrite and return list
        strData = c.rewriter.RewriteCSS(t.sessionID, t.requestURL, strData) // TODO: one method for rewrite and return list
    }

    // TODO: implement in streams
    err = c.s3.Upload(strings.NewReader(strData), cachePath, contentType, false)
    err = c.s3.Upload(strings.NewReader(strData), t.cachePath, contentType, false)
    if err != nil {
        c.Errors <- errors.Wrap(err, urlContext)
        c.Errors <- errors.Wrap(err, t.urlContext)
        return
    }
    c.downloadedAssets.Add(context.Background(), 1)

    if isCSS {
        if depth > 0 {
        if t.depth > 0 {
            for _, extractedURL := range assets.ExtractURLsFromCSS(string(data)) {
                if fullURL, cachable := assets.GetFullCachableURL(requestURL, extractedURL); cachable {
                    go c.cacheURL(fullURL, sessionID, depth-1, urlContext+"\n -> "+fullURL, false)
                if fullURL, cachable := assets.GetFullCachableURL(t.requestURL, extractedURL); cachable {
                    c.checkTask(&Task{
                        requestURL: fullURL,
                        sessionID:  t.sessionID,
                        depth:      t.depth - 1,
                        urlContext: t.urlContext + "\n -> " + fullURL,
                        isJS:       false,
                        retries:    setRetries(),
                    })
                }
            }
            if err != nil {
                c.Errors <- errors.Wrap(err, urlContext)
                c.Errors <- errors.Wrap(err, t.urlContext)
                return
            }
        } else {
            c.Errors <- errors.Wrap(errors.New("Maximum recursion cache depth exceeded"), urlContext)
            c.Errors <- errors.Wrap(errors.New("Maximum recursion cache depth exceeded"), t.urlContext)
            return
        }
    }
    return
}

func (c *cacher) checkTask(newTask *Task) {
    // check if file was recently uploaded
    var cachePath string
    if newTask.isJS {
        cachePath = assets.GetCachePathForJS(newTask.requestURL)
    } else {
        cachePath = assets.GetCachePathForAssets(newTask.sessionID, newTask.requestURL)
    }
    if c.timeoutMap.contains(cachePath) {
        return
    }
    c.timeoutMap.add(cachePath)
    crTime := c.s3.GetCreationTime(cachePath)
    if crTime != nil && crTime.After(time.Now().Add(-MAX_STORAGE_TIME)) {
        return
    }
    // add new file in queue to download
    newTask.cachePath = cachePath
    c.workers.AddTask(newTask)
}

func (c *cacher) CacheJSFile(sourceURL string) {
    go c.cacheURL(sourceURL, 0, 0, sourceURL, true)
    c.checkTask(&Task{
        requestURL: sourceURL,
        sessionID:  0,
        depth:      0,
        urlContext: sourceURL,
        isJS:       true,
        retries:    setRetries(),
    })
}

func (c *cacher) CacheURL(sessionID uint64, fullURL string) {
    go c.cacheURL(fullURL, sessionID, MAX_CACHE_DEPTH, fullURL, false)
    c.checkTask(&Task{
        requestURL: fullURL,
        sessionID:  sessionID,
        depth:      MAX_CACHE_DEPTH,
        urlContext: fullURL,
        isJS:       false,
        retries:    setRetries(),
    })
}

func (c *cacher) UpdateTimeouts() {
    c.timeoutMap.deleteOutdated()
}

func (c *cacher) Stop() {
    c.workers.Stop()
}

func setRetries() int {
    return 10
}

80  backend/internal/assets/cacher/pool.go  Normal file

@@ -0,0 +1,80 @@
package cacher

import (
    "log"
    "sync"
)

type Task struct {
    requestURL string
    sessionID  uint64
    depth      byte
    urlContext string
    isJS       bool
    cachePath  string
    retries    int
}

type WorkerPool struct {
    tasks chan *Task
    wg    sync.WaitGroup
    done  chan struct{}
    term  sync.Once
    size  int
    job   Job
}

func (p *WorkerPool) CanAddTask() bool {
    if len(p.tasks) < cap(p.tasks) {
        return true
    }
    return false
}

type Job func(task *Task)

func NewPool(size int, job Job) *WorkerPool {
    newPool := &WorkerPool{
        tasks: make(chan *Task, 128),
        done:  make(chan struct{}),
        size:  size,
        job:   job,
    }
    newPool.init()
    return newPool
}

func (p *WorkerPool) init() {
    p.wg.Add(p.size)
    for i := 0; i < p.size; i++ {
        go p.worker()
    }
}

func (p *WorkerPool) worker() {
    for {
        select {
        case newTask := <-p.tasks:
            p.job(newTask)
        case <-p.done:
            p.wg.Done()
            return
        }
    }
}

func (p *WorkerPool) AddTask(task *Task) {
    if task.retries <= 0 {
        return
    }
    p.tasks <- task
}

func (p *WorkerPool) Stop() {
    log.Printf("stopping workers")
    p.term.Do(func() {
        close(p.done)
    })
    p.wg.Wait()
    log.Printf("all workers have been stopped")
}
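Since pool.go is fully visible above, its lifecycle is easy to demonstrate. A short usage sketch, assuming it sits in the same cacher package (the URLs are invented). Two properties worth noting from the code itself: AddTask blocks once the 128-slot buffer is full, which is why callers guard with CanAddTask, and Stop does not drain queued tasks — each worker exits at its next select.

package cacher

import "log"

// ExampleWorkerPool sketches the pool lifecycle used by the cacher above.
func ExampleWorkerPool() {
    pool := NewPool(4, func(t *Task) {
        log.Printf("downloading %s (retries left: %d)", t.requestURL, t.retries)
    })
    for _, u := range []string{"https://a.example/x.css", "https://b.example/y.js"} {
        if !pool.CanAddTask() {
            log.Printf("queue full, skipping %s", u) // backpressure instead of blocking
            continue
        }
        pool.AddTask(&Task{requestURL: u, retries: 3})
    }
    pool.Stop() // closes done and waits for all workers to exit
}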

@@ -17,9 +17,6 @@ import (
)

func readFile(path string) (map[string]string, error) {
    if path == "" {
        return nil, fmt.Errorf("file path is empty")
    }
    file, err := os.Open(path)
    if err != nil {
        return nil, fmt.Errorf("can't open file: %s", err)

@@ -33,13 +30,23 @@ func readFile(path string) (map[string]string, error) {
    res := make(map[string]string)
    lines := strings.Split(string(data), "\n")
    for _, line := range lines {
        if len(line) == 0 {
            continue
        }
        env := strings.Split(line, "=")
        if len(env) < 2 {
            continue
        }
        res[env[0]] = env[1]
    }
    return res, nil
}

func parseFile(a interface{}, path string) {
    // Skip parsing process without logs if we don't have path to config file
    if path == "" {
        return
    }
    envs, err := readFile(path)
    if err != nil {
        log.Printf("can't parse config file: %s", err)
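One caveat in the parser above: strings.Split cuts on every "=", so a value that itself contains "=" (a DSN, a base64 blob) is silently truncated to its first segment; strings.SplitN(line, "=", 2) would preserve it. A small demonstration:

package main

import (
    "fmt"
    "strings"
)

func main() {
    line := "GROUP_CACHE=from_file"
    env := strings.Split(line, "=")
    fmt.Println(env[0], env[1]) // GROUP_CACHE from_file

    // A value containing '=' loses its tail with plain Split.
    tricky := "DSN=postgres://u:p@host/db?sslmode=disable"
    fmt.Println(strings.Split(tricky, "=")[1])     // postgres://u:p@host/db?sslmode
    fmt.Println(strings.SplitN(tricky, "=", 2)[1]) // full value preserved
}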

@@ -14,6 +14,7 @@ type Config struct {
    TopicRawWeb      string `env:"TOPIC_RAW_WEB,required"`
    ProducerTimeout  int    `env:"PRODUCER_TIMEOUT,default=2000"`
    PartitionsNumber int    `env:"PARTITIONS_NUMBER,required"`
    UseEncryption    bool   `env:"USE_ENCRYPTION,default=false"`
}

func New() *Config {

@@ -17,6 +17,8 @@ type Config struct {
    CacheAssets          bool   `env:"CACHE_ASSETS,required"`
    AssetsOrigin         string `env:"ASSETS_ORIGIN,required"`
    ProducerCloseTimeout int    `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
    CacheThreshold       int64  `env:"CACHE_THRESHOLD,default=75"`
    CacheExpiration      int64  `env:"CACHE_EXPIRATION,default=120"`
}

func New() *Config {

@@ -5,7 +5,8 @@ import (
    . "openreplay/backend/pkg/messages"
)

func (mi *Saver) InsertMessage(sessionID uint64, msg Message) error {
func (mi *Saver) InsertMessage(msg Message) error {
    sessionID := msg.SessionID()
    switch m := msg.(type) {
    // Common
    case *Metadata:

@@ -37,23 +38,16 @@ func (mi *Saver) InsertMessage(sessionID uint64, msg Message) error {
    case *PageEvent:
        mi.sendToFTS(msg, sessionID)
        return mi.pg.InsertWebPageEvent(sessionID, m)
    case *ErrorEvent:
        return mi.pg.InsertWebErrorEvent(sessionID, m)
    case *FetchEvent:
        mi.sendToFTS(msg, sessionID)
        return mi.pg.InsertWebFetchEvent(sessionID, m)
    case *GraphQLEvent:
        mi.sendToFTS(msg, sessionID)
        return mi.pg.InsertWebGraphQLEvent(sessionID, m)
    case *JSException:
        return mi.pg.InsertWebJSException(m)
    case *IntegrationEvent:
        return mi.pg.InsertWebErrorEvent(sessionID, &ErrorEvent{
            MessageID: m.Meta().Index,
            Timestamp: m.Timestamp,
            Source:    m.Source,
            Name:      m.Name,
            Message:   m.Message,
            Payload:   m.Payload,
        })
        return mi.pg.InsertWebIntegrationEvent(m)

    // IOS
    case *IOSSessionStart:

@@ -16,12 +16,10 @@ func (si *Saver) InsertStats(session *Session, msg Message) error {
        return si.pg.InsertWebStatsPerformance(session.SessionID, m)
    case *ResourceEvent:
        return si.pg.InsertWebStatsResourceEvent(session.SessionID, m)
    case *LongTask:
        return si.pg.InsertWebStatsLongtask(session.SessionID, m)
    }
    return nil
}

func (si *Saver) CommitStats(optimize bool) error {
func (si *Saver) CommitStats() error {
    return nil
}

@@ -69,12 +69,12 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
    }
    // TODO: if EXPIRED => send message for two sessions association
    expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
    tokenData = &token.TokenData{sessionID, expTime.UnixMilli()}
    tokenData = &token.TokenData{sessionID, 0, expTime.UnixMilli()}

    country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)

    // The difference with web is mostly here:
    e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, Encode(&IOSSessionStart{
    sessStart := &IOSSessionStart{
        Timestamp:      req.Timestamp,
        ProjectID:      uint64(p.ProjectID),
        TrackerVersion: req.TrackerVersion,

@@ -85,7 +85,8 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
        UserDevice:     ios.MapIOSDevice(req.UserDevice),
        UserDeviceType: ios.GetIOSDeviceType(req.UserDevice),
        UserCountry:    country,
    }))
    }
    e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, sessStart.Encode())
}

    ResponseWithJSON(w, &StartIOSSessionResponse{

@@ -3,6 +3,7 @@ package router
import (
    "encoding/json"
    "errors"
    "github.com/Masterminds/semver"
    "go.opentelemetry.io/otel/attribute"
    "io"
    "log"

@@ -37,6 +38,22 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) (
    return bodyBytes, nil
}

func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) (ts uint64) {
    ts = uint64(req.Timestamp)
    c, err := semver.NewConstraint(">=4.1.6")
    if err != nil {
        return
    }
    v, err := semver.NewVersion(req.TrackerVersion)
    if err != nil {
        return
    }
    if c.Check(v) {
        return uint64(startTimeMili)
    }
    return
}

func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
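getSessionTimestamp above trusts the server-side start time only for trackers at 4.1.6 or newer, and silently keeps the client-sent timestamp whenever the version string fails to parse. The same gate in isolation, using the Masterminds/semver v1 API this commit adds to go.mod:

package main

import (
    "fmt"

    "github.com/Masterminds/semver"
)

func main() {
    c, _ := semver.NewConstraint(">=4.1.6")
    for _, tracker := range []string{"4.1.5", "4.1.6", "5.0.0", "not-a-version"} {
        v, err := semver.NewVersion(tracker)
        if err != nil {
            fmt.Printf("%s: unparseable, keep client timestamp\n", tracker)
            continue
        }
        fmt.Printf("%s: trust server start time = %v\n", tracker, c.Check(v))
    }
}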

@@ -91,17 +108,22 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
        ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
        return
    }
    sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
    startTimeMili := startTime.UnixMilli()
    sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili))
    if err != nil {
        ResponseWithError(w, http.StatusInternalServerError, err)
        return
    }
    // TODO: if EXPIRED => send message for two sessions association
    expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
    tokenData = &token.TokenData{ID: sessionID, ExpTime: expTime.UnixMilli()}
    tokenData = &token.TokenData{
        ID:      sessionID,
        Delay:   startTimeMili - req.Timestamp,
        ExpTime: expTime.UnixMilli(),
    }

    sessionStart := &SessionStart{
        Timestamp:      req.Timestamp,
        Timestamp:      getSessionTimestamp(req, startTimeMili),
        ProjectID:      uint64(p.ProjectID),
        TrackerVersion: req.TrackerVersion,
        RevID:          req.RevID,

@@ -125,7 +147,7 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
    }

    // Send sessionStart message to kafka
    if err := e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, Encode(sessionStart)); err != nil {
    if err := e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, sessionStart.Encode()); err != nil {
        log.Printf("can't send session start: %s", err)
    }
}

@@ -137,6 +159,7 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
        ProjectID:       strconv.FormatUint(uint64(p.ProjectID), 10),
        BeaconSizeLimit: e.cfg.BeaconSizeLimit,
        StartTimestamp:  int64(flakeid.ExtractTimestamp(tokenData.ID)),
        Delay:           tokenData.Delay,
    })
}
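The new Delay field records the gap between the client-stamped request and the server receive time — network latency plus clock skew — and is echoed back in the start-session response so the tracker can account for it. The computation in isolation, with invented values:

package main

import (
    "fmt"
    "time"
)

func main() {
    // Timestamp as sent by the tracker (client clock, Unix ms).
    clientTS := time.Now().Add(-250 * time.Millisecond).UnixMilli()

    // Server receive time (server clock, Unix ms).
    serverTS := time.Now().UnixMilli()

    // Delay = server time - client time: latency plus clock skew.
    delay := serverTS - clientTS
    fmt.Printf("delay: %dms\n", delay)
}

This is also why StartSessionRequest.Timestamp changes from uint64 to int64 in the next hunk: the subtraction must be able to go negative when the client clock runs ahead of the server.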

@@ -4,7 +4,7 @@ type StartSessionRequest struct {
    Token          string  `json:"token"`
    UserUUID       *string `json:"userUUID"`
    RevID          string  `json:"revID"`
    Timestamp      uint64  `json:"timestamp"`
    Timestamp      int64   `json:"timestamp"`
    TrackerVersion string  `json:"trackerVersion"`
    IsSnippet      bool    `json:"isSnippet"`
    DeviceMemory   uint64  `json:"deviceMemory"`

@@ -5,6 +5,8 @@ import (
    "fmt"
    "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
    "log"
    log2 "openreplay/backend/pkg/log"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/monitoring"
    "time"
)

@@ -27,9 +29,10 @@ type SessionEnder struct {
    timeCtrl       *timeController
    activeSessions syncfloat64.UpDownCounter
    totalSessions  syncfloat64.Counter
    stats          log2.QueueStats
}

func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder, error) {
func New(metrics *monitoring.Metrics, timeout int64, parts int, stats log2.QueueStats) (*SessionEnder, error) {
    if metrics == nil {
        return nil, fmt.Errorf("metrics module is empty")
    }

@@ -48,24 +51,31 @@ func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder,
        timeCtrl:       NewTimeController(parts),
        activeSessions: activeSessions,
        totalSessions:  totalSessions,
        stats:          stats,
    }, nil
}

// UpdateSession save timestamp for new sessions and update for existing sessions
func (se *SessionEnder) UpdateSession(sessionID uint64, timestamp, msgTimestamp int64) {
    localTS := time.Now().UnixMilli()
    currTS := timestamp
    if currTS == 0 {
func (se *SessionEnder) UpdateSession(msg messages.Message) {
    se.stats.Collect(msg)
    var (
        sessionID      = msg.Meta().SessionID()
        batchTimestamp = msg.Meta().Batch().Timestamp()
        msgTimestamp   = msg.Meta().Timestamp
        localTimestamp = time.Now().UnixMilli()
    )
    if batchTimestamp == 0 {
        log.Printf("got empty timestamp for sessionID: %d", sessionID)
        return
    }
    se.timeCtrl.UpdateTime(sessionID, currTS)
    se.timeCtrl.UpdateTime(sessionID, batchTimestamp)
    sess, ok := se.sessions[sessionID]
    if !ok {
        // Register new session
        se.sessions[sessionID] = &session{
            lastTimestamp: currTS,       // timestamp from message broker
            lastUpdate:    localTS,      // local timestamp
            lastUserTime:  msgTimestamp, // last timestamp from user's machine
            lastTimestamp: batchTimestamp, // timestamp from message broker
            lastUpdate:    localTimestamp, // local timestamp
            lastUserTime:  msgTimestamp,   // last timestamp from user's machine
            isEnded:       false,
        }
        se.activeSessions.Add(context.Background(), 1)

@@ -77,9 +87,9 @@ func (se *SessionEnder) UpdateSession(sessionID uint64, timestamp, msgTimestamp
        sess.lastUserTime = msgTimestamp
    }
    // Keep information about the latest message for generating sessionEnd trigger
    if currTS > sess.lastTimestamp {
        sess.lastTimestamp = currTS
        sess.lastUpdate = localTS
    if batchTimestamp > sess.lastTimestamp {
        sess.lastTimestamp = batchTimestamp
        sess.lastUpdate = localTimestamp
        sess.isEnded = false
    }
}

@@ -1,35 +1,111 @@
package assetscache

import (
    "context"
    "crypto/md5"
    "go.opentelemetry.io/otel/metric/instrument/syncfloat64"
    "io"
    "log"
    "net/url"
    "openreplay/backend/internal/config/sink"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/queue/types"
    "openreplay/backend/pkg/url/assets"
    "sync"
    "time"
)

type AssetsCache struct {
    cfg      *sink.Config
    rewriter *assets.Rewriter
    producer types.Producer
type CachedAsset struct {
    msg string
    ts  time.Time
}

func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
    return &AssetsCache{
        cfg:      cfg,
        rewriter: rewriter,
        producer: producer,
type AssetsCache struct {
    mutex         sync.RWMutex
    cfg           *sink.Config
    rewriter      *assets.Rewriter
    producer      types.Producer
    cache         map[string]*CachedAsset
    totalAssets   syncfloat64.Counter
    cachedAssets  syncfloat64.Counter
    skippedAssets syncfloat64.Counter
    assetSize     syncfloat64.Histogram
    assetDuration syncfloat64.Histogram
}

func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics *monitoring.Metrics) *AssetsCache {
    // Assets metrics
    totalAssets, err := metrics.RegisterCounter("assets_total")
    if err != nil {
        log.Printf("can't create assets_total metric: %s", err)
    }
    cachedAssets, err := metrics.RegisterCounter("assets_cached")
    if err != nil {
        log.Printf("can't create assets_cached metric: %s", err)
    }
    skippedAssets, err := metrics.RegisterCounter("assets_skipped")
    if err != nil {
        log.Printf("can't create assets_skipped metric: %s", err)
    }
    assetSize, err := metrics.RegisterHistogram("asset_size")
    if err != nil {
        log.Printf("can't create asset_size metric: %s", err)
    }
    assetDuration, err := metrics.RegisterHistogram("asset_duration")
    if err != nil {
        log.Printf("can't create asset_duration metric: %s", err)
    }
    assetsCache := &AssetsCache{
        cfg:           cfg,
        rewriter:      rewriter,
        producer:      producer,
        cache:         make(map[string]*CachedAsset, 64),
        totalAssets:   totalAssets,
        cachedAssets:  cachedAssets,
        skippedAssets: skippedAssets,
        assetSize:     assetSize,
        assetDuration: assetDuration,
    }
    go assetsCache.cleaner()
    return assetsCache
}

func (e *AssetsCache) cleaner() {
    cleanTick := time.Tick(time.Minute * 30)
    for {
        select {
        case <-cleanTick:
            e.clearCache()
        }
    }
}

func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.Message {
func (e *AssetsCache) clearCache() {
    e.mutex.Lock()
    defer e.mutex.Unlock()

    now := time.Now()
    cacheSize := len(e.cache)
    deleted := 0

    for id, cache := range e.cache {
        if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
            deleted++
            delete(e.cache, id)
        }
    }
    log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize)
}

func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
    switch m := msg.(type) {
    case *messages.SetNodeAttributeURLBased:
        if m.Name == "src" || m.Name == "href" {
            newMsg := &messages.SetNodeAttribute{
                ID:    m.ID,
                Name:  m.Name,
                Value: e.handleURL(sessID, m.BaseURL, m.Value),
                Value: e.handleURL(m.SessionID(), m.BaseURL, m.Value),
            }
            newMsg.SetMeta(msg.Meta())
            return newMsg

@@ -37,7 +113,7 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
            newMsg := &messages.SetNodeAttribute{
                ID:    m.ID,
                Name:  m.Name,
                Value: e.handleCSS(sessID, m.BaseURL, m.Value),
                Value: e.handleCSS(m.SessionID(), m.BaseURL, m.Value),
            }
            newMsg.SetMeta(msg.Meta())
            return newMsg

@@ -45,7 +121,7 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
    case *messages.SetCSSDataURLBased:
        newMsg := &messages.SetCSSData{
            ID:   m.ID,
            Data: e.handleCSS(sessID, m.BaseURL, m.Data),
            Data: e.handleCSS(m.SessionID(), m.BaseURL, m.Data),
        }
        newMsg.SetMeta(msg.Meta())
        return newMsg

@@ -53,14 +129,14 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
        newMsg := &messages.CSSInsertRule{
            ID:    m.ID,
            Index: m.Index,
            Rule:  e.handleCSS(sessID, m.BaseURL, m.Rule),
            Rule:  e.handleCSS(m.SessionID(), m.BaseURL, m.Rule),
        }
        newMsg.SetMeta(msg.Meta())
        return newMsg
    case *messages.AdoptedSSReplaceURLBased:
        newMsg := &messages.AdoptedSSReplace{
            SheetID: m.SheetID,
            Text:    e.handleCSS(sessID, m.BaseURL, m.Text),
            Text:    e.handleCSS(m.SessionID(), m.BaseURL, m.Text),
        }
        newMsg.SetMeta(msg.Meta())
        return newMsg

@@ -68,7 +144,7 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
        newMsg := &messages.AdoptedSSInsertRule{
            SheetID: m.SheetID,
            Index:   m.Index,
            Rule:    e.handleCSS(sessID, m.BaseURL, m.Rule),
            Rule:    e.handleCSS(m.SessionID(), m.BaseURL, m.Rule),
        }
        newMsg.SetMeta(msg.Meta())
        return newMsg

@@ -78,10 +154,11 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.

func (e *AssetsCache) sendAssetForCache(sessionID uint64, baseURL string, relativeURL string) {
    if fullURL, cacheable := assets.GetFullCachableURL(baseURL, relativeURL); cacheable {
        assetMessage := &messages.AssetCache{URL: fullURL}
        if err := e.producer.Produce(
            e.cfg.TopicCache,
            sessionID,
            messages.Encode(&messages.AssetCache{URL: fullURL}),
            assetMessage.Encode(),
        ); err != nil {
            log.Printf("can't send asset to cache topic, sessID: %d, err: %s", sessionID, err)
        }

@@ -94,18 +171,72 @@ func (e *AssetsCache) sendAssetsForCacheFromCSS(sessionID uint64, baseURL string
    }
}

func (e *AssetsCache) handleURL(sessionID uint64, baseURL string, url string) string {
func (e *AssetsCache) handleURL(sessionID uint64, baseURL string, urlVal string) string {
    if e.cfg.CacheAssets {
        e.sendAssetForCache(sessionID, baseURL, url)
        return e.rewriter.RewriteURL(sessionID, baseURL, url)
        e.sendAssetForCache(sessionID, baseURL, urlVal)
        return e.rewriter.RewriteURL(sessionID, baseURL, urlVal)
    } else {
        return assets.ResolveURL(baseURL, urlVal)
    }
    return assets.ResolveURL(baseURL, url)
}

func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
    ctx := context.Background()
    e.totalAssets.Add(ctx, 1)
    // Try to find asset in cache
    h := md5.New()
    // Cut first part of url (scheme + host)
    u, err := url.Parse(baseURL)
    if err != nil {
        log.Printf("can't parse url: %s, err: %s", baseURL, err)
        if e.cfg.CacheAssets {
            e.sendAssetsForCacheFromCSS(sessionID, baseURL, css)
        }
        return e.getRewrittenCSS(sessionID, baseURL, css)
    }
    justUrl := u.Scheme + "://" + u.Host + "/"
    // Calculate hash sum of url + css
    io.WriteString(h, justUrl)
    io.WriteString(h, css)
    hash := string(h.Sum(nil))
    // Check the resulting hash in cache
    e.mutex.RLock()
    cachedAsset, ok := e.cache[hash]
    e.mutex.RUnlock()
    if ok {
        if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
            e.skippedAssets.Add(ctx, 1)
            return cachedAsset.msg
        }
    }
    // Send asset to download in assets service
    if e.cfg.CacheAssets {
        e.sendAssetsForCacheFromCSS(sessionID, baseURL, css)
        return e.rewriter.RewriteCSS(sessionID, baseURL, css)
    }
    return assets.ResolveCSS(baseURL, css)
    // Rewrite asset
    start := time.Now()
    res := e.getRewrittenCSS(sessionID, baseURL, css)
    duration := time.Now().Sub(start).Milliseconds()
    e.assetSize.Record(ctx, float64(len(res)))
    e.assetDuration.Record(ctx, float64(duration))
    // Save asset to cache if we spent more than threshold
    if duration > e.cfg.CacheThreshold {
        e.mutex.Lock()
        e.cache[hash] = &CachedAsset{
            msg: res,
            ts:  time.Now(),
        }
        e.mutex.Unlock()
        e.cachedAssets.Add(ctx, 1)
    }
    // Return rewritten asset
    return res
}

func (e *AssetsCache) getRewrittenCSS(sessionID uint64, url, css string) string {
    if e.cfg.CacheAssets {
        return e.rewriter.RewriteCSS(sessionID, url, css)
    } else {
        return assets.ResolveCSS(url, css)
    }
}
|
|
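handleCSS keys its in-memory cache on an MD5 of the page origin (scheme + host) plus the raw CSS, and a hit only counts while the entry is younger than CacheExpiration (in minutes). A minimal, runnable sketch of that keying and TTL check; the URL, CSS and 30-minute TTL below are illustrative, not taken from the real config:

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"net/url"
	"time"
)

// cachedAsset mirrors the shape used above: the rewritten CSS plus the time it was stored.
type cachedAsset struct {
	msg string
	ts  time.Time
}

// cacheKey reproduces the keying scheme from handleCSS: hash the page origin
// together with the raw CSS, so identical stylesheets served from different
// origins do not collide.
func cacheKey(baseURL, css string) (string, error) {
	u, err := url.Parse(baseURL)
	if err != nil {
		return "", err
	}
	h := md5.New()
	io.WriteString(h, u.Scheme+"://"+u.Host+"/")
	io.WriteString(h, css)
	return string(h.Sum(nil)), nil
}

func main() {
	cache := map[string]*cachedAsset{}
	const expirationMinutes = 30 // stands in for cfg.CacheExpiration

	key, _ := cacheKey("https://app.example.com/page", "body { color: red }")
	cache[key] = &cachedAsset{msg: "body { color: red }", ts: time.Now()}

	// Lookup path: a hit only counts while the entry is younger than the TTL.
	if a, ok := cache[key]; ok && int64(time.Since(a.ts).Minutes()) < expirationMinutes {
		fmt.Println("cache hit:", a.msg)
	}
}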
@@ -1,8 +1,11 @@
package oswriter

import (
+	"errors"
+	"log"
	"math"
	"os"
+	"path/filepath"
	"strconv"
	"time"
)

@@ -10,26 +13,26 @@ import (
type Writer struct {
	ulimit int
	dir    string
-	files  map[uint64]*os.File
-	atimes map[uint64]int64
+	files  map[string]*os.File
+	atimes map[string]int64
}

func NewWriter(ulimit uint16, dir string) *Writer {
	return &Writer{
		ulimit: int(ulimit),
		dir:    dir + "/",
-		files:  make(map[uint64]*os.File),
-		atimes: make(map[uint64]int64),
+		files:  make(map[string]*os.File),
+		atimes: make(map[string]int64),
	}
}

-func (w *Writer) open(key uint64) (*os.File, error) {
-	file, ok := w.files[key]
+func (w *Writer) open(fname string) (*os.File, error) {
+	file, ok := w.files[fname]
	if ok {
		return file, nil
	}
	if len(w.atimes) == w.ulimit {
-		var m_k uint64
+		var m_k string
		var m_t int64 = math.MaxInt64
		for k, t := range w.atimes {
			if t < m_t {

@@ -37,21 +40,38 @@ func (w *Writer) open(key uint64) (*os.File, error) {
				m_t = t
			}
		}
-		if err := w.Close(m_k); err != nil {
+		if err := w.close(m_k); err != nil {
			return nil, err
		}
	}
-	file, err := os.OpenFile(w.dir+strconv.FormatUint(key, 10), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
+
+	// mkdir if not exist
+	pathTo := w.dir + filepath.Dir(fname)
+	if info, err := os.Stat(pathTo); os.IsNotExist(err) {
+		if err := os.MkdirAll(pathTo, 0755); err != nil {
+			log.Printf("os.MkdirAll error: %s", err)
+		}
+	} else {
+		if err != nil {
+			return nil, err
+		}
+		if !info.IsDir() {
+			return nil, errors.New("not a directory")
+		}
+	}
+
+	file, err := os.OpenFile(w.dir+fname, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
	if err != nil {
		log.Printf("os.OpenFile error: %s", err)
		return nil, err
	}
-	w.files[key] = file
-	w.atimes[key] = time.Now().Unix()
+	w.files[fname] = file
+	w.atimes[fname] = time.Now().Unix()
	return file, nil
}

-func (w *Writer) Close(key uint64) error {
-	file := w.files[key]
+func (w *Writer) close(fname string) error {
+	file := w.files[fname]
	if file == nil {
		return nil
	}

@@ -61,21 +81,67 @@ func (w *Writer) Close(key uint64) error {
	if err := file.Close(); err != nil {
		return err
	}
-	delete(w.files, key)
-	delete(w.atimes, key)
+	delete(w.files, fname)
+	delete(w.atimes, fname)
	return nil
}

-func (w *Writer) Write(key uint64, data []byte) error {
-	file, err := w.open(key)
+func (w *Writer) WriteDOM(sid uint64, data []byte) error {
+	return w.write(strconv.FormatUint(sid, 10)+"/dom.mob", data)
+}
+
+func (w *Writer) WriteDEV(sid uint64, data []byte) error {
+	return w.write(strconv.FormatUint(sid, 10)+"/devtools.mob", data)
+}
+
+func (w *Writer) WriteMOB(sid uint64, data []byte) error {
+	// Use session id as a file name without directory
+	fname := strconv.FormatUint(sid, 10)
+	file, err := w.openWithoutDir(fname)
	if err != nil {
		return err
	}
+	// TODO: add check for the number of recorded bytes to file
	_, err = file.Write(data)
	return err
}

+func (w *Writer) write(fname string, data []byte) error {
+	file, err := w.open(fname)
+	if err != nil {
+		return err
+	}
+	_, err = file.Write(data)
+	return err
+}
+
+func (w *Writer) openWithoutDir(fname string) (*os.File, error) {
+	file, ok := w.files[fname]
+	if ok {
+		return file, nil
+	}
+	if len(w.atimes) == w.ulimit {
+		var m_k string
+		var m_t int64 = math.MaxInt64
+		for k, t := range w.atimes {
+			if t < m_t {
+				m_k = k
+				m_t = t
+			}
+		}
+		if err := w.close(m_k); err != nil {
+			return nil, err
+		}
+	}
+
+	file, err := os.OpenFile(w.dir+fname, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
+	if err != nil {
+		return nil, err
+	}
+	w.files[fname] = file
+	w.atimes[fname] = time.Now().Unix()
+	return file, nil
+}

func (w *Writer) SyncAll() error {
	for _, file := range w.files {
		if err := file.Sync(); err != nil {
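open and openWithoutDir cap the number of simultaneously open files at ulimit and, when the map is full, evict the handle with the smallest recorded access time. A self-contained sketch of that eviction scan; the map contents are made up for the demo:

package main

import (
	"fmt"
	"math"
)

// findOldest mirrors the scan in open()/openWithoutDir(): pick the entry with
// the smallest access time, i.e. the least recently used file, to close next.
func findOldest(atimes map[string]int64) string {
	var oldestKey string
	var oldestTs int64 = math.MaxInt64
	for k, t := range atimes {
		if t < oldestTs {
			oldestKey = k
			oldestTs = t
		}
	}
	return oldestKey
}

func main() {
	atimes := map[string]int64{
		"123/dom.mob":      1700000300,
		"123/devtools.mob": 1700000100, // least recently used
		"456/dom.mob":      1700000200,
	}
	fmt.Println("evict:", findOldest(atimes)) // evict: 123/devtools.mob
}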
backend/internal/storage/encryptor.go (new file, 17 lines)

@@ -0,0 +1,17 @@
package storage

import (
	"errors"
)

func GenerateEncryptionKey() []byte {
	return nil
}

func EncryptData(data, fullKey []byte) ([]byte, error) {
	return nil, errors.New("not supported")
}

func DecryptData(data, fullKey []byte) ([]byte, error) {
	return nil, errors.New("not supported")
}
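In this build EncryptData/DecryptData are stubs that always return "not supported". For illustration only, this is roughly what a working implementation could look like using AES-GCM; this is an assumption about one possible scheme, not the actual enterprise code:

package main

import (
	"crypto/aes"
	"crypto/cipher"
	"crypto/rand"
	"errors"
	"fmt"
	"io"
)

func encryptAESGCM(data, key []byte) ([]byte, error) {
	block, err := aes.NewCipher(key) // key must be 16, 24 or 32 bytes long
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	nonce := make([]byte, gcm.NonceSize())
	if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
		return nil, err
	}
	// Prepend the nonce so decryption can recover it.
	return gcm.Seal(nonce, nonce, data, nil), nil
}

func decryptAESGCM(data, key []byte) ([]byte, error) {
	block, err := aes.NewCipher(key)
	if err != nil {
		return nil, err
	}
	gcm, err := cipher.NewGCM(block)
	if err != nil {
		return nil, err
	}
	if len(data) < gcm.NonceSize() {
		return nil, errors.New("ciphertext too short")
	}
	nonce, ciphertext := data[:gcm.NonceSize()], data[gcm.NonceSize():]
	return gcm.Open(nil, nonce, ciphertext, nil)
}

func main() {
	key := make([]byte, 32)
	io.ReadFull(rand.Reader, key)
	ct, _ := encryptAESGCM([]byte("session payload"), key)
	pt, _ := decryptAESGCM(ct, key)
	fmt.Println(string(pt)) // session payload
}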
@@ -8,21 +8,26 @@ import (
	"log"
	config "openreplay/backend/internal/config/storage"
	"openreplay/backend/pkg/flakeid"
+	"openreplay/backend/pkg/messages"
	"openreplay/backend/pkg/monitoring"
	"openreplay/backend/pkg/storage"
	"os"
	"strconv"
+	"strings"
	"time"
)

type Storage struct {
-	cfg           *config.Config
-	s3            *storage.S3
-	startBytes    []byte
-	totalSessions syncfloat64.Counter
-	sessionSize   syncfloat64.Histogram
-	readingTime   syncfloat64.Histogram
-	archivingTime syncfloat64.Histogram
+	cfg        *config.Config
+	s3         *storage.S3
+	startBytes []byte
+
+	totalSessions       syncfloat64.Counter
+	sessionDOMSize      syncfloat64.Histogram
+	sessionDevtoolsSize syncfloat64.Histogram
+	readingDOMTime      syncfloat64.Histogram
+	readingTime         syncfloat64.Histogram
+	archivingTime       syncfloat64.Histogram
}

func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) {

@@ -37,10 +42,14 @@ func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) {
	if err != nil {
		log.Printf("can't create sessions_total metric: %s", err)
	}
-	sessionSize, err := metrics.RegisterHistogram("sessions_size")
+	sessionDOMSize, err := metrics.RegisterHistogram("sessions_size")
	if err != nil {
		log.Printf("can't create session_size metric: %s", err)
	}
+	sessionDevtoolsSize, err := metrics.RegisterHistogram("sessions_dt_size")
+	if err != nil {
+		log.Printf("can't create sessions_dt_size metric: %s", err)
+	}
	readingTime, err := metrics.RegisterHistogram("reading_duration")
	if err != nil {
		log.Printf("can't create reading_duration metric: %s", err)

@@ -50,17 +59,35 @@ func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) {
		log.Printf("can't create archiving_duration metric: %s", err)
	}
	return &Storage{
-		cfg:           cfg,
-		s3:            s3,
-		startBytes:    make([]byte, cfg.FileSplitSize),
-		totalSessions: totalSessions,
-		sessionSize:   sessionSize,
-		readingTime:   readingTime,
-		archivingTime: archivingTime,
+		cfg:                 cfg,
+		s3:                  s3,
+		startBytes:          make([]byte, cfg.FileSplitSize),
+		totalSessions:       totalSessions,
+		sessionDOMSize:      sessionDOMSize,
+		sessionDevtoolsSize: sessionDevtoolsSize,
+		readingTime:         readingTime,
+		archivingTime:       archivingTime,
	}, nil
}

-func (s *Storage) UploadKey(key string, retryCount int) error {
+func (s *Storage) UploadSessionFiles(msg *messages.SessionEnd) error {
+	sessionDir := strconv.FormatUint(msg.SessionID(), 10)
+	if err := s.uploadKey(msg.SessionID(), sessionDir+"/dom.mob", true, 5, msg.EncryptionKey); err != nil {
+		oldErr := s.uploadKey(msg.SessionID(), sessionDir, true, 5, msg.EncryptionKey)
+		if oldErr != nil {
+			return fmt.Errorf("upload file error: %s. failed checking mob file using old path: %s", err, oldErr)
+		}
+		// Exit method anyway because we don't have dev tools separation in prev version
+		return nil
+	}
+	if err := s.uploadKey(msg.SessionID(), sessionDir+"/devtools.mob", false, 4, msg.EncryptionKey); err != nil {
+		return err
+	}
+	return nil
+}
+
+// TODO: make a bit cleaner
+func (s *Storage) uploadKey(sessID uint64, key string, shouldSplit bool, retryCount int, encryptionKey string) error {
	if retryCount <= 0 {
		return nil
	}

@@ -68,7 +95,6 @@ func (s *Storage) UploadKey(key string, retryCount int) error {
	start := time.Now()
	file, err := os.Open(s.cfg.FSDir + "/" + key)
	if err != nil {
-		sessID, _ := strconv.ParseUint(key, 10, 64)
		return fmt.Errorf("File open error: %v; sessID: %s, part: %d, sessStart: %s\n",
			err, key, sessID%16,
			time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),

@@ -76,45 +102,134 @@ func (s *Storage) UploadKey(key string, retryCount int) error {
	}
	defer file.Close()

-	nRead, err := file.Read(s.startBytes)
-	if err != nil {
-		sessID, _ := strconv.ParseUint(key, 10, 64)
-		log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
-			err,
-			key,
-			sessID%16,
-			time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
-		)
-		time.AfterFunc(s.cfg.RetryTimeout, func() {
-			s.UploadKey(key, retryCount-1)
-		})
-		return nil
-	}
-	s.readingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
-
-	start = time.Now()
-	startReader := bytes.NewBuffer(s.startBytes[:nRead])
-	if err := s.s3.Upload(s.gzipFile(startReader), key, "application/octet-stream", true); err != nil {
-		log.Fatalf("Storage: start upload failed. %v\n", err)
-	}
-	if nRead == s.cfg.FileSplitSize {
-		if err := s.s3.Upload(s.gzipFile(file), key+"e", "application/octet-stream", true); err != nil {
-			log.Fatalf("Storage: end upload failed. %v\n", err)
-		}
-	}
-	s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
-
+	// Ignore "s" at the end of mob file name for "old" sessions
+	newVers := false
+	if strings.Contains(key, "/") {
+		newVers = true
+	}
+
-	// Save metrics
-	var fileSize float64 = 0
+	var fileSize int64 = 0
	fileInfo, err := file.Stat()
	if err != nil {
		log.Printf("can't get file info: %s", err)
	} else {
-		fileSize = float64(fileInfo.Size())
+		fileSize = fileInfo.Size()
	}
+
+	var encryptedData []byte
+	if shouldSplit {
+		nRead, err := file.Read(s.startBytes)
+		if err != nil {
+			log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
+				err,
+				key,
+				sessID%16,
+				time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
+			)
+			time.AfterFunc(s.cfg.RetryTimeout, func() {
+				s.uploadKey(sessID, key, shouldSplit, retryCount-1, encryptionKey)
+			})
+			return nil
+		}
+		s.readingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
+
+		start = time.Now()
+		// Encrypt session file if we have encryption key
+		if encryptionKey != "" {
+			encryptedData, err = EncryptData(s.startBytes[:nRead], []byte(encryptionKey))
+			if err != nil {
+				log.Printf("can't encrypt data: %s", err)
+				encryptedData = s.startBytes[:nRead]
+			}
+		} else {
+			encryptedData = s.startBytes[:nRead]
+		}
+		// Compress and save to s3
+		startReader := bytes.NewBuffer(encryptedData)
+		startKey := key
+		if newVers {
+			startKey += "s"
+		}
+		if err := s.s3.Upload(s.gzipFile(startReader), startKey, "application/octet-stream", true); err != nil {
+			log.Fatalf("Storage: start upload failed. %v\n", err)
+		}
+		// TODO: fix possible error (if we read less then FileSplitSize)
+		if nRead == s.cfg.FileSplitSize {
+			restPartSize := fileSize - int64(nRead)
+			fileData := make([]byte, restPartSize)
+			nRead, err = file.Read(fileData)
+			if err != nil {
+				log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
+					err,
+					key,
+					sessID%16,
+					time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
+				)
+				return nil
+			}
+			if int64(nRead) != restPartSize {
+				log.Printf("can't read the rest part of file")
+			}
+
+			// Encrypt session file if we have encryption key
+			if encryptionKey != "" {
+				encryptedData, err = EncryptData(fileData, []byte(encryptionKey))
+				if err != nil {
+					log.Printf("can't encrypt data: %s", err)
+					encryptedData = fileData
+				}
+			} else {
+				encryptedData = fileData
+			}
+			// Compress and save to s3
+			endReader := bytes.NewBuffer(encryptedData)
+			if err := s.s3.Upload(s.gzipFile(endReader), key+"e", "application/octet-stream", true); err != nil {
+				log.Fatalf("Storage: end upload failed. %v\n", err)
+			}
+		}
+		s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
+	} else {
+		start = time.Now()
+		fileData := make([]byte, fileSize)
+		nRead, err := file.Read(fileData)
+		if err != nil {
+			log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
+				err,
+				key,
+				sessID%16,
+				time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
+			)
+			return nil
+		}
+		if int64(nRead) != fileSize {
+			log.Printf("can't read the rest part of file")
+		}
+
+		// Encrypt session file if we have encryption key
+		if encryptionKey != "" {
+			encryptedData, err = EncryptData(fileData, []byte(encryptionKey))
+			if err != nil {
+				log.Printf("can't encrypt data: %s", err)
+				encryptedData = fileData
+			}
+		} else {
+			encryptedData = fileData
+		}
+		endReader := bytes.NewBuffer(encryptedData)
+		if err := s.s3.Upload(s.gzipFile(endReader), key+"s", "application/octet-stream", true); err != nil {
+			log.Fatalf("Storage: end upload failed. %v\n", err)
+		}
+		s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
+	}

+	// Save metrics
+	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
+	if shouldSplit {
+		s.totalSessions.Add(ctx, 1)
+		s.sessionDOMSize.Record(ctx, float64(fileSize))
+	} else {
+		s.sessionDevtoolsSize.Record(ctx, float64(fileSize))
+	}
-	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
-
-	s.sessionSize.Record(ctx, fileSize)
-	s.totalSessions.Add(ctx, 1)
	return nil
}
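uploadKey splits a DOM file at FileSplitSize: the first chunk goes up under the key (suffixed "s" for new-layout sessions) and, if the file is at least the split size, the remainder goes under key+"e". A simplified, runnable sketch of that split; fileSplitSize, the paths and uploadFn are stand-ins for the real config and gzip-to-S3 pipeline:

package main

import (
	"bytes"
	"fmt"
	"io"
	"os"
)

const fileSplitSize = 300000 // stands in for cfg.FileSplitSize

func uploadSplit(path, key string, uploadFn func(r io.Reader, key string) error) error {
	file, err := os.Open(path)
	if err != nil {
		return err
	}
	defer file.Close()

	head := make([]byte, fileSplitSize)
	n, err := file.Read(head)
	if err != nil && err != io.EOF {
		return err
	}
	// "s" marks the start part in the new layout.
	if err := uploadFn(bytes.NewReader(head[:n]), key+"s"); err != nil {
		return err
	}
	// Only sessions longer than the split size produce an "e" (end) part.
	if n == fileSplitSize {
		return uploadFn(file, key+"e")
	}
	return nil
}

func main() {
	_ = uploadSplit("/tmp/dom.mob", "12345/dom.mob", func(r io.Reader, key string) error {
		n, _ := io.Copy(io.Discard, r)
		fmt.Printf("would upload %d bytes to %s\n", n, key)
		return nil
	})
}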
backend/pkg/db/cache/cache.go (vendored, new file, 73 lines)

@@ -0,0 +1,73 @@
package cache

import (
	"log"
	"openreplay/backend/pkg/db/postgres"
	"openreplay/backend/pkg/db/types"
	"sync"
	"time"
)

type SessionMeta struct {
	*types.Session
	lastUse time.Time
}

type ProjectMeta struct {
	*types.Project
	expirationTime time.Time
}

type Cache interface {
	SetSession(sess *types.Session)
	HasSession(sessID uint64) bool
	GetSession(sessID uint64) (*types.Session, error)
	GetProject(projectID uint32) (*types.Project, error)
	GetProjectByKey(projectKey string) (*types.Project, error)
}

type cacheImpl struct {
	conn                     *postgres.Conn
	mutex                    sync.RWMutex
	sessions                 map[uint64]*SessionMeta
	projects                 map[uint32]*ProjectMeta
	projectsByKeys           sync.Map
	projectExpirationTimeout time.Duration
}

func NewCache(conn *postgres.Conn, projectExpirationTimeoutMs int64) Cache {
	newCache := &cacheImpl{
		conn:                     conn,
		sessions:                 make(map[uint64]*SessionMeta),
		projects:                 make(map[uint32]*ProjectMeta),
		projectExpirationTimeout: time.Duration(1000 * projectExpirationTimeoutMs),
	}
	go newCache.cleaner()
	return newCache
}

func (c *cacheImpl) cleaner() {
	cleanTick := time.Tick(time.Minute * 5)
	for {
		select {
		case <-cleanTick:
			c.clearCache()
		}
	}
}

func (c *cacheImpl) clearCache() {
	c.mutex.Lock()
	defer c.mutex.Unlock()

	now := time.Now()
	cacheSize := len(c.sessions)
	deleted := 0
	for id, sess := range c.sessions {
		if now.Sub(sess.lastUse).Minutes() > 3 {
			deleted++
			delete(c.sessions, id)
		}
	}
	log.Printf("cache cleaner: deleted %d/%d sessions", deleted, cacheSize)
}
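cleaner() runs clearCache on a five-minute tick and drops sessions that have been idle for more than three minutes. The same sweep pattern in a self-contained form; the TTL and intervals below are shortened so the demo finishes quickly:

package main

import (
	"fmt"
	"sync"
	"time"
)

// ttlCache sketches the sweep used by cleaner()/clearCache(): a ticker
// goroutine periodically locks the map and removes entries not touched
// within the TTL.
type ttlCache struct {
	mu      sync.Mutex
	entries map[uint64]time.Time // id -> last use
	ttl     time.Duration
}

func newTTLCache(ttl, sweepEvery time.Duration) *ttlCache {
	c := &ttlCache{entries: make(map[uint64]time.Time), ttl: ttl}
	go func() {
		for range time.Tick(sweepEvery) {
			c.sweep()
		}
	}()
	return c
}

func (c *ttlCache) touch(id uint64) {
	c.mu.Lock()
	defer c.mu.Unlock()
	c.entries[id] = time.Now()
}

func (c *ttlCache) sweep() {
	c.mu.Lock()
	defer c.mu.Unlock()
	deleted := 0
	for id, last := range c.entries {
		if time.Since(last) > c.ttl {
			delete(c.entries, id)
			deleted++
		}
	}
	fmt.Printf("sweep: deleted %d entries\n", deleted)
}

func main() {
	c := newTTLCache(100*time.Millisecond, 150*time.Millisecond)
	c.touch(1)
	c.touch(2)
	time.Sleep(400 * time.Millisecond) // let at least one sweep run
}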
backend/pkg/db/cache/messages-common.go (vendored, 12 changed lines)

@@ -4,23 +4,25 @@ import (
	"log"
	. "openreplay/backend/pkg/messages"
	"time"
	// . "openreplay/backend/pkg/db/types"
)

func (c *PGCache) InsertSessionEnd(sessionID uint64, timestamp uint64) (uint64, error) {
	return c.Conn.InsertSessionEnd(sessionID, timestamp)
}

+func (c *PGCache) InsertSessionEncryptionKey(sessionID uint64, key []byte) error {
+	return c.Conn.InsertSessionEncryptionKey(sessionID, key)
+}

func (c *PGCache) HandleSessionEnd(sessionID uint64) error {
	if err := c.Conn.HandleSessionEnd(sessionID); err != nil {
		log.Printf("can't handle session end: %s", err)
	}
	c.DeleteSession(sessionID)
	return nil
}

func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}

@@ -28,11 +30,11 @@ func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
}

func (c *PGCache) InsertMetadata(sessionID uint64, metadata *Metadata) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}
-	project, err := c.GetProject(session.ProjectID)
+	project, err := c.Cache.GetProject(session.ProjectID)
	if err != nil {
		return err
	}
|||
22
backend/pkg/db/cache/messages-ios.go
vendored
22
backend/pkg/db/cache/messages-ios.go
vendored
|
|
@ -1,16 +1,16 @@
|
|||
package cache
|
||||
|
||||
import (
|
||||
"errors"
|
||||
"fmt"
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func (c *PGCache) InsertIOSSessionStart(sessionID uint64, s *IOSSessionStart) error {
|
||||
if c.sessions[sessionID] != nil {
|
||||
return errors.New("This session already in cache!")
|
||||
if c.Cache.HasSession(sessionID) {
|
||||
return fmt.Errorf("session %d already in cache", sessionID)
|
||||
}
|
||||
c.sessions[sessionID] = &Session{
|
||||
newSess := &Session{
|
||||
SessionID: sessionID,
|
||||
Platform: "ios",
|
||||
Timestamp: s.Timestamp,
|
||||
|
|
@ -24,8 +24,10 @@ func (c *PGCache) InsertIOSSessionStart(sessionID uint64, s *IOSSessionStart) er
|
|||
UserCountry: s.UserCountry,
|
||||
UserDeviceType: s.UserDeviceType,
|
||||
}
|
||||
if err := c.Conn.InsertSessionStart(sessionID, c.sessions[sessionID]); err != nil {
|
||||
c.sessions[sessionID] = nil
|
||||
c.Cache.SetSession(newSess)
|
||||
if err := c.Conn.InsertSessionStart(sessionID, newSess); err != nil {
|
||||
// don't know why?
|
||||
c.Cache.SetSession(nil)
|
||||
return err
|
||||
}
|
||||
return nil
|
||||
|
|
@ -40,7 +42,7 @@ func (c *PGCache) InsertIOSScreenEnter(sessionID uint64, screenEnter *IOSScreenE
|
|||
if err := c.Conn.InsertIOSScreenEnter(sessionID, screenEnter); err != nil {
|
||||
return err
|
||||
}
|
||||
session, err := c.GetSession(sessionID)
|
||||
session, err := c.Cache.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -52,7 +54,7 @@ func (c *PGCache) InsertIOSClickEvent(sessionID uint64, clickEvent *IOSClickEven
|
|||
if err := c.Conn.InsertIOSClickEvent(sessionID, clickEvent); err != nil {
|
||||
return err
|
||||
}
|
||||
session, err := c.GetSession(sessionID)
|
||||
session, err := c.Cache.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -64,7 +66,7 @@ func (c *PGCache) InsertIOSInputEvent(sessionID uint64, inputEvent *IOSInputEven
|
|||
if err := c.Conn.InsertIOSInputEvent(sessionID, inputEvent); err != nil {
|
||||
return err
|
||||
}
|
||||
session, err := c.GetSession(sessionID)
|
||||
session, err := c.Cache.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -73,7 +75,7 @@ func (c *PGCache) InsertIOSInputEvent(sessionID uint64, inputEvent *IOSInputEven
|
|||
}
|
||||
|
||||
func (c *PGCache) InsertIOSCrash(sessionID uint64, crash *IOSCrash) error {
|
||||
session, err := c.GetSession(sessionID)
|
||||
session, err := c.Cache.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
backend/pkg/db/cache/messages-web.go (vendored, 44 changed lines)

@@ -1,7 +1,7 @@
package cache

import (
-	"errors"
+	"fmt"
	. "openreplay/backend/pkg/db/types"
	. "openreplay/backend/pkg/messages"
)

@@ -31,10 +31,10 @@ func (c *PGCache) InsertWebSessionStart(sessionID uint64, s *SessionStart) error {
}

func (c *PGCache) HandleWebSessionStart(sessionID uint64, s *SessionStart) error {
-	if c.sessions[sessionID] != nil {
-		return errors.New("This session already in cache!")
+	if c.Cache.HasSession(sessionID) {
+		return fmt.Errorf("session %d already in cache", sessionID)
	}
-	c.sessions[sessionID] = &Session{
+	newSess := &Session{
		SessionID: sessionID,
		Platform:  "web",
		Timestamp: s.Timestamp,

@@ -55,8 +55,10 @@ func (c *PGCache) HandleWebSessionStart(sessionID uint64, s *SessionStart) error {
		UserDeviceHeapSize: s.UserDeviceHeapSize,
		UserID:             &s.UserID,
	}
-	if err := c.Conn.HandleSessionStart(sessionID, c.sessions[sessionID]); err != nil {
-		c.sessions[sessionID] = nil
+	c.Cache.SetSession(newSess)
+	if err := c.Conn.HandleSessionStart(sessionID, newSess); err != nil {
+		// don't know why?
+		c.Cache.SetSession(nil)
		return err
	}
	return nil

@@ -71,8 +73,14 @@ func (c *PGCache) HandleWebSessionEnd(sessionID uint64, e *SessionEnd) error {
	return c.HandleSessionEnd(sessionID)
}

+func (c *PGCache) InsertWebJSException(e *JSException) error {
+	return c.InsertWebErrorEvent(e.SessionID(), WrapJSException(e))
+}
+func (c *PGCache) InsertWebIntegrationEvent(e *IntegrationEvent) error {
+	return c.InsertWebErrorEvent(e.SessionID(), WrapIntegrationEvent(e))
+}
func (c *PGCache) InsertWebErrorEvent(sessionID uint64, e *ErrorEvent) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}

@@ -84,7 +92,7 @@ func (c *PGCache) InsertWebErrorEvent(sessionID uint64, e *ErrorEvent) error {
}

func (c *PGCache) InsertSessionReferrer(sessionID uint64, referrer string) error {
-	_, err := c.GetSession(sessionID)
+	_, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}

@@ -92,11 +100,11 @@
}

func (c *PGCache) InsertWebFetchEvent(sessionID uint64, e *FetchEvent) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}
-	project, err := c.GetProject(session.ProjectID)
+	project, err := c.Cache.GetProject(session.ProjectID)
	if err != nil {
		return err
	}

@@ -104,11 +112,11 @@ func (c *PGCache) InsertWebFetchEvent(sessionID uint64, e *FetchEvent) error {
}

func (c *PGCache) InsertWebGraphQLEvent(sessionID uint64, e *GraphQLEvent) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}
-	project, err := c.GetProject(session.ProjectID)
+	project, err := c.Cache.GetProject(session.ProjectID)
	if err != nil {
		return err
	}

@@ -116,7 +124,7 @@ func (c *PGCache) InsertWebGraphQLEvent(sessionID uint64, e *GraphQLEvent) error {
}

func (c *PGCache) InsertWebCustomEvent(sessionID uint64, e *CustomEvent) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}

@@ -124,7 +132,7 @@ func (c *PGCache) InsertWebCustomEvent(sessionID uint64, e *CustomEvent) error {
}

func (c *PGCache) InsertWebUserID(sessionID uint64, userID *UserID) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}

@@ -132,7 +140,7 @@ func (c *PGCache) InsertWebUserID(sessionID uint64, userID *UserID) error {
}

func (c *PGCache) InsertWebUserAnonymousID(sessionID uint64, userAnonymousID *UserAnonymousID) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}

@@ -140,7 +148,7 @@ func (c *PGCache) InsertWebUserAnonymousID(sessionID uint64, userAnonymousID *UserAnonymousID) error {
}

func (c *PGCache) InsertWebPageEvent(sessionID uint64, e *PageEvent) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}

@@ -148,7 +156,7 @@ func (c *PGCache) InsertWebPageEvent(sessionID uint64, e *PageEvent) error {
}

func (c *PGCache) InsertWebClickEvent(sessionID uint64, e *ClickEvent) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}

@@ -156,7 +164,7 @@ func (c *PGCache) InsertWebClickEvent(sessionID uint64, e *ClickEvent) error {
}

func (c *PGCache) InsertWebInputEvent(sessionID uint64, e *InputEvent) error {
-	session, err := c.GetSession(sessionID)
+	session, err := c.Cache.GetSession(sessionID)
	if err != nil {
		return err
	}
backend/pkg/db/cache/pg-cache.go (vendored, 31 changed lines)

@@ -1,37 +1,20 @@
package cache

import (
-	"sync"
-	"time"
-
	"openreplay/backend/pkg/db/postgres"
-	. "openreplay/backend/pkg/db/types"
)

-type ProjectMeta struct {
-	*Project
-	expirationTime time.Time
-}
-
-// !TODO: remove old sessions by timeout to avoid memleaks
-
/*
 * Cache layer around the stateless PG adapter
 **/
type PGCache struct {
	*postgres.Conn
-	sessions                 map[uint64]*Session
-	projects                 map[uint32]*ProjectMeta
-	projectsByKeys           sync.Map // map[string]*ProjectMeta
-	projectExpirationTimeout time.Duration
+	Cache Cache
}

// TODO: create conn automatically
-func NewPGCache(pgConn *postgres.Conn, projectExpirationTimeoutMs int64) *PGCache {
+func NewPGCache(conn *postgres.Conn, projectExpirationTimeoutMs int64) *PGCache {
+	// Create in-memory cache layer for sessions and projects
+	c := NewCache(conn, projectExpirationTimeoutMs)
+	// Return PG wrapper with integrated cache layer
	return &PGCache{
-		Conn:                     pgConn,
-		sessions:                 make(map[uint64]*Session),
-		projects:                 make(map[uint32]*ProjectMeta),
-		projectExpirationTimeout: time.Duration(1000 * projectExpirationTimeoutMs),
+		Conn:  conn,
+		Cache: c,
	}
}
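PGCache now composes two layers: the embedded *postgres.Conn keeps the write path, while reads go through the Cache field, which memoizes rows fetched from Postgres. A toy sketch of that read-through layering; all names here are illustrative:

package main

import "fmt"

type store struct{} // stands in for *postgres.Conn

func (s *store) Get(id uint64) string { return fmt.Sprintf("row-%d", id) }

type cacheLayer struct {
	store *store
	memo  map[uint64]string
}

func (c *cacheLayer) Get(id uint64) string {
	if v, ok := c.memo[id]; ok {
		return v // hit: no store round-trip
	}
	v := c.store.Get(id)
	c.memo[id] = v // read-through: memoize for next time
	return v
}

type wrapper struct {
	*store            // write path goes straight to the store
	Cache *cacheLayer // read path goes through the cache
}

func main() {
	s := &store{}
	w := &wrapper{store: s, Cache: &cacheLayer{store: s, memo: map[uint64]string{}}}
	fmt.Println(w.Cache.Get(42)) // miss, then memoized
	fmt.Println(w.Cache.Get(42)) // hit
}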
backend/pkg/db/cache/project.go (vendored, 8 changed lines)

@@ -5,7 +5,7 @@ import (
	"time"
)

-func (c *PGCache) GetProjectByKey(projectKey string) (*Project, error) {
+func (c *cacheImpl) GetProjectByKey(projectKey string) (*Project, error) {
	pmInterface, found := c.projectsByKeys.Load(projectKey)
	if found {
		if pm, ok := pmInterface.(*ProjectMeta); ok {

@@ -15,7 +15,7 @@ func (c *PGCache) GetProjectByKey(projectKey string) (*Project, error) {
		}
	}

-	p, err := c.Conn.GetProjectByKey(projectKey)
+	p, err := c.conn.GetProjectByKey(projectKey)
	if err != nil {
		return nil, err
	}

@@ -24,12 +24,12 @@ func (c *PGCache) GetProjectByKey(projectKey string) (*Project, error) {
	return p, nil
}

-func (c *PGCache) GetProject(projectID uint32) (*Project, error) {
+func (c *cacheImpl) GetProject(projectID uint32) (*Project, error) {
	if c.projects[projectID] != nil &&
		time.Now().Before(c.projects[projectID].expirationTime) {
		return c.projects[projectID].Project, nil
	}
-	p, err := c.Conn.GetProject(projectID)
+	p, err := c.conn.GetProject(projectID)
	if err != nil {
		return nil, err
	}
backend/pkg/db/cache/session.go (vendored, 46 changed lines)

@@ -4,28 +4,48 @@ import (
	"errors"
	"github.com/jackc/pgx/v4"
	. "openreplay/backend/pkg/db/types"
+	"time"
)

var NilSessionInCacheError = errors.New("nil session in error")

-func (c *PGCache) GetSession(sessionID uint64) (*Session, error) {
-	if s, inCache := c.sessions[sessionID]; inCache {
-		if s == nil {
-			return s, NilSessionInCacheError
-		}
-		return s, nil
-	}
-	s, err := c.Conn.GetSession(sessionID)
+func (c *cacheImpl) SetSession(sess *Session) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if meta, ok := c.sessions[sess.SessionID]; ok {
+		meta.Session = sess
+		meta.lastUse = time.Now()
+	} else {
+		c.sessions[sess.SessionID] = &SessionMeta{sess, time.Now()}
+	}
+}
+
+func (c *cacheImpl) HasSession(sessID uint64) bool {
+	c.mutex.RLock()
+	defer c.mutex.RUnlock()
+
+	sess, ok := c.sessions[sessID]
+	return ok && sess.Session != nil
+}
+
+func (c *cacheImpl) GetSession(sessionID uint64) (*Session, error) {
+	c.mutex.Lock()
+	defer c.mutex.Unlock()
+
+	if s, inCache := c.sessions[sessionID]; inCache {
+		if s.Session == nil {
+			return nil, NilSessionInCacheError
+		}
+		return s.Session, nil
+	}
+	s, err := c.conn.GetSession(sessionID)
	if err == pgx.ErrNoRows {
-		c.sessions[sessionID] = nil
+		c.sessions[sessionID] = &SessionMeta{nil, time.Now()}
	}
	if err != nil {
		return nil, err
	}
-	c.sessions[sessionID] = s
+	c.sessions[sessionID] = &SessionMeta{s, time.Now()}
	return s, nil
}

-func (c *PGCache) DeleteSession(sessionID uint64) {
-	delete(c.sessions, sessionID)
-}
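Note the negative caching in GetSession: a pgx.ErrNoRows result is remembered as a SessionMeta with a nil Session, so later lookups for the same unknown session fail fast with NilSessionInCacheError instead of re-querying Postgres. A stripped-down, runnable sketch of that idea; the in-memory "DB" is simulated:

package main

import (
	"errors"
	"fmt"
)

var errNotFound = errors.New("no rows")               // stands in for pgx.ErrNoRows
var errNilCached = errors.New("nil session in cache") // mirrors NilSessionInCacheError

type sessionCache struct {
	entries map[uint64]*string // nil value == "known to be absent"
	queries int
}

func (c *sessionCache) get(id uint64) (*string, error) {
	if v, ok := c.entries[id]; ok {
		if v == nil {
			return nil, errNilCached // fail fast, no DB round-trip
		}
		return v, nil
	}
	c.queries++ // simulated DB round-trip
	// Pretend the DB has no such row: remember the miss.
	c.entries[id] = nil
	return nil, errNotFound
}

func main() {
	c := &sessionCache{entries: map[uint64]*string{}}
	_, err1 := c.get(7) // hits the "DB"
	_, err2 := c.get(7) // served from the negative cache
	fmt.Println(err1, err2, "queries:", c.queries) // no rows nil session in cache queries: 1
}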
@@ -55,8 +55,7 @@ func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics)
	}
	c, err := pgxpool.Connect(context.Background(), url)
	if err != nil {
-		log.Println(err)
-		log.Fatalln("pgxpool.Connect Error")
+		log.Fatalf("pgxpool.Connect err: %s", err)
	}
	conn := &Conn{
		batches: make(map[uint64]*pgx.Batch),
@@ -1,6 +1,8 @@
package postgres

import (
+	"math"
+
	"openreplay/backend/pkg/messages"
)

@@ -33,3 +35,8 @@ func calcResponseTime(pe *messages.PageEvent) uint64 {
	}
	return 0
}

+// TODO: review message indexing (it is better to have lower values in db for faster search (int4/int2))
+func truncSqIdx(messageID uint64) uint32 {
+	return uint32(messageID % math.MaxInt32)
+}
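truncSqIdx folds a 64-bit message index into the int4 range with a modulo, which means very large indexes can collide. A small runnable demonstration:

package main

import (
	"fmt"
	"math"
)

// truncSqIdx as defined above: fold a 64-bit message index into the int4
// range so it fits Postgres integer columns.
func truncSqIdx(messageID uint64) uint32 {
	return uint32(messageID % math.MaxInt32)
}

func main() {
	// Small IDs pass through unchanged...
	fmt.Println(truncSqIdx(42)) // 42
	// ...large ones wrap modulo 2^31-1, so distinct IDs can collide.
	fmt.Println(truncSqIdx(math.MaxInt32))      // 0
	fmt.Println(truncSqIdx(math.MaxInt32 + 42)) // 42
}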
@@ -82,6 +82,10 @@ func (conn *Conn) InsertSessionEnd(sessionID uint64, timestamp uint64) (uint64, error) {
	return dur, nil
}

+func (conn *Conn) InsertSessionEncryptionKey(sessionID uint64, key []byte) error {
+	return conn.c.Exec(`UPDATE sessions SET file_key = $2 WHERE session_id = $1`, sessionID, string(key))
+}

func (conn *Conn) HandleSessionEnd(sessionID uint64) error {
	sqlRequest := `
	UPDATE sessions

@@ -91,22 +95,23 @@ func (conn *Conn) HandleSessionEnd(sessionID uint64) error {
			ELSE
				(COALESCE(ARRAY_AGG(DISTINCT ps.type), '{}'))::issue_type[]
			END
-		FROM events_common.issues
-		INNER JOIN issues AS ps USING (issue_id)
-		WHERE session_id = $1)
-	WHERE session_id = $1`
+			FROM events_common.issues
+			INNER JOIN issues AS ps USING (issue_id)
+			WHERE session_id = $1)
+		WHERE session_id = $1
+	`
	return conn.c.Exec(sqlRequest, sessionID)
}

-func (conn *Conn) InsertRequest(sessionID uint64, timestamp uint64, index uint64, url string, duration uint64, success bool) error {
-	if err := conn.requests.Append(sessionID, timestamp, getSqIdx(index), url, duration, success); err != nil {
+func (conn *Conn) InsertRequest(sessionID uint64, timestamp uint64, index uint32, url string, duration uint64, success bool) error {
+	if err := conn.requests.Append(sessionID, timestamp, index, url, duration, success); err != nil {
		return fmt.Errorf("insert request in bulk err: %s", err)
	}
	return nil
}

-func (conn *Conn) InsertCustomEvent(sessionID uint64, timestamp uint64, index uint64, name string, payload string) error {
-	if err := conn.customEvents.Append(sessionID, timestamp, getSqIdx(index), name, payload); err != nil {
+func (conn *Conn) InsertCustomEvent(sessionID uint64, timestamp uint64, index uint32, name string, payload string) error {
+	if err := conn.customEvents.Append(sessionID, timestamp, index, name, payload); err != nil {
		return fmt.Errorf("insert custom event in bulk err: %s", err)
	}
	return nil

@@ -160,20 +165,16 @@ func (conn *Conn) InsertIssueEvent(sessionID uint64, projectID uint32, e *messages.IssueEvent) error {
	if *payload == "" || *payload == "{}" {
		payload = nil
	}
-	context := &e.Context
-	if *context == "" || *context == "{}" {
-		context = nil
-	}

	if err = tx.exec(`
		INSERT INTO issues (
-			project_id, issue_id, type, context_string, context
+			project_id, issue_id, type, context_string
		) (SELECT
-			project_id, $2, $3, $4, CAST($5 AS jsonb)
+			project_id, $2, $3, $4
		FROM sessions
		WHERE session_id = $1
		)ON CONFLICT DO NOTHING`,
-		sessionID, issueID, e.Type, e.ContextString, context,
+		sessionID, issueID, e.Type, e.ContextString,
	); err != nil {
		return err
	}

@@ -184,7 +185,7 @@ func (conn *Conn) InsertIssueEvent(sessionID uint64, projectID uint32, e *messages.IssueEvent) error {
			$1, $2, $3, $4, CAST($5 AS jsonb)
		)`,
		sessionID, issueID, e.Timestamp,
-		getSqIdx(e.MessageID),
+		truncSqIdx(e.MessageID),
		payload,
	); err != nil {
		return err

@@ -204,7 +205,7 @@ func (conn *Conn) InsertIssueEvent(sessionID uint64, projectID uint32, e *messages.IssueEvent) error {
	VALUES
		($1, $2, $3, left($4, 2700), $5, 'error')
	`,
-		sessionID, getSqIdx(e.MessageID), e.Timestamp, e.ContextString, e.Payload,
+		sessionID, truncSqIdx(e.MessageID), e.Timestamp, e.ContextString, e.Payload,
	); err != nil {
		return err
	}
@@ -7,7 +7,7 @@ import (
)

func (conn *Conn) InsertIOSCustomEvent(sessionID uint64, e *messages.IOSCustomEvent) error {
-	err := conn.InsertCustomEvent(sessionID, e.Timestamp, e.Index, e.Name, e.Payload)
+	err := conn.InsertCustomEvent(sessionID, e.Timestamp, truncSqIdx(e.Index), e.Name, e.Payload)
	if err == nil {
		conn.insertAutocompleteValue(sessionID, 0, "CUSTOM_IOS", e.Name)
	}

@@ -31,7 +31,7 @@ func (conn *Conn) InsertIOSUserAnonymousID(sessionID uint64, userAnonymousID *messages.IOSUserAnonymousID) error {
}

func (conn *Conn) InsertIOSNetworkCall(sessionID uint64, e *messages.IOSNetworkCall) error {
-	err := conn.InsertRequest(sessionID, e.Timestamp, e.Index, e.URL, e.Duration, e.Success)
+	err := conn.InsertRequest(sessionID, e.Timestamp, truncSqIdx(e.Index), e.URL, e.Duration, e.Success)
	if err == nil {
		conn.insertAutocompleteValue(sessionID, 0, "REQUEST_IOS", url.DiscardURLQuery(e.URL))
	}
@@ -5,11 +5,6 @@ import (
	"openreplay/backend/pkg/url"
)

-func (conn *Conn) InsertWebStatsLongtask(sessionID uint64, l *LongTask) error {
-	return nil // Do we even use them?
-	// conn.exec(``);
-}
-
func (conn *Conn) InsertWebStatsPerformance(sessionID uint64, p *PerformanceTrackAggr) error {
	timestamp := (p.TimestampEnd + p.TimestampStart) / 2

@@ -65,7 +60,7 @@ func (conn *Conn) InsertWebStatsResourceEvent(sessionID uint64, e *ResourceEvent) error {
	urlQuery := url.DiscardURLQuery(e.URL)
	urlMethod := url.EnsureMethod(e.Method)
	conn.batchQueue(sessionID, sqlRequest,
-		sessionID, e.Timestamp, e.MessageID,
+		sessionID, e.Timestamp, truncSqIdx(e.MessageID),
		e.Type,
		e.URL, host, urlQuery,
		e.Success, e.Status,
@@ -2,21 +2,15 @@ package postgres

import (
	"log"
-	"math"

-	"openreplay/backend/pkg/hashid"
+	"openreplay/backend/pkg/db/types"
	. "openreplay/backend/pkg/messages"
	"openreplay/backend/pkg/url"
)

-// TODO: change messages and replace everywhere to e.Index
-func getSqIdx(messageID uint64) uint {
-	return uint(messageID % math.MaxInt32)
-}
-
func (conn *Conn) InsertWebCustomEvent(sessionID uint64, projectID uint32, e *CustomEvent) error {
	err := conn.InsertCustomEvent(sessionID, e.Timestamp,
-		e.MessageID,
+		truncSqIdx(e.MessageID),
		e.Name, e.Payload)
	if err == nil {
		conn.insertAutocompleteValue(sessionID, projectID, "CUSTOM", e.Name)

@@ -46,7 +40,7 @@ func (conn *Conn) InsertWebPageEvent(sessionID uint64, projectID uint32, e *PageEvent) error {
		return err
	}
	// base_path is deprecated
-	if err = conn.webPageEvents.Append(sessionID, e.MessageID, e.Timestamp, e.Referrer, url.DiscardURLQuery(e.Referrer),
+	if err = conn.webPageEvents.Append(sessionID, truncSqIdx(e.MessageID), e.Timestamp, e.Referrer, url.DiscardURLQuery(e.Referrer),
		host, path, query, e.DomContentLoadedEventEnd, e.LoadEventEnd, e.ResponseEnd, e.FirstPaint, e.FirstContentfulPaint,
		e.SpeedIndex, e.VisuallyComplete, e.TimeToInteractive, calcResponseTime(e), calcDomBuildingTime(e)); err != nil {
		log.Printf("insert web page event in bulk err: %s", err)

@@ -69,7 +63,7 @@ func (conn *Conn) InsertWebClickEvent(sessionID uint64, projectID uint32, e *ClickEvent) error {
			WHERE session_id = $1 AND timestamp <= $3 ORDER BY timestamp DESC LIMIT 1
		)
	`
-	conn.batchQueue(sessionID, sqlRequest, sessionID, e.MessageID, e.Timestamp, e.Label, e.Selector)
+	conn.batchQueue(sessionID, sqlRequest, sessionID, truncSqIdx(e.MessageID), e.Timestamp, e.Label, e.Selector)
	// Accumulate session updates and exec inside batch with another sql commands
	conn.updateSessionEvents(sessionID, 1, 0)
	// Add new value set to autocomplete bulk

@@ -85,7 +79,7 @@ func (conn *Conn) InsertWebInputEvent(sessionID uint64, projectID uint32, e *InputEvent) error {
	if e.ValueMasked {
		value = nil
	}
-	if err := conn.webInputEvents.Append(sessionID, e.MessageID, e.Timestamp, value, e.Label); err != nil {
+	if err := conn.webInputEvents.Append(sessionID, truncSqIdx(e.MessageID), e.Timestamp, value, e.Label); err != nil {
		log.Printf("insert web input event err: %s", err)
	}
	conn.updateSessionEvents(sessionID, 1, 0)

@@ -93,7 +87,7 @@ func (conn *Conn) InsertWebInputEvent(sessionID uint64, projectID uint32, e *InputEvent) error {
	return nil
}

-func (conn *Conn) InsertWebErrorEvent(sessionID uint64, projectID uint32, e *ErrorEvent) (err error) {
+func (conn *Conn) InsertWebErrorEvent(sessionID uint64, projectID uint32, e *types.ErrorEvent) (err error) {
	tx, err := conn.c.Begin()
	if err != nil {
		return err

@@ -105,7 +99,7 @@ func (conn *Conn) InsertWebErrorEvent(sessionID uint64, projectID uint32, e *ErrorEvent) (err error) {
		}
	}()
-	errorID := hashid.WebErrorID(projectID, e)
+	errorID := e.ID(projectID)

	if err = tx.exec(`
		INSERT INTO errors

@@ -123,7 +117,7 @@ func (conn *Conn) InsertWebErrorEvent(sessionID uint64, projectID uint32, e *ErrorEvent) (err error) {
	VALUES
		($1, $2, $3, $4)
	`,
-		sessionID, e.MessageID, e.Timestamp, errorID,
+		sessionID, truncSqIdx(e.MessageID), e.Timestamp, errorID,
	); err != nil {
		return err
	}

@@ -135,6 +129,18 @@ func (conn *Conn) InsertWebErrorEvent(sessionID uint64, projectID uint32, e *ErrorEvent) (err error) {
		return err
	}
	err = tx.commit()

+	// Insert tags
+	sqlRequest := `
+		INSERT INTO public.errors_tags (
+			session_id, message_id, error_id, key, value
+		) VALUES (
+			$1, $2, $3, $4, $5
+		) ON CONFLICT DO NOTHING`
+	for key, value := range e.Tags {
+		conn.batchQueue(sessionID, sqlRequest, sessionID, truncSqIdx(e.MessageID), errorID, key, value)
+	}
+
	return
}

@@ -163,7 +169,7 @@ func (conn *Conn) InsertWebFetchEvent(sessionID uint64, projectID uint32, savePayload bool, e *FetchEvent) error {
		$12, $13
	) ON CONFLICT DO NOTHING`
	conn.batchQueue(sessionID, sqlRequest,
-		sessionID, e.Timestamp, getSqIdx(e.MessageID),
+		sessionID, e.Timestamp, truncSqIdx(e.MessageID),
		e.URL, host, path, query,
		request, response, e.Status, url.EnsureMethod(e.Method),
		e.Duration, e.Status < 400,

@@ -181,7 +187,7 @@ func (conn *Conn) InsertWebGraphQLEvent(sessionID uint64, projectID uint32, savePayload bool, e *GraphQLEvent) error {
		request = &e.Variables
		response = &e.Response
	}
-	if err := conn.webGraphQLEvents.Append(sessionID, e.Timestamp, e.MessageID, e.OperationName, request, response); err != nil {
+	if err := conn.webGraphQLEvents.Append(sessionID, e.Timestamp, truncSqIdx(e.MessageID), e.OperationName, request, response); err != nil {
		log.Printf("insert web graphQL event err: %s", err)
	}
	conn.insertAutocompleteValue(sessionID, projectID, "GRAPHQL", e.OperationName)
backend/pkg/db/types/error-event.go (new file, 118 lines)

@@ -0,0 +1,118 @@
package types

import (
	"encoding/hex"
	"encoding/json"
	"fmt"
	"hash/fnv"
	"log"
	"strconv"

	. "openreplay/backend/pkg/messages"
)

type ErrorEvent struct {
	MessageID uint64
	Timestamp uint64
	Source    string
	Name      string
	Message   string
	Payload   string
	Tags      map[string]*string
}

func unquote(s string) string {
	if s[0] == '"' {
		return s[1 : len(s)-1]
	}
	return s
}

func parseTags(tagsJSON string) (tags map[string]*string, err error) {
	if len(tagsJSON) == 0 {
		return nil, fmt.Errorf("empty tags")
	}
	if tagsJSON[0] == '[' {
		var tagsArr []json.RawMessage
		if err = json.Unmarshal([]byte(tagsJSON), &tagsArr); err != nil {
			return
		}

		tags = make(map[string]*string)
		for _, keyBts := range tagsArr {
			tags[unquote(string(keyBts))] = nil
		}
	} else if tagsJSON[0] == '{' {
		var tagsObj map[string]json.RawMessage
		if err = json.Unmarshal([]byte(tagsJSON), &tagsObj); err != nil {
			return
		}

		tags = make(map[string]*string)
		for key, valBts := range tagsObj {
			val := unquote(string(valBts))
			tags[key] = &val
		}
	}
	return
}

func WrapJSException(m *JSException) *ErrorEvent {
	meta, err := parseTags(m.Metadata)
	if err != nil {
		log.Printf("Error on parsing Exception metadata: %v", err)
	}
	return &ErrorEvent{
		MessageID: m.Meta().Index,
		Timestamp: uint64(m.Meta().Timestamp),
		Source:    "js_exception",
		Name:      m.Name,
		Message:   m.Message,
		Payload:   m.Payload,
		Tags:      meta,
	}
}

func WrapIntegrationEvent(m *IntegrationEvent) *ErrorEvent {
	return &ErrorEvent{
		MessageID: m.Meta().Index, // This will be always 0 here since it's coming from backend TODO: find another way to index
		Timestamp: m.Timestamp,
		Source:    m.Source,
		Name:      m.Name,
		Message:   m.Message,
		Payload:   m.Payload,
	}
}

type stackFrame struct {
	FileName string `json:"fileName"`
	LineNo   int    `json:"lineNumber"`
	ColNo    int    `json:"columnNumber"`
}

func parseFirstFrame(payload string) (*stackFrame, error) {
	var frames []*stackFrame
	if err := json.Unmarshal([]byte(payload), &frames); err != nil {
		return nil, err
	}
	if len(frames) == 0 {
		return nil, nil
	}
	return frames[0], nil
}

func (e *ErrorEvent) ID(projectID uint32) string {
	hash := fnv.New128a()
	hash.Write([]byte(e.Source))
	hash.Write([]byte(e.Name))
	hash.Write([]byte(e.Message))
	frame, err := parseFirstFrame(e.Payload)
	if err != nil {
		log.Printf("Can't parse stackframe ((( %v ))): %v", e.Payload, err)
	}
	if frame != nil {
		hash.Write([]byte(frame.FileName))
		hash.Write([]byte(strconv.Itoa(frame.LineNo)))
		hash.Write([]byte(strconv.Itoa(frame.ColNo)))
	}
	return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
}
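ErrorEvent.ID fingerprints an error from its source, name, message and the file/line/column of the top stack frame, hashed with FNV-128a and prefixed by the project id in hex. A condensed, runnable restatement of that grouping behavior (frame parsing skipped; the values are passed in directly and are illustrative):

package main

import (
	"encoding/hex"
	"fmt"
	"hash/fnv"
	"strconv"
)

func errorID(projectID uint32, source, name, message, file string, line, col int) string {
	h := fnv.New128a()
	h.Write([]byte(source))
	h.Write([]byte(name))
	h.Write([]byte(message))
	h.Write([]byte(file))
	h.Write([]byte(strconv.Itoa(line)))
	h.Write([]byte(strconv.Itoa(col)))
	return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(h.Sum(nil))
}

func main() {
	a := errorID(1, "js_exception", "TypeError", "x is undefined", "app.js", 10, 5)
	b := errorID(1, "js_exception", "TypeError", "x is undefined", "app.js", 10, 5)
	c := errorID(1, "js_exception", "TypeError", "x is undefined", "app.js", 99, 1)
	fmt.Println(a == b) // true: the same fingerprint groups occurrences together
	fmt.Println(a == c) // false: a different frame location is a different error
}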
@@ -20,7 +20,7 @@ type Session struct {
	IssueTypes []string
	IssueScore int

-	UserID          *string // pointer??
+	UserID          *string
	UserAnonymousID *string
	Metadata1       *string
	Metadata2       *string
@@ -56,15 +56,6 @@ func (b *EventMapper) Handle(message Message, messageID uint64, timestamp uint64) Message {
			Selector: msg.Selector,
		}
	}
-	case *JSException:
-		return &ErrorEvent{
-			MessageID: messageID,
-			Timestamp: timestamp,
-			Source:    "js_exception",
-			Name:      msg.Name,
-			Message:   msg.Message,
-			Payload:   msg.Payload,
-		}
	case *ResourceTiming:
		return &ResourceEvent{
			MessageID: messageID,
@@ -23,12 +23,3 @@ func IOSCrashID(projectID uint32, crash *messages.IOSCrash) string {
	hash.Write([]byte(crash.Stacktrace))
	return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
}
-
-func WebErrorID(projectID uint32, errorEvent *messages.ErrorEvent) string {
-	hash := fnv.New128a()
-	hash.Write([]byte(errorEvent.Source))
-	hash.Write([]byte(errorEvent.Name))
-	hash.Write([]byte(errorEvent.Message))
-	hash.Write([]byte(errorEvent.Payload))
-	return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
-}
@@ -5,8 +5,7 @@ import (
	"log"
	"time"

-	"openreplay/backend/pkg/queue/types"
-	//"openreplay/backend/pkg/env"
+	"openreplay/backend/pkg/messages"
)

type partitionStats struct {

@@ -18,15 +17,15 @@ type partitionStats struct {
}

// Update partition statistic
-func (prt *partitionStats) update(m *types.Meta) {
-	if prt.maxts < m.Timestamp {
-		prt.maxts = m.Timestamp
+func (prt *partitionStats) update(m *messages.BatchInfo) {
+	if prt.maxts < m.Timestamp() {
+		prt.maxts = m.Timestamp()
	}
-	if prt.mints > m.Timestamp || prt.mints == 0 {
-		prt.mints = m.Timestamp
+	if prt.mints > m.Timestamp() || prt.mints == 0 {
+		prt.mints = m.Timestamp()
	}
-	prt.lastts = m.Timestamp
-	prt.lastID = m.ID
+	prt.lastts = m.Timestamp()
+	prt.lastID = m.ID()
	prt.count += 1
}

@@ -35,6 +34,10 @@ type queueStats struct {
	tick <-chan time.Time
}

+type QueueStats interface {
+	Collect(msg messages.Message)
+}

func NewQueueStats(sec int) *queueStats {
	return &queueStats{
		prts: make(map[int32]*partitionStats),

@@ -43,14 +46,14 @@ func NewQueueStats(sec int) *queueStats {
}

// Collect writes new data to partition statistic
-func (qs *queueStats) Collect(sessionID uint64, m *types.Meta) {
-	prti := int32(sessionID % 16) // TODO use GetKeyPartition from kafka/key.go
+func (qs *queueStats) Collect(msg messages.Message) {
+	prti := int32(msg.SessionID() % 16) // TODO use GetKeyPartition from kafka/key.go
	prt, ok := qs.prts[prti]
	if !ok {
		qs.prts[prti] = &partitionStats{}
		prt = qs.prts[prti]
	}
-	prt.update(m)
+	prt.update(msg.Meta().Batch())

	select {
	case <-qs.tick:
@@ -1,197 +0,0 @@
package messages

import (
    "bytes"
    "io"
    "log"
    "strings"
)

type Iterator interface {
    Next() bool       // Return true if we have next message
    Type() int        // Return type of the next message
    Message() Message // Return raw or decoded message
    Close()
}

type iteratorImpl struct {
    data      *bytes.Reader
    index     uint64
    timestamp int64
    version   uint64
    msgType   uint64
    msgSize   uint64
    canSkip   bool
    msg       Message
    url       string
}

func NewIterator(data []byte) Iterator {
    return &iteratorImpl{
        data: bytes.NewReader(data),
    }
}

func (i *iteratorImpl) Next() bool {
    if i.canSkip {
        if _, err := i.data.Seek(int64(i.msgSize), io.SeekCurrent); err != nil {
            log.Printf("seek err: %s", err)
            return false
        }
    }
    i.canSkip = false

    var err error
    i.msgType, err = ReadUint(i.data)
    if err != nil {
        if err == io.EOF {
            return false
        }
        log.Printf("can't read message type: %s", err)
        return false
    }

    if i.version > 0 && messageHasSize(i.msgType) {
        // Read message size if it is a new protocol version
        i.msgSize, err = ReadSize(i.data)
        if err != nil {
            log.Printf("can't read message size: %s", err)
            return false
        }
        i.msg = &RawMessage{
            tp:      i.msgType,
            size:    i.msgSize,
            meta:    &message{},
            reader:  i.data,
            skipped: &i.canSkip,
        }
        i.canSkip = true
    } else {
        i.msg, err = ReadMessage(i.msgType, i.data)
        if err == io.EOF {
            return false
        } else if err != nil {
            if strings.HasPrefix(err.Error(), "Unknown message code:") {
                code := strings.TrimPrefix(err.Error(), "Unknown message code: ")
                i.msg, err = DecodeExtraMessage(code, i.data)
                if err != nil {
                    log.Printf("can't decode msg: %s", err)
                    return false
                }
            } else {
                log.Printf("Batch Message decoding error on message with index %v, err: %s", i.index, err)
                return false
            }
        }
        i.msg = transformDeprecated(i.msg)
    }

    // Process meta information
    isBatchMeta := false
    switch i.msgType {
    case MsgBatchMetadata:
        if i.index != 0 { // Might be several 0-0 BatchMeta in a row without an error though
            log.Printf("Batch Metadata found at the end of the batch")
            return false
        }
        msg := i.msg.Decode()
        if msg == nil {
            return false
        }
        m := msg.(*BatchMetadata)
        i.index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
        i.timestamp = m.Timestamp
        i.version = m.Version
        i.url = m.Url
        isBatchMeta = true
        if i.version > 1 {
            log.Printf("incorrect batch version, skip current batch")
            return false
        }
    case MsgBatchMeta: // Is not required to be present in batch since IOS doesn't have it (though we might change it)
        if i.index != 0 { // Might be several 0-0 BatchMeta in a row without an error though
            log.Printf("Batch Meta found at the end of the batch")
            return false
        }
        msg := i.msg.Decode()
        if msg == nil {
            return false
        }
        m := msg.(*BatchMeta)
        i.index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
        i.timestamp = m.Timestamp
        isBatchMeta = true
        // continue readLoop
    case MsgIOSBatchMeta:
        if i.index != 0 { // Might be several 0-0 BatchMeta in a row without an error though
            log.Printf("Batch Meta found at the end of the batch")
            return false
        }
        msg := i.msg.Decode()
        if msg == nil {
            return false
        }
        m := msg.(*IOSBatchMeta)
        i.index = m.FirstIndex
        i.timestamp = int64(m.Timestamp)
        isBatchMeta = true
        // continue readLoop
    case MsgTimestamp:
        msg := i.msg.Decode()
        if msg == nil {
            return false
        }
        m := msg.(*Timestamp)
        i.timestamp = int64(m.Timestamp)
        // No skipping here for making it easy to encode back the same sequence of message
        // continue readLoop
    case MsgSessionStart:
        msg := i.msg.Decode()
        if msg == nil {
            return false
        }
        m := msg.(*SessionStart)
        i.timestamp = int64(m.Timestamp)
    case MsgSessionEnd:
        msg := i.msg.Decode()
        if msg == nil {
            return false
        }
        m := msg.(*SessionEnd)
        i.timestamp = int64(m.Timestamp)
    case MsgSetPageLocation:
        msg := i.msg.Decode()
        if msg == nil {
            return false
        }
        m := msg.(*SetPageLocation)
        i.url = m.URL
    }

    i.msg.Meta().Index = i.index
    i.msg.Meta().Timestamp = i.timestamp
    i.msg.Meta().Url = i.url

    if !isBatchMeta { // Without that indexes will be unique anyway, though shifted by 1 because BatchMeta is not counted in tracker
        i.index++
    }
    return true
}

func (i *iteratorImpl) Type() int {
    return int(i.msgType)
}

func (i *iteratorImpl) Message() Message {
    return i.msg
}

func (i *iteratorImpl) Close() {
    _, err := i.data.Seek(0, io.SeekEnd)
    if err != nil {
        log.Printf("can't set seek pointer at the end: %s", err)
    }
}

func messageHasSize(msgType uint64) bool {
    return !(msgType == 80 || msgType == 81 || msgType == 82)
}
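For orientation, this is roughly how a caller consumed the pull-style Iterator deleted above; the loop below is an illustrative sketch, and the `batchData` and `handle` names are assumptions, not code from this repository:

// Sketch: draining one batch with the removed pull-style API.
iter := messages.NewIterator(batchData) // batchData: one raw batch ([]byte), assumed
for iter.Next() {
    if msg := iter.Message().Decode(); msg != nil { // Decode may return nil on a broken message
        handle(msg) // assumed application callback
    }
}
iter.Close() // moves the reader to the end of the underlying buffer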
@@ -1,56 +0,0 @@
package messages

import (
    "encoding/binary"
    "fmt"
    "io"
)

type SessionSearch struct {
    message
    Timestamp uint64
    Partition uint64
}

func (msg *SessionSearch) Encode() []byte {
    buf := make([]byte, 11)
    buf[0] = 127
    p := 1
    p = WriteUint(msg.Timestamp, buf, p)
    p = WriteUint(msg.Partition, buf, p)
    return buf[:p]
}

func (msg *SessionSearch) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded
    }
    data := make([]byte, len(encoded)+8)
    copy(data[8:], encoded[:])
    binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
    return data
}

func (msg *SessionSearch) Decode() Message {
    return msg
}

func (msg *SessionSearch) TypeID() int {
    return 127
}

func DecodeExtraMessage(code string, reader io.Reader) (Message, error) {
    var err error
    if code != "127" {
        return nil, fmt.Errorf("unknown message code: %s", code)
    }
    msg := &SessionSearch{}
    if msg.Timestamp, err = ReadUint(reader); err != nil {
        return nil, fmt.Errorf("can't read message timestamp: %s", err)
    }
    if msg.Partition, err = ReadUint(reader); err != nil {
        return nil, fmt.Errorf("can't read last partition: %s", err)
    }
    return msg, nil
}
@@ -1,5 +0,0 @@
package messages

func Encode(msg Message) []byte {
    return msg.Encode()
}
@@ -2,9 +2,13 @@
package messages

func IsReplayerType(id int) bool {
    return 0 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 22 == id || 37 == id || 38 == id || 39 == id || 40 == id || 41 == id || 44 == id || 45 == id || 46 == id || 47 == id || 48 == id || 49 == id || 54 == id || 55 == id || 59 == id || 60 == id || 61 == id || 67 == id || 69 == id || 70 == id || 71 == id || 72 == id || 73 == id || 74 == id || 75 == id || 76 == id || 77 == id || 79 == id || 90 == id || 93 == id || 96 == id || 100 == id || 102 == id || 103 == id || 105 == id
    return 80 != id && 81 != id && 82 != id && 1 != id && 3 != id && 17 != id && 23 != id && 24 != id && 25 != id && 26 != id && 27 != id && 28 != id && 29 != id && 30 != id && 31 != id && 32 != id && 33 != id && 35 != id && 36 != id && 42 != id && 43 != id && 50 != id && 51 != id && 52 != id && 53 != id && 56 != id && 62 != id && 63 != id && 64 != id && 66 != id && 78 != id && 126 != id && 127 != id && 107 != id && 91 != id && 92 != id && 94 != id && 95 != id && 97 != id && 98 != id && 99 != id && 101 != id && 104 != id && 110 != id && 111 != id
}

func IsIOSType(id int) bool {
    return 107 == id || 90 == id || 91 == id || 92 == id || 93 == id || 94 == id || 95 == id || 96 == id || 97 == id || 98 == id || 99 == id || 100 == id || 101 == id || 102 == id || 103 == id || 104 == id || 105 == id || 110 == id || 111 == id
}

func IsDOMType(id int) bool {
    return 0 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 37 == id || 38 == id || 49 == id || 54 == id || 55 == id || 57 == id || 58 == id || 59 == id || 60 == id || 61 == id || 67 == id || 69 == id || 70 == id || 71 == id || 72 == id || 73 == id || 74 == id || 75 == id || 76 == id || 77 == id || 90 == id || 93 == id || 96 == id || 100 == id || 102 == id || 103 == id || 105 == id
}
215 backend/pkg/messages/iterator.go Normal file
@@ -0,0 +1,215 @@
package messages

import (
    "bytes"
    "fmt"
    "io"
    "log"
)

// MessageHandler processes one message using service logic
type MessageHandler func(Message)

// MessageIterator iterates over all messages in a batch
type MessageIterator interface {
    Iterate(batchData []byte, batchInfo *BatchInfo)
}

type messageIteratorImpl struct {
    filter      map[int]struct{}
    preFilter   map[int]struct{}
    handler     MessageHandler
    autoDecode  bool
    version     uint64
    size        uint64
    canSkip     bool
    broken      bool
    messageInfo *message
    batchInfo   *BatchInfo
}

func NewMessageIterator(messageHandler MessageHandler, messageFilter []int, autoDecode bool) MessageIterator {
    iter := &messageIteratorImpl{handler: messageHandler, autoDecode: autoDecode}
    if len(messageFilter) != 0 {
        filter := make(map[int]struct{}, len(messageFilter))
        for _, msgType := range messageFilter {
            filter[msgType] = struct{}{}
        }
        iter.filter = filter
    }
    iter.preFilter = map[int]struct{}{
        MsgBatchMetadata: {}, MsgBatchMeta: {}, MsgTimestamp: {},
        MsgSessionStart: {}, MsgSessionEnd: {}, MsgSetPageLocation: {},
        MsgSessionEndDeprecated: {}}
    return iter
}

func (i *messageIteratorImpl) prepareVars(batchInfo *BatchInfo) {
    i.batchInfo = batchInfo
    i.messageInfo = &message{batch: batchInfo}
    i.version = 0
    i.canSkip = false
    i.broken = false
    i.size = 0
}

func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
    // Prepare iterator before processing messages in batch
    i.prepareVars(batchInfo)

    // Initialize batch reader
    reader := bytes.NewReader(batchData)

    // Process until end of batch or parsing error
    for {
        // Increase message index (can be overwritten by batch info message)
        i.messageInfo.Index++

        if i.broken {
            log.Printf("skipping broken batch, info: %s", i.batchInfo.Info())
            return
        }

        if i.canSkip {
            if _, err := reader.Seek(int64(i.size), io.SeekCurrent); err != nil {
                log.Printf("can't skip message: %s, info: %s", err, i.batchInfo.Info())
                return
            }
        }
        i.canSkip = false

        // Read message type
        msgType, err := ReadUint(reader)
        if err != nil {
            if err != io.EOF {
                log.Printf("can't read message type: %s, info: %s", err, i.batchInfo.Info())
            }
            return
        }

        var msg Message
        // Read message body (and decode if protocol version less than 1)
        if i.version > 0 && messageHasSize(msgType) {
            // Read message size if it is a new protocol version
            i.size, err = ReadSize(reader)
            if err != nil {
                log.Printf("can't read message size: %s, info: %s", err, i.batchInfo.Info())
                return
            }
            msg = &RawMessage{
                tp:      msgType,
                size:    i.size,
                reader:  reader,
                skipped: &i.canSkip,
                broken:  &i.broken,
                meta:    i.messageInfo,
            }
            i.canSkip = true
        } else {
            msg, err = ReadMessage(msgType, reader)
            if err != nil {
                if err != io.EOF {
                    log.Printf("can't read message body: %s, info: %s", err, i.batchInfo.Info())
                }
                return
            }
            msg = transformDeprecated(msg)
        }

        // Preprocess "system" messages
        if _, ok := i.preFilter[msg.TypeID()]; ok {
            msg = msg.Decode()
            if msg == nil {
                log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
                return
            }
            if err := i.preprocessing(msg); err != nil {
                log.Printf("message preprocessing err: %s", err)
                return
            }
        }

        // Skip messages we don't have in filter
        if i.filter != nil {
            if _, ok := i.filter[msg.TypeID()]; !ok {
                continue
            }
        }

        if i.autoDecode {
            msg = msg.Decode()
            if msg == nil {
                log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
                return
            }
        }

        // Set meta information for message
        msg.Meta().SetMeta(i.messageInfo)

        // Process message
        i.handler(msg)
    }
}

func (i *messageIteratorImpl) zeroTsLog(msgType string) {
    log.Printf("zero timestamp in %s, info: %s", msgType, i.batchInfo.Info())
}

func (i *messageIteratorImpl) preprocessing(msg Message) error {
    switch m := msg.(type) {
    case *BatchMetadata:
        if i.messageInfo.Index > 1 { // Might be several 0-0 BatchMeta in a row without an error though
            return fmt.Errorf("batchMetadata found at the end of the batch, info: %s", i.batchInfo.Info())
        }
        if m.Version > 1 {
            return fmt.Errorf("incorrect batch version: %d, skip current batch, info: %s", i.version, i.batchInfo.Info())
        }
        i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
        i.messageInfo.Timestamp = m.Timestamp
        if m.Timestamp == 0 {
            i.zeroTsLog("BatchMetadata")
        }
        i.messageInfo.Url = m.Url
        i.version = m.Version
        i.batchInfo.version = m.Version

    case *BatchMeta: // Is not required to be present in batch since IOS doesn't have it (though we might change it)
        if i.messageInfo.Index > 1 { // Might be several 0-0 BatchMeta in a row without an error though
            return fmt.Errorf("batchMeta found at the end of the batch, info: %s", i.batchInfo.Info())
        }
        i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
        i.messageInfo.Timestamp = m.Timestamp
        if m.Timestamp == 0 {
            i.zeroTsLog("BatchMeta")
        }

    case *Timestamp:
        i.messageInfo.Timestamp = int64(m.Timestamp)
        if m.Timestamp == 0 {
            i.zeroTsLog("Timestamp")
        }

    case *SessionStart:
        i.messageInfo.Timestamp = int64(m.Timestamp)
        if m.Timestamp == 0 {
            i.zeroTsLog("SessionStart")
            log.Printf("zero session start, project: %d, UA: %s, tracker: %s, info: %s",
                m.ProjectID, m.UserAgent, m.TrackerVersion, i.batchInfo.Info())
        }

    case *SessionEnd:
        i.messageInfo.Timestamp = int64(m.Timestamp)
        if m.Timestamp == 0 {
            i.zeroTsLog("SessionEnd")
        }

    case *SetPageLocation:
        i.messageInfo.Url = m.URL
    }
    return nil
}

func messageHasSize(msgType uint64) bool {
    return !(msgType == 80 || msgType == 81 || msgType == 82)
}
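A minimal wiring sketch for the push-style iterator above, assuming the batch values (`batchData`, `sessID`, `topic`, `offset`, `partition`, `ts`) come from the queue layer; the handler body and the filter choice are illustrative:

// Sketch: feeding one batch through the new push-style iterator.
iterator := NewMessageIterator(
    func(msg Message) { // assumed handler: called once per surviving message
        log.Printf("session %d: type %d at index %d", msg.SessionID(), msg.TypeID(), msg.Meta().Index)
    },
    []int{MsgSessionStart, MsgSessionEnd}, // only these reach the handler; nil disables filtering
    true, // autoDecode: hand decoded messages (not RawMessage wrappers) to the handler
)
iterator.Iterate(batchData, NewBatchInfo(sessID, topic, offset, partition, ts))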
@@ -2,13 +2,18 @@ package messages

func transformDeprecated(msg Message) Message {
    switch m := msg.(type) {
    case *MouseClickDepricated:
        return &MouseClick{
            ID:             m.ID,
            HesitationTime: m.HesitationTime,
            Label:          m.Label,
        }
    case *JSExceptionDeprecated:
        return &JSException{
            Name:     m.Name,
            Message:  m.Message,
            Payload:  m.Payload,
            Metadata: "{}",
        }
    case *SessionEndDeprecated:
        return &SessionEnd{
            Timestamp:     m.Timestamp,
            EncryptionKey: "",
        }
    default:
        return msg
    }
    return msg
}
@@ -1,20 +1,6 @@
package messages

type message struct {
    Timestamp int64
    Index     uint64
    Url       string
}

func (m *message) Meta() *message {
    return m
}

func (m *message) SetMeta(origin *message) {
    m.Timestamp = origin.Timestamp
    m.Index = origin.Index
    m.Url = origin.Url
}

import "fmt"

type Message interface {
    Encode() []byte
@@ -22,4 +8,74 @@ type Message interface {
    Decode() Message
    TypeID() int
    Meta() *message
    SessionID() uint64
}

// BatchInfo represents common information for all messages inside a data batch
type BatchInfo struct {
    sessionID uint64
    id        uint64
    topic     string
    partition uint64
    timestamp int64
    version   uint64
}

func NewBatchInfo(sessID uint64, topic string, id, partition uint64, ts int64) *BatchInfo {
    return &BatchInfo{
        sessionID: sessID,
        id:        id,
        topic:     topic,
        partition: partition,
        timestamp: ts,
    }
}

func (b *BatchInfo) SessionID() uint64 {
    return b.sessionID
}

func (b *BatchInfo) ID() uint64 {
    return b.id
}

func (b *BatchInfo) Timestamp() int64 {
    return b.timestamp
}

func (b *BatchInfo) Info() string {
    return fmt.Sprintf("session: %d, partition: %d, offset: %d, ver: %d", b.sessionID, b.partition, b.id, b.version)
}

type message struct {
    Timestamp int64
    Index     uint64
    Url       string
    batch     *BatchInfo
}

func (m *message) Batch() *BatchInfo {
    return m.batch
}

func (m *message) Meta() *message {
    return m
}

func (m *message) SetMeta(origin *message) {
    m.batch = origin.batch
    m.Timestamp = origin.Timestamp
    m.Index = origin.Index
    m.Url = origin.Url
}

func (m *message) SessionID() uint64 {
    return m.batch.sessionID
}

func (m *message) SetSessionID(sessID uint64) {
    if m.batch == nil {
        m.batch = &BatchInfo{}
    }
    m.batch.sessionID = sessID
}
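The `PageNo<<32 + FirstIndex` packing used by the iterators keeps the page number in the high 32 bits of the message index and the in-page position in the low 32 bits. A small illustrative round-trip (the function and variable names are assumptions):

// Sketch: pack/unpack of the batch message index, assuming firstIndex < 2^32.
func packIndex(pageNo, firstIndex uint64) uint64 { return pageNo<<32 + firstIndex }

// packIndex(2, 5) == 0x0000000200000005;
// pageNo is recovered as index>>32, the in-page position as index&0xFFFFFFFF.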
@@ -14,7 +14,7 @@ const (
    MsgSessionStart = 1
    MsgSessionEnd = 3
    MsgSessionEndDeprecated = 3
    MsgSetPageLocation = 4

@@ -50,15 +50,13 @@ const (
    MsgMouseMove = 20
    MsgMouseClickDepricated = 21
    MsgConsoleLog = 22
    MsgPageLoadTiming = 23
    MsgPageRenderTiming = 24
    MsgJSException = 25
    MsgJSExceptionDeprecated = 25
    MsgIntegrationEvent = 26

@@ -76,8 +74,6 @@ const (
    MsgClickEvent = 33
    MsgErrorEvent = 34
    MsgResourceEvent = 35
    MsgCustomEvent = 36

@@ -122,6 +118,10 @@ const (
    MsgPerformanceTrackAggr = 56
    MsgLoadFontFace = 57
    MsgSetNodeFocus = 58
    MsgLongTask = 59
    MsgSetNodeAttributeURLBased = 60

@@ -158,6 +158,12 @@ const (
    MsgZustand = 79
    MsgJSException = 78
    MsgSessionEnd = 126
    MsgSessionSearch = 127
    MsgIOSBatchMeta = 107
    MsgIOSSessionStart = 90
@@ -401,12 +407,12 @@ func (msg *SessionStart) TypeID() int {
    return 1
}

type SessionEnd struct {
type SessionEndDeprecated struct {
    message
    Timestamp uint64
}

func (msg *SessionEnd) Encode() []byte {
func (msg *SessionEndDeprecated) Encode() []byte {
    buf := make([]byte, 11)
    buf[0] = 3
    p := 1

@@ -414,7 +420,7 @@ func (msg *SessionEnd) Encode() []byte {
    return buf[:p]
}

func (msg *SessionEnd) EncodeWithIndex() []byte {
func (msg *SessionEndDeprecated) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded

@@ -425,11 +431,11 @@ func (msg *SessionEnd) EncodeWithIndex() []byte {
    return data
}

func (msg *SessionEnd) Decode() Message {
func (msg *SessionEndDeprecated) Decode() Message {
    return msg
}

func (msg *SessionEnd) TypeID() int {
func (msg *SessionEndDeprecated) TypeID() int {
    return 3
}
@@ -1024,42 +1030,6 @@ func (msg *MouseMove) TypeID() int {
    return 20
}

type MouseClickDepricated struct {
    message
    ID             uint64
    HesitationTime uint64
    Label          string
}

func (msg *MouseClickDepricated) Encode() []byte {
    buf := make([]byte, 31+len(msg.Label))
    buf[0] = 21
    p := 1
    p = WriteUint(msg.ID, buf, p)
    p = WriteUint(msg.HesitationTime, buf, p)
    p = WriteString(msg.Label, buf, p)
    return buf[:p]
}

func (msg *MouseClickDepricated) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded
    }
    data := make([]byte, len(encoded)+8)
    copy(data[8:], encoded[:])
    binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
    return data
}

func (msg *MouseClickDepricated) Decode() Message {
    return msg
}

func (msg *MouseClickDepricated) TypeID() int {
    return 21
}

type ConsoleLog struct {
    message
    Level string
@@ -1178,14 +1148,14 @@ func (msg *PageRenderTiming) TypeID() int {
    return 24
}

type JSException struct {
type JSExceptionDeprecated struct {
    message
    Name    string
    Message string
    Payload string
}

func (msg *JSException) Encode() []byte {
func (msg *JSExceptionDeprecated) Encode() []byte {
    buf := make([]byte, 31+len(msg.Name)+len(msg.Message)+len(msg.Payload))
    buf[0] = 25
    p := 1

@@ -1195,7 +1165,7 @@ func (msg *JSException) Encode() []byte {
    return buf[:p]
}

func (msg *JSException) EncodeWithIndex() []byte {
func (msg *JSExceptionDeprecated) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded

@@ -1206,11 +1176,11 @@ func (msg *JSException) EncodeWithIndex() []byte {
    return data
}

func (msg *JSException) Decode() Message {
func (msg *JSExceptionDeprecated) Decode() Message {
    return msg
}

func (msg *JSException) TypeID() int {
func (msg *JSExceptionDeprecated) TypeID() int {
    return 25
}
@@ -1530,48 +1500,6 @@ func (msg *ClickEvent) TypeID() int {
    return 33
}

type ErrorEvent struct {
    message
    MessageID uint64
    Timestamp uint64
    Source    string
    Name      string
    Message   string
    Payload   string
}

func (msg *ErrorEvent) Encode() []byte {
    buf := make([]byte, 61+len(msg.Source)+len(msg.Name)+len(msg.Message)+len(msg.Payload))
    buf[0] = 34
    p := 1
    p = WriteUint(msg.MessageID, buf, p)
    p = WriteUint(msg.Timestamp, buf, p)
    p = WriteString(msg.Source, buf, p)
    p = WriteString(msg.Name, buf, p)
    p = WriteString(msg.Message, buf, p)
    p = WriteString(msg.Payload, buf, p)
    return buf[:p]
}

func (msg *ErrorEvent) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded
    }
    data := make([]byte, len(encoded)+8)
    copy(data[8:], encoded[:])
    binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
    return data
}

func (msg *ErrorEvent) Decode() Message {
    return msg
}

func (msg *ErrorEvent) TypeID() int {
    return 34
}

type ResourceEvent struct {
    message
    MessageID uint64
@@ -2424,6 +2352,76 @@ func (msg *PerformanceTrackAggr) TypeID() int {
    return 56
}

type LoadFontFace struct {
    message
    ParentID    uint64
    Family      string
    Source      string
    Descriptors string
}

func (msg *LoadFontFace) Encode() []byte {
    buf := make([]byte, 41+len(msg.Family)+len(msg.Source)+len(msg.Descriptors))
    buf[0] = 57
    p := 1
    p = WriteUint(msg.ParentID, buf, p)
    p = WriteString(msg.Family, buf, p)
    p = WriteString(msg.Source, buf, p)
    p = WriteString(msg.Descriptors, buf, p)
    return buf[:p]
}

func (msg *LoadFontFace) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded
    }
    data := make([]byte, len(encoded)+8)
    copy(data[8:], encoded[:])
    binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
    return data
}

func (msg *LoadFontFace) Decode() Message {
    return msg
}

func (msg *LoadFontFace) TypeID() int {
    return 57
}

type SetNodeFocus struct {
    message
    ID int64
}

func (msg *SetNodeFocus) Encode() []byte {
    buf := make([]byte, 11)
    buf[0] = 58
    p := 1
    p = WriteInt(msg.ID, buf, p)
    return buf[:p]
}

func (msg *SetNodeFocus) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded
    }
    data := make([]byte, len(encoded)+8)
    copy(data[8:], encoded[:])
    binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
    return data
}

func (msg *SetNodeFocus) Decode() Message {
    return msg
}

func (msg *SetNodeFocus) TypeID() int {
    return 58
}

type LongTask struct {
    message
    Timestamp uint64
@@ -3074,6 +3072,112 @@ func (msg *Zustand) TypeID() int {
    return 79
}

type JSException struct {
    message
    Name     string
    Message  string
    Payload  string
    Metadata string
}

func (msg *JSException) Encode() []byte {
    buf := make([]byte, 41+len(msg.Name)+len(msg.Message)+len(msg.Payload)+len(msg.Metadata))
    buf[0] = 78
    p := 1
    p = WriteString(msg.Name, buf, p)
    p = WriteString(msg.Message, buf, p)
    p = WriteString(msg.Payload, buf, p)
    p = WriteString(msg.Metadata, buf, p)
    return buf[:p]
}

func (msg *JSException) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded
    }
    data := make([]byte, len(encoded)+8)
    copy(data[8:], encoded[:])
    binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
    return data
}

func (msg *JSException) Decode() Message {
    return msg
}

func (msg *JSException) TypeID() int {
    return 78
}

type SessionEnd struct {
    message
    Timestamp     uint64
    EncryptionKey string
}

func (msg *SessionEnd) Encode() []byte {
    buf := make([]byte, 21+len(msg.EncryptionKey))
    buf[0] = 126
    p := 1
    p = WriteUint(msg.Timestamp, buf, p)
    p = WriteString(msg.EncryptionKey, buf, p)
    return buf[:p]
}

func (msg *SessionEnd) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded
    }
    data := make([]byte, len(encoded)+8)
    copy(data[8:], encoded[:])
    binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
    return data
}

func (msg *SessionEnd) Decode() Message {
    return msg
}

func (msg *SessionEnd) TypeID() int {
    return 126
}

type SessionSearch struct {
    message
    Timestamp uint64
    Partition uint64
}

func (msg *SessionSearch) Encode() []byte {
    buf := make([]byte, 21)
    buf[0] = 127
    p := 1
    p = WriteUint(msg.Timestamp, buf, p)
    p = WriteUint(msg.Partition, buf, p)
    return buf[:p]
}

func (msg *SessionSearch) EncodeWithIndex() []byte {
    encoded := msg.Encode()
    if IsIOSType(msg.TypeID()) {
        return encoded
    }
    data := make([]byte, len(encoded)+8)
    copy(data[8:], encoded[:])
    binary.LittleEndian.PutUint64(data[0:], msg.Meta().Index)
    return data
}

func (msg *SessionSearch) Decode() Message {
    return msg
}

func (msg *SessionSearch) TypeID() int {
    return 127
}

type IOSBatchMeta struct {
    message
    Timestamp uint64
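All of the `EncodeWithIndex` implementations above share one wire layout for web (non-iOS) types: an 8-byte little-endian copy of `Meta().Index` is prepended to the encoded message. As a rough byte-level picture (values illustrative):

// [ index: 8 bytes, little-endian ][ type: 1 byte ][ encoded fields ... ]
//
// e.g. a SessionSearch (type 127) with Meta().Index == 1 starts with
// 01 00 00 00 00 00 00 00 7F ...; iOS types return the bare encoding with no prefix.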
@@ -16,6 +16,7 @@ type RawMessage struct {
    meta    *message
    encoded bool
    skipped *bool
    broken  *bool
}

func (m *RawMessage) Encode() []byte {

@@ -28,7 +29,7 @@ func (m *RawMessage) Encode() []byte {
    *m.skipped = false
    _, err := io.ReadFull(m.reader, m.data[1:])
    if err != nil {
        log.Printf("message encode err: %s", err)
        log.Printf("message encode err: %s, type: %d, sess: %d", err, m.tp, m.SessionID())
        return nil
    }
    return m.data

@@ -36,7 +37,10 @@ func (m *RawMessage) Encode() []byte {

func (m *RawMessage) EncodeWithIndex() []byte {
    if !m.encoded {
        m.Encode()
        if m.Encode() == nil {
            *m.broken = true
            return nil
        }
    }
    if IsIOSType(int(m.tp)) {
        return m.data

@@ -49,13 +53,18 @@ func (m *RawMessage) EncodeWithIndex() []byte {

func (m *RawMessage) Decode() Message {
    if !m.encoded {
        m.Encode()
        if m.Encode() == nil {
            *m.broken = true
            return nil
        }
    }
    msg, err := ReadMessage(m.tp, bytes.NewReader(m.data[1:]))
    if err != nil {
        log.Printf("decode err: %s", err)
        *m.broken = true
        return nil
    }
    msg = transformDeprecated(msg)
    msg.Meta().SetMeta(m.meta)
    return msg
}

@@ -67,3 +76,10 @@ func (m *RawMessage) TypeID() int {

func (m *RawMessage) Meta() *message {
    return m.meta
}

func (m *RawMessage) SessionID() uint64 {
    if m.meta != nil {
        return m.meta.SessionID()
    }
    return 0
}
@@ -117,9 +117,9 @@ func DecodeSessionStart(reader io.Reader) (Message, error) {
    return msg, err
}

func DecodeSessionEnd(reader io.Reader) (Message, error) {
func DecodeSessionEndDeprecated(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &SessionEnd{}
    msg := &SessionEndDeprecated{}
    if msg.Timestamp, err = ReadUint(reader); err != nil {
        return nil, err
    }

@@ -349,21 +349,6 @@ func DecodeMouseMove(reader io.Reader) (Message, error) {
    return msg, err
}

func DecodeMouseClickDepricated(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &MouseClickDepricated{}
    if msg.ID, err = ReadUint(reader); err != nil {
        return nil, err
    }
    if msg.HesitationTime, err = ReadUint(reader); err != nil {
        return nil, err
    }
    if msg.Label, err = ReadString(reader); err != nil {
        return nil, err
    }
    return msg, err
}

func DecodeConsoleLog(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &ConsoleLog{}

@@ -424,9 +409,9 @@ func DecodePageRenderTiming(reader io.Reader) (Message, error) {
    return msg, err
}

func DecodeJSException(reader io.Reader) (Message, error) {
func DecodeJSExceptionDeprecated(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &JSException{}
    msg := &JSExceptionDeprecated{}
    if msg.Name, err = ReadString(reader); err != nil {
        return nil, err
    }

@@ -601,30 +586,6 @@ func DecodeClickEvent(reader io.Reader) (Message, error) {
    return msg, err
}

func DecodeErrorEvent(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &ErrorEvent{}
    if msg.MessageID, err = ReadUint(reader); err != nil {
        return nil, err
    }
    if msg.Timestamp, err = ReadUint(reader); err != nil {
        return nil, err
    }
    if msg.Source, err = ReadString(reader); err != nil {
        return nil, err
    }
    if msg.Name, err = ReadString(reader); err != nil {
        return nil, err
    }
    if msg.Message, err = ReadString(reader); err != nil {
        return nil, err
    }
    if msg.Payload, err = ReadString(reader); err != nil {
        return nil, err
    }
    return msg, err
}

func DecodeResourceEvent(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &ResourceEvent{}

@@ -1045,6 +1006,33 @@ func DecodePerformanceTrackAggr(reader io.Reader) (Message, error) {
    return msg, err
}

func DecodeLoadFontFace(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &LoadFontFace{}
    if msg.ParentID, err = ReadUint(reader); err != nil {
        return nil, err
    }
    if msg.Family, err = ReadString(reader); err != nil {
        return nil, err
    }
    if msg.Source, err = ReadString(reader); err != nil {
        return nil, err
    }
    if msg.Descriptors, err = ReadString(reader); err != nil {
        return nil, err
    }
    return msg, err
}

func DecodeSetNodeFocus(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &SetNodeFocus{}
    if msg.ID, err = ReadInt(reader); err != nil {
        return nil, err
    }
    return msg, err
}

func DecodeLongTask(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &LongTask{}

@@ -1318,6 +1306,48 @@ func DecodeZustand(reader io.Reader) (Message, error) {
    return msg, err
}

func DecodeJSException(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &JSException{}
    if msg.Name, err = ReadString(reader); err != nil {
        return nil, err
    }
    if msg.Message, err = ReadString(reader); err != nil {
        return nil, err
    }
    if msg.Payload, err = ReadString(reader); err != nil {
        return nil, err
    }
    if msg.Metadata, err = ReadString(reader); err != nil {
        return nil, err
    }
    return msg, err
}

func DecodeSessionEnd(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &SessionEnd{}
    if msg.Timestamp, err = ReadUint(reader); err != nil {
        return nil, err
    }
    if msg.EncryptionKey, err = ReadString(reader); err != nil {
        return nil, err
    }
    return msg, err
}

func DecodeSessionSearch(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &SessionSearch{}
    if msg.Timestamp, err = ReadUint(reader); err != nil {
        return nil, err
    }
    if msg.Partition, err = ReadUint(reader); err != nil {
        return nil, err
    }
    return msg, err
}

func DecodeIOSBatchMeta(reader io.Reader) (Message, error) {
    var err error = nil
    msg := &IOSBatchMeta{}

@@ -1739,7 +1769,7 @@ func ReadMessage(t uint64, reader io.Reader) (Message, error) {
        return DecodeSessionStart(reader)

    case 3:
        return DecodeSessionEnd(reader)
        return DecodeSessionEndDeprecated(reader)

    case 4:
        return DecodeSetPageLocation(reader)

@@ -1792,9 +1822,6 @@ func ReadMessage(t uint64, reader io.Reader) (Message, error) {
    case 20:
        return DecodeMouseMove(reader)

    case 21:
        return DecodeMouseClickDepricated(reader)

    case 22:
        return DecodeConsoleLog(reader)

@@ -1805,7 +1832,7 @@ func ReadMessage(t uint64, reader io.Reader) (Message, error) {
        return DecodePageRenderTiming(reader)

    case 25:
        return DecodeJSException(reader)
        return DecodeJSExceptionDeprecated(reader)

    case 26:
        return DecodeIntegrationEvent(reader)

@@ -1831,9 +1858,6 @@ func ReadMessage(t uint64, reader io.Reader) (Message, error) {
    case 33:
        return DecodeClickEvent(reader)

    case 34:
        return DecodeErrorEvent(reader)

    case 35:
        return DecodeResourceEvent(reader)

@@ -1900,6 +1924,12 @@ func ReadMessage(t uint64, reader io.Reader) (Message, error) {
    case 56:
        return DecodePerformanceTrackAggr(reader)

    case 57:
        return DecodeLoadFontFace(reader)

    case 58:
        return DecodeSetNodeFocus(reader)

    case 59:
        return DecodeLongTask(reader)

@@ -1954,6 +1984,15 @@ func ReadMessage(t uint64, reader io.Reader) (Message, error) {
    case 79:
        return DecodeZustand(reader)

    case 78:
        return DecodeJSException(reader)

    case 126:
        return DecodeSessionEnd(reader)

    case 127:
        return DecodeSessionSearch(reader)

    case 107:
        return DecodeIOSBatchMeta(reader)
@@ -1,12 +1,13 @@
package queue

import (
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/queue/types"
    "openreplay/backend/pkg/redisstream"
)

func NewConsumer(group string, topics []string, handler types.MessageHandler, _ bool, _ int) types.Consumer {
    return redisstream.NewConsumer(group, topics, handler)
func NewConsumer(group string, topics []string, iterator messages.MessageIterator, _ bool, _ int) types.Consumer {
    return redisstream.NewConsumer(group, topics, iterator)
}

func NewProducer(_ int, _ bool) types.Producer {
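Putting the two layers together, a service would now construct its consumer roughly as below; the group/topic names and `handleMessage` are illustrative assumptions, and the final bool/int arguments are ignored by this redis-backed implementation (note the `_` parameters above):

// Sketch: wiring a MessageIterator into the queue consumer.
consumer := queue.NewConsumer(
    "ender",         // assumed consumer-group name
    []string{"raw"}, // assumed stream/topic list
    messages.NewMessageIterator(handleMessage, nil, true), // no filter, auto-decode
    true, // autoCommit: unused by this implementation
    0,    // messageSizeLimit: likewise unused
)
for {
    if err := consumer.ConsumeNext(); err != nil {
        log.Fatalf("consume error: %s", err)
    }
}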
@@ -1,12 +0,0 @@
package queue

import (
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/queue/types"
)

func NewMessageConsumer(group string, topics []string, handler types.RawMessageHandler, autoCommit bool, messageSizeLimit int) types.Consumer {
    return NewConsumer(group, topics, func(sessionID uint64, value []byte, meta *types.Meta) {
        handler(sessionID, messages.NewIterator(value), meta)
    }, autoCommit, messageSizeLimit)
}
@@ -1,30 +1,17 @@
package types

import (
    "openreplay/backend/pkg/messages"
)

// Consumer reads batches of session data from queue (redis or kafka)
type Consumer interface {
    ConsumeNext() error
    Commit() error
    CommitBack(gap int64) error
    Commit() error
    Close()
    HasFirstPartition() bool
}

// Producer sends batches of session data to queue (redis or kafka)
type Producer interface {
    Produce(topic string, key uint64, value []byte) error
    ProduceToPartition(topic string, partition, key uint64, value []byte) error
    Close(timeout int)
    Flush(timeout int)
    Close(timeout int)
}

type Meta struct {
    ID        uint64
    Topic     string
    Timestamp int64
}

type MessageHandler func(uint64, []byte, *Meta)
type DecodedMessageHandler func(uint64, messages.Message, *Meta)
type RawMessageHandler func(uint64, messages.Iterator, *Meta)
@@ -3,6 +3,7 @@ package redisstream
import (
    "log"
    "net"
    "openreplay/backend/pkg/messages"
    "sort"
    "strconv"
    "strings"

@@ -10,8 +11,6 @@ import (
    _redis "github.com/go-redis/redis"
    "github.com/pkg/errors"

    "openreplay/backend/pkg/queue/types"
)

type idsInfo struct {

@@ -21,16 +20,16 @@ type idsInfo struct {
type streamPendingIDsMap map[string]*idsInfo

type Consumer struct {
    redis          *_redis.Client
    streams        []string
    group          string
    messageHandler types.MessageHandler
    idsPending     streamPendingIDsMap
    lastTs         int64
    autoCommit     bool
    redis           *_redis.Client
    streams         []string
    group           string
    messageIterator messages.MessageIterator
    idsPending      streamPendingIDsMap
    lastTs          int64
    autoCommit      bool
}

func NewConsumer(group string, streams []string, messageHandler types.MessageHandler) *Consumer {
func NewConsumer(group string, streams []string, messageIterator messages.MessageIterator) *Consumer {
    redis := getRedisClient()
    for _, stream := range streams {
        err := redis.XGroupCreateMkStream(stream, group, "0").Err()

@@ -52,12 +51,12 @@ func NewConsumer(group string, streams []string, messageHandler types.MessageHandler) *Consumer {
    }

    return &Consumer{
        redis:          redis,
        messageHandler: messageHandler,
        streams:        streams,
        group:          group,
        autoCommit:     true,
        idsPending:     idsPending,
        redis:           redis,
        messageIterator: messageIterator,
        streams:         streams,
        group:           group,
        autoCommit:      true,
        idsPending:      idsPending,
    }
}

@@ -102,11 +101,8 @@ func (c *Consumer) ConsumeNext() error {
    if idx > 0x1FFF {
        return errors.New("Too many messages per ms in redis")
    }
    c.messageHandler(sessionID, []byte(valueString), &types.Meta{
        Topic:     r.Stream,
        Timestamp: int64(ts),
        ID:        ts<<13 | (idx & 0x1FFF), // Max: 4096 messages/ms for 69 years
    })
    bID := ts<<13 | (idx & 0x1FFF) // Max: 4096 messages/ms for 69 years
    c.messageIterator.Iterate([]byte(valueString), messages.NewBatchInfo(sessionID, r.Stream, bID, 0, int64(ts)))
    if c.autoCommit {
        if err = c.redis.XAck(r.Stream, c.group, m.ID).Err(); err != nil {
            return errors.Wrapf(err, "Acknoledgment error for messageID %v", m.ID)

@@ -161,7 +157,3 @@ func (c *Consumer) CommitBack(gap int64) error {
func (c *Consumer) Close() {
    // noop
}

func (c *Consumer) HasFirstPartition() bool {
    return false
}
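The `bID` computed in `ConsumeNext` above packs the millisecond timestamp and a per-millisecond counter into a single uint64: the counter occupies the low 13 bits (the `idx > 0x1FFF` guard rejects overflow) and the timestamp takes the remaining high bits. A round-trip sketch (the function name is an assumption):

// Sketch: pack/unpack of the redis batch ID used above.
func packBatchID(tsMs, idx uint64) uint64 { return tsMs<<13 | (idx & 0x1FFF) }

// tsMs is recovered as id>>13 and the counter as id&0x1FFF;
// 0x1FFF masks 13 bits, i.e. counter values 0..8191 per millisecond.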
@@ -66,6 +66,7 @@ func (b *builder) handleMessage(message Message, messageID uint64) {
    b.lastSystemTime = time.Now()
    for _, p := range b.processors {
        if rm := p.Handle(message, messageID, b.timestamp); rm != nil {
            rm.Meta().SetMeta(message.Meta())
            b.readyMsgs = append(b.readyMsgs, rm)
        }
    }

@@ -30,7 +30,9 @@ func (m *builderMap) GetBuilder(sessionID uint64) *builder {
    return b
}

func (m *builderMap) HandleMessage(sessionID uint64, msg Message, messageID uint64) {
func (m *builderMap) HandleMessage(msg Message) {
    sessionID := msg.SessionID()
    messageID := msg.Meta().Index
    b := m.GetBuilder(sessionID)
    b.handleMessage(msg, messageID)
}

@@ -39,6 +41,7 @@ func (m *builderMap) iterateSessionReadyMessages(sessionID uint64, b *builder, i
    if b.ended || b.lastSystemTime.Add(FORCE_DELETE_TIMEOUT).Before(time.Now()) {
        for _, p := range b.processors {
            if rm := p.Build(); rm != nil {
                rm.Meta().SetSessionID(sessionID)
                b.readyMsgs = append(b.readyMsgs, rm)
            }
        }
Some files were not shown because too many files have changed in this diff.