Merge branch 'dev' into new-frustrations

Commit: a3c7681dfa
255 changed files with 5542 additions and 4835 deletions
.github/workflows/alerts-ee.yaml (vendored): 3 changes

@@ -8,7 +8,8 @@ on:
       default: 'false'
   push:
     branches:
-      - api-v1.10.0
+      - dev
+      - api-*
     paths:
       - "ee/api/**"
       - "api/**"
.github/workflows/alerts.yaml (vendored): 3 changes

@@ -8,7 +8,8 @@ on:
       default: 'false'
   push:
     branches:
-      - api-v1.10.0
+      - dev
+      - api-*
    paths:
      - "api/**"
      - "!api/.gitignore"
.github/workflows/api-ee.yaml (vendored): 3 changes

@@ -8,7 +8,8 @@ on:
       default: 'false'
   push:
     branches:
-      - api-v1.10.0
+      - dev
+      - api-*
     paths:
       - "ee/api/**"
       - "api/**"
.github/workflows/api.yaml (vendored): 3 changes

@@ -8,7 +8,8 @@ on:
       default: 'false'
   push:
     branches:
-      - api-v1.10.0
+      - dev
+      - api-*
     paths:
       - "api/**"
       - "!api/.gitignore"
.github/workflows/assist-ee.yaml (vendored, new file): 121 additions

@@ -0,0 +1,121 @@
# This action will push the assist changes to aws
on:
  workflow_dispatch:
  push:
    branches:
      - dev
      - api-*
    paths:
      - "ee/utilities/**"
      - "utilities/**"
      - "!utilities/.gitignore"
      - "!utilities/*-dev.sh"

name: Build and Deploy Assist EE

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing Assist image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd utilities
          PUSH_IMAGE=0 bash -x ./build.sh ee
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("assist")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("assist")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done
      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              # We've to strip off the -ee, as helm will append it.
              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
          EOF
          done
      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          ## Update secerts
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/assist/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          # We're not passing -ee flag, because helm will add that.
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      # - name: Debug Job
      # if: ${{ failure() }}
      # uses: mxschmitt/action-tmate@v3
      # env:
      #   DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
      #   IMAGE_TAG: ${{ github.sha }}
      #   ENVIRONMENT: staging
      #
.github/workflows/assist.yaml (vendored, new file): 120 additions

@@ -0,0 +1,120 @@
# This action will push the assist changes to aws
on:
  workflow_dispatch:
  push:
    branches:
      - dev
      - api-*
    paths:
      - "utilities/**"
      - "!utilities/.gitignore"
      - "!utilities/*-dev.sh"

name: Build and Deploy Assist

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing Assist image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          skip_security_checks=${{ github.event.inputs.skip_security_checks }}
          cd utilities
          PUSH_IMAGE=0 bash -x ./build.sh
          [[ "x$skip_security_checks" == "xtrue" ]] || {
            curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
            images=("assist")
            for image in ${images[*]};do
              ./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
            done
            err_code=$?
            [[ $err_code -ne 0 ]] && {
              exit $err_code
            }
          } && {
            echo "Skipping Security Checks"
          }
          images=("assist")
          for image in ${images[*]};do
            docker push $DOCKER_REPO/$image:$IMAGE_TAG
          done
      - name: Creating old image input
        run: |
          #
          # Create yaml with existing image tags
          #
          kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
          tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

          echo > /tmp/image_override.yaml

          for line in `cat /tmp/image_tag.txt`;
          do
            image_array=($(echo "$line" | tr ':' '\n'))
            cat <<EOF >> /tmp/image_override.yaml
          ${image_array[0]}:
            image:
              # We've to strip off the -ee, as helm will append it.
              tag: `echo ${image_array[1]} | cut -d '-' -f 1`
          EOF
          done
      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/

          ## Update secerts
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
          sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
          sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
          sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
          sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
          sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.OSS_LICENSE_KEY }}\"/g" vars.yaml

          # Update changed image tag
          sed -i "/assist/{n;n;n;s/.*/    tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

          cat /tmp/image_override.yaml
          # Deploy command
          mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
          rm -rf openreplay/charts/*
          mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
          helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          # We're not passing -ee flag, because helm will add that.
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      # - name: Debug Job
      # if: ${{ failure() }}
      # uses: mxschmitt/action-tmate@v3
      # env:
      #   DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
      #   IMAGE_TAG: ${{ github.sha }}
      #   ENVIRONMENT: staging
      #
.github/workflows/crons-ee.yaml (vendored): 3 changes

@@ -8,7 +8,8 @@ on:
       default: 'false'
   push:
     branches:
-      - api-v1.10.0
+      - dev
+      - api-*
     paths:
       - "ee/api/**"
       - "api/**"
.github/workflows/peers-ee.yaml (vendored): 1 change

@@ -4,6 +4,7 @@ on:
   push:
     branches:
       - dev
+      - api-*
     paths:
       - "ee/peers/**"
       - "peers/**"
.github/workflows/peers.yaml (vendored): 1 change

@@ -4,6 +4,7 @@ on:
   push:
     branches:
       - dev
+      - api-*
     paths:
       - "peers/**"
       - "!peers/.gitignore"
.github/workflows/sourcemaps-reader.yaml (vendored): 1 change

@@ -4,6 +4,7 @@ on:
   push:
     branches:
       - dev
+      - api-*
     paths:
       - "sourcemap-reader/**"
       - "!sourcemap-reader/.gitignore"
.github/workflows/utilities-ee.yaml (vendored, file deleted): 69 deletions

@@ -1,69 +0,0 @@
# This action will push the assist changes to aws
on:
  workflow_dispatch:
  push:
    branches:
      - dev
    paths:
      - "ee/utilities/**"
      - "utilities/*/**"
      - "!utilities/.gitignore"
      - "!utilities/*-dev.sh"

name: Build and Deploy Assist EE

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing api image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
          ENVIRONMENT: staging
        run: |
          cd utilities
          PUSH_IMAGE=1 bash build.sh ee
      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.EE_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
          sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
          sed -i "s#domain_name.*#domain_name: \"ee.openreplay.com\" #g" vars.yaml
          sed -i "s#kubeconfig.*#kubeconfig_path: ${EE_KUBECONFIG}#g" vars.yaml
          sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
          bash kube-install.sh --app utilities
        env:
          DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      # - name: Debug Job
      # if: ${{ failure() }}
      # uses: mxschmitt/action-tmate@v3
      # env:
      #   DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
      #   IMAGE_TAG: ${{ github.sha }}
      #   ENVIRONMENT: staging
      #
.github/workflows/utilities.yaml (vendored, file deleted): 68 deletions

@@ -1,68 +0,0 @@
# This action will push the assist changes to aws
on:
  workflow_dispatch:
  push:
    branches:
      - api-v1.10.0
    paths:
      - "utilities/**"
      - "!utilities/.gitignore"
      - "!utilities/*-dev.sh"

name: Build and Deploy Assist

jobs:
  deploy:
    name: Deploy
    runs-on: ubuntu-latest

    steps:
      - name: Checkout
        uses: actions/checkout@v2
        with:
          # We need to diff with old commit
          # to see which workers got changed.
          fetch-depth: 2

      - name: Docker login
        run: |
          docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

      - uses: azure/k8s-set-context@v1
        with:
          method: kubeconfig
          kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
        id: setcontext

      - name: Building and Pushing api image
        id: build-image
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging
        run: |
          cd utilities
          PUSH_IMAGE=1 bash build.sh
      - name: Deploy to kubernetes
        run: |
          cd scripts/helmcharts/
          sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
          sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
          sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
          sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
          sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
          sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
          bash kube-install.sh --app utilities
        env:
          DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
          IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
          ENVIRONMENT: staging

      # - name: Debug Job
      # if: ${{ failure() }}
      # uses: mxschmitt/action-tmate@v3
      # env:
      #   DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
      #   IMAGE_TAG: ${{ github.sha }}
      #   ENVIRONMENT: staging
      #
(unnamed file)

@@ -34,7 +34,7 @@
 OpenReplay is a session replay suite you can host yourself, that lets you see what users do on your web app, helping you troubleshoot issues faster. It's the only open-source alternative to products such as FullStory and LogRocket.

 - **Session replay.** OpenReplay replays what users do, but not only. It also shows you what went under the hood, how your website or app behaves by capturing network activity, console logs, JS errors, store actions/state, page speed metrics, cpu/memory usage and much more.
-- **Low footprint**. With a ~18KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance.
+- **Low footprint**. With a ~19KB (.gz) tracker that asynchronously sends minimal data for a very limited impact on performance.
 - **Self-hosted**. No more security compliance checks, 3rd-parties processing user data. Everything OpenReplay captures stays in your cloud for a complete control over your data.
 - **Privacy controls**. Fine-grained security features for sanitizing user data.
 - **Easy deploy**. With support of major public cloud providers (AWS, GCP, Azure, DigitalOcean).
(unnamed file)

@@ -53,3 +53,10 @@ async def stop_server():
     await shutdown()
     import os, signal
     os.kill(1, signal.SIGTERM)
+
+
+if config("LOCAL_DEV", default=False, cast=bool):
+    @app.get('/private/trigger', tags=["private"])
+    async def trigger_main_cron():
+        logging.info("Triggering main cron")
+        alerts_processor.process()
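The hunk above registers a manual cron-trigger route only when LOCAL_DEV is enabled. Because FastAPI route decorators execute at import time, a plain module-level if is enough to keep the endpoint (and its OpenAPI entry) out of production processes. A minimal self-contained sketch of the pattern, where the env-var handling stands in for decouple's config() and the cron call is stubbed:

import logging
import os

from fastapi import FastAPI

app = FastAPI()

# Stand-in for config("LOCAL_DEV", default=False, cast=bool).
LOCAL_DEV = os.environ.get("LOCAL_DEV", "false").lower() == "true"

if LOCAL_DEV:
    # The decorator runs at import time, so the route only exists in
    # processes started with LOCAL_DEV enabled.
    @app.get("/private/trigger", tags=["private"])
    async def trigger_main_cron():
        logging.info("Triggering main cron")
        return {"data": "triggered"}  # the real handler calls alerts_processor.process()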
(unnamed file)

@@ -49,10 +49,12 @@ LeftToDb = {
     schemas.AlertColumn.errors__4xx_5xx__count: {
         "table": "events.resources INNER JOIN public.sessions USING(session_id)", "formula": "COUNT(session_id)",
         "condition": "status/100!=2"},
-    schemas.AlertColumn.errors__4xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
-                                             "formula": "COUNT(session_id)", "condition": "status/100=4"},
-    schemas.AlertColumn.errors__5xx__count: {"table": "events.resources INNER JOIN public.sessions USING(session_id)",
-                                             "formula": "COUNT(session_id)", "condition": "status/100=5"},
+    schemas.AlertColumn.errors__4xx__count: {
+        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
+        "formula": "COUNT(session_id)", "condition": "status/100=4"},
+    schemas.AlertColumn.errors__5xx__count: {
+        "table": "events.resources INNER JOIN public.sessions USING(session_id)",
+        "formula": "COUNT(session_id)", "condition": "status/100=5"},
     schemas.AlertColumn.errors__javascript__impacted_sessions__count: {
         "table": "events.resources INNER JOIN public.sessions USING(session_id)",
         "formula": "COUNT(DISTINCT session_id)", "condition": "success= FALSE AND type='script'"},

@@ -95,7 +97,7 @@ def can_check(a) -> bool:
             a["options"].get("lastNotification") is None or
             a["options"]["lastNotification"] <= 0 or
             ((now - a["options"]["lastNotification"]) > a["options"]["renotifyInterval"] * 60 * 1000)) \
-        and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000
+           and ((now - a["createdAt"]) % (TimeInterval[repetitionBase] * 60 * 1000)) < 60 * 1000


 def Build(a):

@@ -119,7 +121,7 @@ def Build(a):
         subQ = f"""SELECT {colDef["formula"]} AS value
                    FROM {colDef["table"]}
                    WHERE project_id = %(project_id)s
-                     {"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
+                     {"AND " + colDef["condition"] if colDef.get("condition") else ""}"""
         j_s = colDef.get("joinSessions", True)
         main_table = colDef["table"]
         is_ss = main_table == "public.sessions"

@@ -142,8 +144,7 @@ def Build(a):
                       "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000,
                       "timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
         else:
-            sub1 = f"""{subQ} AND timestamp>=%(startDate)s
-                              AND timestamp<=%(now)s
+            sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
                               {"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
             params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
             sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}

@@ -206,7 +207,7 @@ def process():
             cur = cur.recreate(rollback=True)
         if len(notifications) > 0:
             cur.execute(
-                cur.mogrify(f"""UPDATE public.Alerts
+                cur.mogrify(f"""UPDATE public.alerts
                                 SET options = options||'{{"lastNotification":{TimeUTC.now()}}}'::jsonb
                                 WHERE alert_id IN %(ids)s;""", {"ids": tuple([n["alertId"] for n in notifications])}))
     if len(notifications) > 0:
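In the can_check hunk above, the final clause gates alert evaluation to a one-minute window at each repetition boundary. A standalone sketch of that modulo arithmetic, assuming epoch-millisecond timestamps and a minute-based interval table as in the code (the interval value below is illustrative):

# Minimal sketch of the repetition window in can_check, assuming
# epoch-millisecond timestamps.
TimeInterval = {"hourly": 60}  # interval name -> minutes

def in_repetition_window(now_ms: int, created_at_ms: int, repetition_base: str) -> bool:
    period_ms = TimeInterval[repetition_base] * 60 * 1000
    # True only during the first minute of each period after creation.
    return ((now_ms - created_at_ms) % period_ms) < 60 * 1000

created = 1_600_000_000_000
assert in_repetition_window(created + 3_600_000, created, "hourly")      # exactly on the hour
assert not in_repetition_window(created + 3_700_000, created, "hourly")  # 1m40s past the hour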
(unnamed file)

@@ -114,17 +114,21 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
         row = cur.fetchone()
         offset = row["count"]
         pg_query = f"""UPDATE dashboards
-                       SET name = %(name)s,
+                        SET name = %(name)s,
                             description= %(description)s
                             {", is_public = %(is_public)s" if data.is_public is not None else ""}
                             {", is_pinned = %(is_pinned)s" if data.is_pinned is not None else ""}
-                       WHERE dashboards.project_id = %(projectId)s
+                        WHERE dashboards.project_id = %(projectId)s
                           AND dashboard_id = %(dashboard_id)s
-                          AND (dashboards.user_id = %(userId)s OR is_public)"""
+                          AND (dashboards.user_id = %(userId)s OR is_public)
+                        RETURNING dashboard_id,name,description,is_public,created_at"""
         if data.metrics is not None and len(data.metrics) > 0:
             pg_query = f"""WITH dash AS ({pg_query})
-                           INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
-                           VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])};"""
+                           INSERT INTO dashboard_widgets(dashboard_id, metric_id, user_id, config)
+                           VALUES {",".join([f"(%(dashboard_id)s, %(metric_id_{i})s, %(userId)s, (SELECT default_config FROM metrics WHERE metric_id=%(metric_id_{i})s)||%(config_{i})s)" for i in range(len(data.metrics))])}
+                           RETURNING (SELECT dashboard_id FROM dash),(SELECT name FROM dash),
+                                     (SELECT description FROM dash),(SELECT is_public FROM dash),
+                                     (SELECT created_at FROM dash);"""
             for i, m in enumerate(data.metrics):
                 params[f"metric_id_{i}"] = m
                 # params[f"config_{i}"] = schemas.AddWidgetToDashboardPayloadSchema.schema() \

@@ -134,8 +138,10 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
                 params[f"config_{i}"] = json.dumps({"position": i + offset})

         cur.execute(cur.mogrify(pg_query, params))
-
-        return {"success": True}
+        row = cur.fetchone()
+        if row:
+            row["created_at"] = TimeUTC.datetime_to_timestamp(row["created_at"])
+        return helper.dict_to_camel_case(row)


 def get_widget(project_id, user_id, dashboard_id, widget_id):
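The update_dashboard hunk turns a bare UPDATE into a data-modifying CTE, so attaching widgets and reading back the updated row happen in a single statement. A stripped-down sketch of the resulting query shape, with one widget and the dynamic VALUES list elided (table and placeholder names follow the hunk):

# Sketch of the query shape produced by update_dashboard when metrics are
# passed: the UPDATE runs inside a CTE, and the INSERT's RETURNING clause
# reads the updated row back out of it.
pg_query = """
WITH dash AS (UPDATE dashboards
              SET name = %(name)s, description = %(description)s
              WHERE dashboards.project_id = %(projectId)s
                AND dashboard_id = %(dashboard_id)s
              RETURNING dashboard_id, name, description, is_public, created_at)
INSERT INTO dashboard_widgets (dashboard_id, metric_id, user_id, config)
VALUES (%(dashboard_id)s, %(metric_id_0)s, %(userId)s, %(config_0)s)
RETURNING (SELECT dashboard_id FROM dash), (SELECT name FROM dash);
"""

A single execute followed by cur.fetchone() then yields the dashboard fields in one round trip, which the endpoint converts with dict_to_camel_case.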
(unnamed file)

@@ -47,7 +47,7 @@ def get_by_url(project_id, data: schemas.GetHeatmapPayloadSchema):
             for j, sf in enumerate(f.filters):
                 f_k = f"issue_svalue{i}{j}"
                 args = {**args, **sh.multi_values(sf.value, value_key=f_k)}
-                if sf.type == schemas.IssueFilterType._on_selector and len(sf.value) > 0:
+                if sf.type == schemas.IssueFilterType._selector and len(sf.value) > 0:
                     constraints.append(sh.multi_conditions(f"clicks.selector = %({f_k})s",
                                                            sf.value, value_key=f_k))

@@ -55,9 +55,8 @@ def get_by_url(project_id, data: schemas.GetHeatmapPayloadSchema):
     constraints.append("""(issues.session_id IS NULL
                            OR (issues.timestamp >= %(startDate)s
                                AND issues.timestamp <= %(endDate)s
-                               AND mis.project_id = %(project_id)s
-                               AND mis.type = 'click_rage'))""")
-    q_count += ",COALESCE(bool_or(mis.issue_id IS NOT NULL), FALSE) AS click_rage"
+                               AND mis.project_id = %(project_id)s))""")
+    q_count += ",COALESCE(bool_or(mis.type = 'click_rage'), FALSE) AS click_rage"
     query_from += """LEFT JOIN events_common.issues USING (timestamp, session_id)
                      LEFT JOIN issues AS mis USING (issue_id)"""
     with pg_client.PostgresClient() as cur:
(unnamed file)

@@ -19,13 +19,13 @@ def __exists_by_name(project_id: int, name: str, exclude_index: Optional[int]) -
         constraints = column_names()
         if exclude_index:
             del constraints[exclude_index - 1]
-        for c in constraints:
-            c += " ILIKE %(name)s"
+        for i in range(len(constraints)):
+            constraints[i] += " ILIKE %(name)s"
         query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
-                                               FROM public.projects
-                                               WHERE project_id = %(project_id)s
-                                                 AND deleted_at ISNULL
-                                                 AND ({" OR ".join(constraints)})) AS exists;""",
+                                              FROM public.projects
+                                              WHERE project_id = %(project_id)s
+                                                AND deleted_at ISNULL
+                                                AND ({" OR ".join(constraints)})) AS exists;""",
                             {"project_id": project_id, "name": name})
         cur.execute(query=query)
         row = cur.fetchone()
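The replaced loop is a classic Python pitfall: c += " ILIKE ..." rebinds the loop variable to a new string (strings are immutable) and never touches the list, so the old query was built without any ILIKE clauses. A two-line demonstration:

constraints = ["name", "description"]
for c in constraints:          # c is a fresh binding to each element
    c += " ILIKE %(name)s"     # rebinds c only; the list is untouched
print(constraints)             # ['name', 'description']  -- unchanged

for i in range(len(constraints)):
    constraints[i] += " ILIKE %(name)s"  # assigns through the list index
print(constraints)             # ['name ILIKE %(name)s', 'description ILIKE %(name)s']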
(unnamed file)

@@ -13,10 +13,10 @@ from chalicelib.utils.TimeUTC import TimeUTC
 def __exists_by_name(name: str, exclude_id: Optional[int]) -> bool:
     with pg_client.PostgresClient() as cur:
         query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
-                                               FROM public.projects
-                                               WHERE deleted_at IS NULL
-                                                 AND name ILIKE %(name)s
-                                                 {"AND project_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""",
+                                              FROM public.projects
+                                              WHERE deleted_at IS NULL
+                                                AND name ILIKE %(name)s
+                                                {"AND project_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
                             {"name": name, "exclude_id": exclude_id})

         cur.execute(query=query)

@@ -212,15 +212,6 @@ def delete(tenant_id, user_id, project_id):
         return {"data": {"state": "success"}}


-def count_by_tenant(tenant_id):
-    with pg_client.PostgresClient() as cur:
-        query = """SELECT count(1) AS count
-                   FROM public.projects AS s
-                   WHERE s.deleted_at IS NULL;"""
-        cur.execute(query=query)
-        return cur.fetchone()["count"]
-
-
 def get_gdpr(project_id):
     with pg_client.PostgresClient() as cur:
         query = cur.mogrify("""SELECT gdpr
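Both __exists_by_name fixes (here and in the webhooks file further down) remove a stray closing parenthesis from a pyformat placeholder. psycopg2 expects exactly %(key)s; with %(exclude_id))s the character after the key is taken as the conversion, so building the query fails before it ever reaches the database. A short sketch, with an illustrative DSN:

import psycopg2

conn = psycopg2.connect("dbname=openreplay")  # illustrative DSN
cur = conn.cursor()

# Correct pyformat placeholder: key in parentheses, then the 's' conversion.
print(cur.mogrify("SELECT %(exclude_id)s;", {"exclude_id": 5}))  # b'SELECT 5;'

# The old query's stray ')' makes ')' the conversion character, so
# query-building raises instead of producing SQL.
try:
    cur.mogrify("SELECT %(exclude_id))s;", {"exclude_id": 5})
except ValueError as e:
    print(e)  # unsupported format character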
(unnamed file)

@@ -57,7 +57,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
             f_k = f"f_value{i}"
             values = {**values,
                       **sh.multi_values(helper.values_for_operator(value=f["value"], op=f["operator"]),
-                                        value_key=f_k)}
+                                         value_key=f_k)}
             if filter_type == schemas.FilterType.user_browser:
                 # op = sessions.__get_sql_operator_multiple(f["operator"])
                 first_stage_extra_constraints.append(

@@ -166,13 +166,15 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
             continue

         values = {**values, **sh.multi_values(helper.values_for_operator(value=s["value"], op=s["operator"]),
-                                              value_key=f"value{i + 1}")}
-        if sh.is_negation_operator(op) and i > 0:
+                                               value_key=f"value{i + 1}")}
+        if sh.is_negation_operator(s["operator"]) and i > 0:
             op = sh.reverse_sql_operator(op)
             main_condition = "left_not.session_id ISNULL"
             extra_from.append(f"""LEFT JOIN LATERAL (SELECT session_id
                                                      FROM {next_table} AS s_main
-                                                     WHERE s_main.{next_col_name} {op} %(value{i + 1})s
+                                                     WHERE
+                                                     {sh.multi_conditions(f"s_main.{next_col_name} {op} %(value{i + 1})s",
+                                                                          values=s["value"], value_key=f"value{i + 1}")}
                                                      AND s_main.timestamp >= T{i}.stage{i}_timestamp
                                                      AND s_main.session_id = T1.session_id) AS left_not ON (TRUE)""")
         else:

@@ -180,7 +182,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
                 main_condition = "TRUE"
             else:
                 main_condition = sh.multi_conditions(f"main.{next_col_name} {op} %(value{i + 1})s",
-                                                     values=s["value"], value_key=f"value{i + 1}")
+                                                      values=s["value"], value_key=f"value{i + 1}")
         n_stages_query.append(f"""
         (SELECT main.session_id,
                 {"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp

@@ -258,7 +260,7 @@ def pearson_corr(x: list, y: list):
         return None, None, False

     if n == 2:
-        return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0
+        return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0, True

     xmean = sum(x) / len(x)
     ymean = sum(y) / len(y)

@@ -574,8 +576,10 @@ def get_top_insights(filter_d, project_id):
     # Obtain the first part of the output
     stages_list = get_stages(stages, rows)
     # Obtain the second part of the output
-    total_drop_due_to_issues = get_issues(stages, rows, first_stage=filter_d.get("firstStage"),
-                                          last_stage=filter_d.get("lastStage"), drop_only=True)
+    total_drop_due_to_issues = get_issues(stages, rows,
+                                          first_stage=filter_d.get("firstStage"),
+                                          last_stage=filter_d.get("lastStage"),
+                                          drop_only=True)
     return stages_list, total_drop_due_to_issues

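The pearson_corr change makes the two-point early return honor the function's three-value contract (coefficient, p-value, validity), matching the return None, None, False branch above it; the old 2-tuple would break any caller unpacking three values. A small sketch of the fixed branch:

import math

def pearson_corr_n2(x, y):
    # With exactly two points the correlation is the sign of the slope;
    # the third element marks the result as usable, as in the fix.
    return math.copysign(1, x[1] - x[0]) * math.copysign(1, y[1] - y[0]), 1.0, True

r, p, valid = pearson_corr_n2([0, 1], [3, 5])  # unpacking three values now works
assert (r, p, valid) == (1.0, 1.0, True)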
(unnamed file)

@@ -514,14 +514,6 @@ def set_password_invitation(user_id, new_password):
     }


-def count_members():
-    with pg_client.PostgresClient() as cur:
-        cur.execute("""SELECT COUNT(user_id)
-                       FROM public.users WHERE deleted_at IS NULL;""")
-        r = cur.fetchone()
-        return r["count"]
-
-
 def email_exists(email):
     with pg_client.PostgresClient() as cur:
         cur.execute(
(unnamed file)

@@ -110,11 +110,11 @@ def exists_by_name(name: str, exclude_id: Optional[int], webhook_type: str = sch
                    tenant_id: Optional[int] = None) -> bool:
     with pg_client.PostgresClient() as cur:
         query = cur.mogrify(f"""SELECT EXISTS(SELECT 1
-                                               FROM public.webhooks
-                                               WHERE name ILIKE %(name)s
-                                                 AND deleted_at ISNULL
-                                                 AND type=%(webhook_type)s
-                                                 {"AND webhook_id!=%(exclude_id))s" if exclude_id else ""}) AS exists;""",
+                                              FROM public.webhooks
+                                              WHERE name ILIKE %(name)s
+                                                AND deleted_at ISNULL
+                                                AND type=%(webhook_type)s
+                                                {"AND webhook_id!=%(exclude_id)s" if exclude_id else ""}) AS exists;""",
                             {"name": name, "exclude_id": exclude_id, "webhook_type": webhook_type})
         cur.execute(query)
         row = cur.fetchone()
(unnamed file)

@@ -283,6 +283,7 @@ def custom_alert_to_front(values):
     # to support frontend format for payload
     if values.get("seriesId") is not None and values["query"]["left"] == schemas.AlertColumn.custom:
         values["query"]["left"] = values["seriesId"]
+        values["seriesId"] = None
     return values

(unnamed file)

@@ -63,25 +63,25 @@ async def logout_user(response: Response, context: schemas.CurrentContext = Depe

 @app.post('/{projectId}/sessions/search', tags=["sessions"])
 async def sessions_search(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
-                          context: schemas.CurrentContext = Depends(OR_context)):
+                           context: schemas.CurrentContext = Depends(OR_context)):
     data = sessions.search_sessions(data=data, project_id=projectId, user_id=context.user_id)
     return {'data': data}


 @app.post('/{projectId}/sessions/search/ids', tags=["sessions"])
 async def session_ids_search(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
-                             context: schemas.CurrentContext = Depends(OR_context)):
+                              context: schemas.CurrentContext = Depends(OR_context)):
     data = sessions.search_sessions(data=data, project_id=projectId, user_id=context.user_id, ids_only=True)
     return {'data': data}


 @app.get('/{projectId}/events/search', tags=["events"])
 async def events_search(projectId: int, q: str,
-                        type: Union[schemas.FilterType, schemas.EventType,
-                                    schemas.PerformanceEventType, schemas.FetchFilterType,
-                                    schemas.GraphqlFilterType, str] = None,
-                        key: str = None, source: str = None, live: bool = False,
-                        context: schemas.CurrentContext = Depends(OR_context)):
+                         type: Union[schemas.FilterType, schemas.EventType,
+                                     schemas.PerformanceEventType, schemas.FetchFilterType,
+                                     schemas.GraphqlFilterType, str] = None,
+                         key: str = None, source: str = None, live: bool = False,
+                         context: schemas.CurrentContext = Depends(OR_context)):
     if len(q) == 0:
         return {"data": []}
     if live:

@@ -117,8 +117,8 @@ async def get_integrations_status(projectId: int, context: schemas.CurrentContex

 @app.post('/{projectId}/integrations/{integration}/notify/{webhookId}/{source}/{sourceId}', tags=["integrations"])
 async def integration_notify(projectId: int, integration: str, webhookId: int, source: str, sourceId: str,
-                             data: schemas.IntegrationNotificationSchema = Body(...),
-                             context: schemas.CurrentContext = Depends(OR_context)):
+                              data: schemas.IntegrationNotificationSchema = Body(...),
+                              context: schemas.CurrentContext = Depends(OR_context)):
     comment = None
     if data.comment:
         comment = data.comment

@@ -151,7 +151,7 @@ async def get_sentry(projectId: int, context: schemas.CurrentContext = Depends(O

 @app.post('/{projectId}/integrations/sentry', tags=["integrations"])
 async def add_edit_sentry(projectId: int, data: schemas.SentrySchema = Body(...),
-                          context: schemas.CurrentContext = Depends(OR_context)):
+                           context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_sentry.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}


@@ -177,7 +177,7 @@ async def get_datadog(projectId: int, context: schemas.CurrentContext = Depends(

 @app.post('/{projectId}/integrations/datadog', tags=["integrations"])
 async def add_edit_datadog(projectId: int, data: schemas.DatadogSchema = Body(...),
-                           context: schemas.CurrentContext = Depends(OR_context)):
+                            context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_datadog.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}


@@ -198,7 +198,7 @@ async def get_stackdriver(projectId: int, context: schemas.CurrentContext = Depe

 @app.post('/{projectId}/integrations/stackdriver', tags=["integrations"])
 async def add_edit_stackdriver(projectId: int, data: schemas.StackdriverSchema = Body(...),
-                               context: schemas.CurrentContext = Depends(OR_context)):
+                                context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_stackdriver.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}


@@ -219,7 +219,7 @@ async def get_newrelic(projectId: int, context: schemas.CurrentContext = Depends

 @app.post('/{projectId}/integrations/newrelic', tags=["integrations"])
 async def add_edit_newrelic(projectId: int, data: schemas.NewrelicSchema = Body(...),
-                            context: schemas.CurrentContext = Depends(OR_context)):
+                             context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_newrelic.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}


@@ -240,7 +240,7 @@ async def get_rollbar(projectId: int, context: schemas.CurrentContext = Depends(

 @app.post('/{projectId}/integrations/rollbar', tags=["integrations"])
 async def add_edit_rollbar(projectId: int, data: schemas.RollbarSchema = Body(...),
-                           context: schemas.CurrentContext = Depends(OR_context)):
+                            context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_rollbar.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}


@@ -251,7 +251,7 @@ async def delete_datadog(projectId: int, context: schemas.CurrentContext = Depen

 @app.post('/integrations/bugsnag/list_projects', tags=["integrations"])
 async def list_projects_bugsnag(data: schemas.BugsnagBasicSchema = Body(...),
-                                context: schemas.CurrentContext = Depends(OR_context)):
+                                 context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_bugsnag.list_projects(auth_token=data.authorizationToken)}


@@ -267,7 +267,7 @@ async def get_bugsnag(projectId: int, context: schemas.CurrentContext = Depends(

 @app.post('/{projectId}/integrations/bugsnag', tags=["integrations"])
 async def add_edit_bugsnag(projectId: int, data: schemas.BugsnagSchema = Body(...),
-                           context: schemas.CurrentContext = Depends(OR_context)):
+                            context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_bugsnag.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}


@@ -278,7 +278,7 @@ async def delete_bugsnag(projectId: int, context: schemas.CurrentContext = Depen

 @app.post('/integrations/cloudwatch/list_groups', tags=["integrations"])
 async def list_groups_cloudwatch(data: schemas.CloudwatchBasicSchema = Body(...),
-                                 context: schemas.CurrentContext = Depends(OR_context)):
+                                  context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_cloudwatch.list_log_groups(aws_access_key_id=data.awsAccessKeyId,
                                                         aws_secret_access_key=data.awsSecretAccessKey,
                                                         region=data.region)}

@@ -296,7 +296,7 @@ async def get_cloudwatch(projectId: int, context: schemas.CurrentContext = Depen

 @app.post('/{projectId}/integrations/cloudwatch', tags=["integrations"])
 async def add_edit_cloudwatch(projectId: int, data: schemas.CloudwatchSchema = Body(...),
-                              context: schemas.CurrentContext = Depends(OR_context)):
+                               context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_cloudwatch.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}


@@ -317,13 +317,13 @@ async def get_elasticsearch(projectId: int, context: schemas.CurrentContext = De

 @app.post('/integrations/elasticsearch/test', tags=["integrations"])
 async def test_elasticsearch_connection(data: schemas.ElasticsearchBasicSchema = Body(...),
-                                        context: schemas.CurrentContext = Depends(OR_context)):
+                                         context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_elasticsearch.ping(tenant_id=context.tenant_id, **data.dict())}


 @app.post('/{projectId}/integrations/elasticsearch', tags=["integrations"])
 async def add_edit_elasticsearch(projectId: int, data: schemas.ElasticsearchSchema = Body(...),
-                                 context: schemas.CurrentContext = Depends(OR_context)):
+                                  context: schemas.CurrentContext = Depends(OR_context)):
     return {
         "data": log_tool_elasticsearch.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}

@@ -345,7 +345,7 @@ async def get_sumologic(projectId: int, context: schemas.CurrentContext = Depend

 @app.post('/{projectId}/integrations/sumologic', tags=["integrations"])
 async def add_edit_sumologic(projectId: int, data: schemas.SumologicSchema = Body(...),
-                             context: schemas.CurrentContext = Depends(OR_context)):
+                              context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": log_tool_sumologic.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}


@@ -385,7 +385,7 @@ async def get_integration_status_github(context: schemas.CurrentContext = Depend

 @app.post('/integrations/jira', tags=["integrations"])
 async def add_edit_jira_cloud(data: schemas.JiraSchema = Body(...),
-                              context: schemas.CurrentContext = Depends(OR_context)):
+                               context: schemas.CurrentContext = Depends(OR_context)):
     if not data.url.endswith('atlassian.net'):
         return {"errors": ["url must be a valid JIRA URL (example.atlassian.net)"]}
     error, integration = integrations_manager.get_integration(tool=integration_jira_cloud.PROVIDER,

@@ -398,7 +398,7 @@ async def add_edit_jira_cloud(data: schemas.JiraSchema = Body(...),

 @app.post('/integrations/github', tags=["integrations"])
 async def add_edit_github(data: schemas.GithubSchema = Body(...),
-                          context: schemas.CurrentContext = Depends(OR_context)):
+                           context: schemas.CurrentContext = Depends(OR_context)):
     error, integration = integrations_manager.get_integration(tool=integration_github.PROVIDER,
                                                               tenant_id=context.tenant_id,
                                                               user_id=context.user_id)

@@ -472,8 +472,8 @@ async def get_all_assignments(projectId: int, context: schemas.CurrentContext =

 @app.post('/{projectId}/sessions/{sessionId}/assign/projects/{integrationProjectId}', tags=["assignment"])
 async def create_issue_assignment(projectId: int, sessionId: int, integrationProjectId,
-                                  data: schemas.AssignmentSchema = Body(...),
-                                  context: schemas.CurrentContext = Depends(OR_context)):
+                                   data: schemas.AssignmentSchema = Body(...),
+                                   context: schemas.CurrentContext = Depends(OR_context)):
     data = sessions_assignments.create_new_assignment(tenant_id=context.tenant_id, project_id=projectId,
                                                       session_id=sessionId,
                                                       creator_id=context.user_id, assignee=data.assignee,

@@ -494,7 +494,7 @@ async def get_gdpr(projectId: int, context: schemas.CurrentContext = Depends(OR_

 @app.post('/{projectId}/gdpr', tags=["projects", "gdpr"])
 async def edit_gdpr(projectId: int, data: schemas.GdprSchema = Body(...),
-                    context: schemas.CurrentContext = Depends(OR_context)):
+                     context: schemas.CurrentContext = Depends(OR_context)):
     result = projects.edit_gdpr(project_id=projectId, gdpr=data.dict())
     if "errors" in result:
         return result

@@ -515,19 +515,19 @@ async def get_metadata(projectId: int, context: schemas.CurrentContext = Depends

 @app.post('/{projectId}/metadata/list', tags=["metadata"])
 async def add_edit_delete_metadata(projectId: int, data: schemas.MetadataListSchema = Body(...),
-                                   context: schemas.CurrentContext = Depends(OR_context)):
+                                    context: schemas.CurrentContext = Depends(OR_context)):
     return metadata.add_edit_delete(tenant_id=context.tenant_id, project_id=projectId, new_metas=data.list)


 @app.post('/{projectId}/metadata', tags=["metadata"])
 async def add_metadata(projectId: int, data: schemas.MetadataBasicSchema = Body(...),
-                       context: schemas.CurrentContext = Depends(OR_context)):
+                        context: schemas.CurrentContext = Depends(OR_context)):
     return metadata.add(tenant_id=context.tenant_id, project_id=projectId, new_name=data.key)


 @app.post('/{projectId}/metadata/{index}', tags=["metadata"])
 async def edit_metadata(projectId: int, index: int, data: schemas.MetadataBasicSchema = Body(...),
-                        context: schemas.CurrentContext = Depends(OR_context)):
+                         context: schemas.CurrentContext = Depends(OR_context)):
     return metadata.edit(tenant_id=context.tenant_id, project_id=projectId, index=index,
                          new_name=data.key)

@@ -560,7 +560,7 @@ async def get_capture_status(projectId: int, context: schemas.CurrentContext = D

 @app.post('/{projectId}/sample_rate', tags=["projects"])
 async def update_capture_status(projectId: int, data: schemas.SampleRateSchema = Body(...),
-                                context: schemas.CurrentContext = Depends(OR_context)):
+                                 context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": projects.update_capture_status(project_id=projectId, changes=data.dict())}


@@ -581,7 +581,7 @@ async def errors_merge(context: schemas.CurrentContext = Depends(OR_context)):

 @app.post('/{projectId}/alerts', tags=["alerts"])
 async def create_alert(projectId: int, data: schemas.AlertSchema = Body(...),
-                       context: schemas.CurrentContext = Depends(OR_context)):
+                        context: schemas.CurrentContext = Depends(OR_context)):
     return alerts.create(project_id=projectId, data=data)


@@ -603,7 +603,7 @@ async def get_alert(projectId: int, alertId: int, context: schemas.CurrentContex

 @app.post('/{projectId}/alerts/{alertId}', tags=["alerts"])
 async def update_alert(projectId: int, alertId: int, data: schemas.AlertSchema = Body(...),
-                       context: schemas.CurrentContext = Depends(OR_context)):
+                        context: schemas.CurrentContext = Depends(OR_context)):
     return alerts.update(id=alertId, data=data)


@@ -615,7 +615,7 @@ async def delete_alert(projectId: int, alertId: int, context: schemas.CurrentCon
 @app_apikey.put('/{projectKey}/sourcemaps/', tags=["sourcemaps"])
 @app_apikey.put('/{projectKey}/sourcemaps', tags=["sourcemaps"])
 async def sign_sourcemap_for_upload(projectKey: str, data: schemas.SourcemapUploadPayloadSchema = Body(...),
-                                    context: schemas.CurrentContext = Depends(OR_context)):
+                                     context: schemas.CurrentContext = Depends(OR_context)):
     project_id = projects.get_internal_project_id(projectKey)
     if project_id is None:
         return {"errors": ["Project not found."]}

@@ -630,7 +630,7 @@ async def get_weekly_report_config(context: schemas.CurrentContext = Depends(OR_

 @app.post('/config/weekly_report', tags=["weekly report config"])
 async def edit_weekly_report_config(data: schemas.WeeklyReportConfigSchema = Body(...),
-                                    context: schemas.CurrentContext = Depends(OR_context)):
+                                     context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": weekly_report.edit_config(user_id=context.user_id, weekly_report=data.weekly_report)}


@@ -652,14 +652,14 @@ async def get_sessions_live(projectId: int, userId: str = None, context: schemas

 @app.post('/{projectId}/assist/sessions', tags=["assist"])
 async def sessions_live(projectId: int, data: schemas.LiveSessionsSearchPayloadSchema = Body(...),
-                        context: schemas.CurrentContext = Depends(OR_context)):
+                         context: schemas.CurrentContext = Depends(OR_context)):
     data = assist.get_live_sessions_ws(projectId, body=data)
     return {'data': data}


 @app.post('/{projectId}/mobile/{sessionId}/urls', tags=['mobile'])
 async def mobile_signe(projectId: int, sessionId: int, data: schemas.MobileSignPayloadSchema = Body(...),
-                       context: schemas.CurrentContext = Depends(OR_context)):
+                        context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": mobile.sign_keys(project_id=projectId, session_id=sessionId, keys=data.keys)}


@@ -671,7 +671,7 @@ async def signup_handler(data: schemas.UserSignupSchema = Body(...)):

 @app.post('/projects', tags=['projects'])
 async def create_project(data: schemas.CreateProjectSchema = Body(...),
-                         context: schemas.CurrentContext = Depends(OR_context)):
+                          context: schemas.CurrentContext = Depends(OR_context)):
     return projects.create(tenant_id=context.tenant_id, user_id=context.user_id, data=data)


@@ -686,7 +686,7 @@ async def get_project(projectId: int, context: schemas.CurrentContext = Depends(

 @app.put('/projects/{projectId}', tags=['projects'])
 async def edit_project(projectId: int, data: schemas.CreateProjectSchema = Body(...),
-                       context: schemas.CurrentContext = Depends(OR_context)):
+                        context: schemas.CurrentContext = Depends(OR_context)):
     return projects.edit(tenant_id=context.tenant_id, user_id=context.user_id, data=data, project_id=projectId)


@@ -705,7 +705,7 @@ async def generate_new_tenant_token(context: schemas.CurrentContext = Depends(OR
 @app.post('/client', tags=['client'])
 @app.put('/client', tags=['client'])
 async def edit_client(data: schemas.UpdateTenantSchema = Body(...),
-                      context: schemas.CurrentContext = Depends(OR_context)):
+                       context: schemas.CurrentContext = Depends(OR_context)):
     return tenants.update(tenant_id=context.tenant_id, user_id=context.user_id, data=data)


@@ -726,7 +726,7 @@ async def view_notifications(notificationId: int, context: schemas.CurrentContex

 @app.post('/notifications/view', tags=['notifications'])
 async def batch_view_notifications(data: schemas.NotificationsViewSchema,
-                                   context: schemas.CurrentContext = Depends(OR_context)):
+                                    context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": notifications.view_notification(notification_ids=data.ids,
                                                     startTimestamp=data.startTimestamp,
                                                     endTimestamp=data.endTimestamp,

@@ -776,7 +776,7 @@ async def delete_slack_integration(webhookId: int, context: schemas.CurrentConte

 @app.put('/webhooks', tags=["webhooks"])
 async def add_edit_webhook(data: schemas.CreateEditWebhookSchema = Body(...),
-                           context: schemas.CurrentContext = Depends(OR_context)):
+                            context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": webhook.add_edit(tenant_id=context.tenant_id, data=data.dict(), replace_none=True)}


@@ -812,7 +812,7 @@ async def generate_new_user_token(context: schemas.CurrentContext = Depends(OR_c

 @app.post('/account/password', tags=["account"])
 async def change_client_password(data: schemas.EditUserPasswordSchema = Body(...),
-                                 context: schemas.CurrentContext = Depends(OR_context)):
+                                  context: schemas.CurrentContext = Depends(OR_context)):
     return users.change_password(email=context.email, old_password=data.old_password,
                                  new_password=data.new_password, tenant_id=context.tenant_id,
                                  user_id=context.user_id)

@@ -820,7 +820,7 @@ async def change_client_password(data: schemas.EditUserPasswordSchema = Body(...

 @app.post('/{projectId}/saved_search', tags=["savedSearch"])
 async def add_saved_search(projectId: int, data: schemas.SavedSearchSchema = Body(...),
-                           context: schemas.CurrentContext = Depends(OR_context)):
+                            context: schemas.CurrentContext = Depends(OR_context)):
     return saved_search.create(project_id=projectId, user_id=context.user_id, data=data)


@@ -836,7 +836,7 @@ async def get_saved_search(projectId: int, search_id: int, context: schemas.Curr

 @app.post('/{projectId}/saved_search/{search_id}', tags=["savedSearch"])
 async def update_saved_search(projectId: int, search_id: int, data: schemas.SavedSearchSchema = Body(...),
-                              context: schemas.CurrentContext = Depends(OR_context)):
+                               context: schemas.CurrentContext = Depends(OR_context)):
     return {"data": saved_search.update(user_id=context.user_id, search_id=search_id, data=data, project_id=projectId)}


@@ -862,7 +862,7 @@ async def get_msteams_channels(context: schemas.CurrentContext = Depends(OR_cont

 @app.post('/integrations/msteams', tags=['integrations'])
 async def add_msteams_integration(data: schemas.AddCollaborationSchema,
-                                  context: schemas.CurrentContext = Depends(OR_context)):
+                                   context: schemas.CurrentContext = Depends(OR_context)):
     n = MSTeams.add(tenant_id=context.tenant_id, data=data)
     if n is None:
         return {

@@ -874,7 +874,7 @@ async def add_msteams_integration(data: schemas.AddCollaborationSchema,

 @app.post('/integrations/msteams/{webhookId}', tags=['integrations'])
 async def edit_msteams_integration(webhookId: int, data: schemas.EditCollaborationSchema = Body(...),
-                                   context: schemas.CurrentContext = Depends(OR_context)):
+                                    context: schemas.CurrentContext = Depends(OR_context)):
     if len(data.url) > 0:
         old = MSTeams.get_integration(tenant_id=context.tenant_id, integration_id=webhookId)
         if not old:

@@ -894,15 +894,6 @@ async def delete_msteams_integration(webhookId: int, context: schemas.CurrentCon
     return webhook.delete(tenant_id=context.tenant_id, webhook_id=webhookId)


-@public_app.get('/general_stats', tags=["private"], include_in_schema=False)
-async def get_general_stats():
-    return {"data": {"sessions:": sessions.count_all()}}
-
-
 @public_app.get('/', tags=["health"])
 @public_app.post('/', tags=["health"])
 @public_app.put('/', tags=["health"])
 @public_app.delete('/', tags=["health"])
 async def health_check():
-    return {"data": {"stage": f"live {config('version_number', default='')}",
-                     "internalCrons": config("LOCAL_CRONS", default=False, cast=bool)}}
+    return {}
|
|
@ -47,22 +47,15 @@ async def get_account(context: schemas.CurrentContext = Depends(OR_context)):
|
|||
|
||||
@app.post('/account', tags=["account"])
|
||||
async def edit_account(data: schemas.EditUserSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return users.edit(tenant_id=context.tenant_id, user_id_to_update=context.user_id, changes=data,
|
||||
editor_id=context.user_id)
|
||||
|
||||
|
||||
@app.get('/projects/limit', tags=['projects'])
|
||||
async def get_projects_limit(context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": {
|
||||
"current": projects.count_by_tenant(tenant_id=context.tenant_id),
|
||||
"remaining": -1
|
||||
}}
|
||||
|
||||
|
||||
@app.post('/integrations/slack', tags=['integrations'])
|
||||
@app.put('/integrations/slack', tags=['integrations'])
|
||||
async def add_slack_integration(data: schemas.AddCollaborationSchema, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
async def add_slack_integration(data: schemas.AddCollaborationSchema,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
n = Slack.add(tenant_id=context.tenant_id, data=data)
|
||||
if n is None:
|
||||
return {
|
||||
|
|
@ -73,7 +66,7 @@ async def add_slack_integration(data: schemas.AddCollaborationSchema, context: s
|
|||
|
||||
@app.post('/integrations/slack/{integrationId}', tags=['integrations'])
|
||||
async def edit_slack_integration(integrationId: int, data: schemas.EditCollaborationSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
if len(data.url) > 0:
|
||||
old = Slack.get_integration(tenant_id=context.tenant_id, integration_id=integrationId)
|
||||
if not old:
|
||||
|
|
@ -90,7 +83,7 @@ async def edit_slack_integration(integrationId: int, data: schemas.EditCollabora
|
|||
|
||||
@app.post('/client/members', tags=["client"])
|
||||
async def add_member(background_tasks: BackgroundTasks, data: schemas.CreateMemberSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return users.create_member(tenant_id=context.tenant_id, user_id=context.user_id, data=data.dict(),
|
||||
background_tasks=background_tasks)
|
||||
|
||||
|
|
@@ -127,14 +120,14 @@ async def change_password_by_invitation(data: schemas.EditPasswordByInvitationSc

@app.put('/client/members/{memberId}', tags=["client"])
async def edit_member(memberId: int, data: schemas.EditMemberSchema,
                      context: schemas.CurrentContext = Depends(OR_context)):
    return users.edit_member(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data,
                             user_id_to_update=memberId)


@app.get('/metadata/session_search', tags=["metadata"])
async def search_sessions_by_metadata(key: str, value: str, projectId: Optional[int] = None,
                                      context: schemas.CurrentContext = Depends(OR_context)):
    if key is None or value is None or len(value) == 0 and len(key) == 0:
        return {"errors": ["please provide a key&value for search"]}
    if len(value) == 0:

@@ -154,7 +147,7 @@ async def get_projects(context: schemas.CurrentContext = Depends(OR_context)):

@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
async def get_session(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
                      context: schemas.CurrentContext = Depends(OR_context)):
    if isinstance(sessionId, str):
        return {"errors": ["session not found"]}
    data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True,

@@ -171,7 +164,7 @@ async def get_session(projectId: int, sessionId: Union[int, str], background_tas

@app.get('/{projectId}/sessions/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
async def get_error_trace(projectId: int, sessionId: int, errorId: str,
                          context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_trace(project_id=projectId, error_id=errorId)
    if "errors" in data:
        return data

@@ -182,19 +175,19 @@ async def get_error_trace(projectId: int, sessionId: int, errorId: str,

@app.post('/{projectId}/errors/search', tags=['errors'])
async def errors_search(projectId: int, data: schemas.SearchErrorsSchema = Body(...),
                        context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": errors.search(data, projectId, user_id=context.user_id)}


@app.get('/{projectId}/errors/stats', tags=['errors'])
async def errors_stats(projectId: int, startTimestamp: int, endTimestamp: int,
                       context: schemas.CurrentContext = Depends(OR_context)):
    return errors.stats(projectId, user_id=context.user_id, startTimestamp=startTimestamp, endTimestamp=endTimestamp)


@app.get('/{projectId}/errors/{errorId}', tags=['errors'])
async def errors_get_details(projectId: int, errorId: str, background_tasks: BackgroundTasks, density24: int = 24,
                             density30: int = 30, context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
                              **{"density24": density24, "density30": density30})
    if data.get("data") is not None:

@@ -205,8 +198,8 @@ async def errors_get_details(projectId: int, errorId: str, background_tasks: Bac

@app.get('/{projectId}/errors/{errorId}/stats', tags=['errors'])
async def errors_get_details_right_column(projectId: int, errorId: str, startDate: int = TimeUTC.now(-7),
                                          endDate: int = TimeUTC.now(), density: int = 7,
                                          context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_details_chart(project_id=projectId, user_id=context.user_id, error_id=errorId,
                                    **{"startDate": startDate, "endDate": endDate, "density": density})
    return data

@@ -214,7 +207,7 @@ async def errors_get_details_right_column(projectId: int, errorId: str, startDat

@app.get('/{projectId}/errors/{errorId}/sourcemaps', tags=['errors'])
async def errors_get_details_sourcemaps(projectId: int, errorId: str,
                                        context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_trace(project_id=projectId, error_id=errorId)
    if "errors" in data:
        return data

@@ -225,7 +218,8 @@ async def errors_get_details_sourcemaps(projectId: int, errorId: str,

@app.get('/{projectId}/errors/{errorId}/{action}', tags=["errors"])
async def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
                                    endDate: int = TimeUTC.now(), context: schemas.CurrentContext = Depends(OR_context)):
                                    endDate: int = TimeUTC.now(),
                                    context: schemas.CurrentContext = Depends(OR_context)):
    if action == "favorite":
        return errors_favorite.favorite_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
    elif action == "sessions":

@@ -242,7 +236,7 @@ async def add_remove_favorite_error(projectId: int, errorId: str, action: str, s

@app.get('/{projectId}/assist/sessions/{sessionId}', tags=["assist"])
async def get_live_session(projectId: int, sessionId: str, background_tasks: BackgroundTasks,
                           context: schemas.CurrentContext = Depends(OR_context)):
    data = assist.get_live_session_by_id(project_id=projectId, session_id=sessionId)
    if data is None:
        data = sessions.get_by_id2_pg(context=context, project_id=projectId, session_id=sessionId,

@@ -257,7 +251,7 @@ async def get_live_session(projectId: int, sessionId: str, background_tasks: Bac

@app.get('/{projectId}/unprocessed/{sessionId}/dom.mob', tags=["assist"])
async def get_live_session_replay_file(projectId: int, sessionId: Union[int, str],
                                       context: schemas.CurrentContext = Depends(OR_context)):
    not_found = {"errors": ["Replay file not found"]}
    if isinstance(sessionId, str):
        print(f"{sessionId} not a valid number.")

@@ -277,7 +271,7 @@ async def get_live_session_replay_file(projectId: int, sessionId: Union[int, str

@app.get('/{projectId}/unprocessed/{sessionId}/devtools.mob', tags=["assist"])
async def get_live_session_devtools_file(projectId: int, sessionId: Union[int, str],
                                         context: schemas.CurrentContext = Depends(OR_context)):
    not_found = {"errors": ["Devtools file not found"]}
    if isinstance(sessionId, str):
        print(f"{sessionId} not a valid number.")

@@ -297,13 +291,13 @@ async def get_live_session_devtools_file(projectId: int, sessionId: Union[int, s

@app.post('/{projectId}/heatmaps/url', tags=["heatmaps"])
async def get_heatmaps_by_url(projectId: int, data: schemas.GetHeatmapPayloadSchema = Body(...),
                              context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": heatmaps.get_by_url(project_id=projectId, data=data)}


@app.get('/{projectId}/sessions/{sessionId}/favorite', tags=["sessions"])
async def add_remove_favorite_session2(projectId: int, sessionId: int,
                                       context: schemas.CurrentContext = Depends(OR_context)):
    return {
        "data": sessions_favorite.favorite_session(context=context, project_id=projectId, session_id=sessionId)}

@@ -322,7 +316,7 @@ async def assign_session(projectId: int, sessionId, context: schemas.CurrentCont

@app.get('/{projectId}/sessions/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"])
async def assign_session(projectId: int, sessionId: int, issueId: str,
                         context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.get(project_id=projectId, session_id=sessionId, assignment_id=issueId,
                                    tenant_id=context.tenant_id, user_id=context.user_id)
    if "errors" in data:

@@ -333,8 +327,9 @@ async def assign_session(projectId: int, sessionId: int, issueId: str,


@app.post('/{projectId}/sessions/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
async def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schemas.CommentAssignmentSchema = Body(...),
                             context: schemas.CurrentContext = Depends(OR_context)):
async def comment_assignment(projectId: int, sessionId: int, issueId: str,
                             data: schemas.CommentAssignmentSchema = Body(...),
                             context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.comment(tenant_id=context.tenant_id, project_id=projectId,
                                        session_id=sessionId, assignment_id=issueId,
                                        user_id=context.user_id, message=data.message)

@@ -347,7 +342,7 @@ async def comment_assignment(projectId: int, sessionId: int, issueId: str, data:

@app.post('/{projectId}/sessions/{sessionId}/notes', tags=["sessions", "notes"])
async def create_note(projectId: int, sessionId: int, data: schemas.SessionNoteSchema = Body(...),
                      context: schemas.CurrentContext = Depends(OR_context)):
    if not sessions.session_exists(project_id=projectId, session_id=sessionId):
        return {"errors": ["Session not found"]}
    data = sessions_notes.create(tenant_id=context.tenant_id, project_id=projectId,

@@ -372,7 +367,7 @@ async def get_session_notes(projectId: int, sessionId: int, context: schemas.Cur

@app.post('/{projectId}/notes/{noteId}', tags=["sessions", "notes"])
async def edit_note(projectId: int, noteId: int, data: schemas.SessionUpdateNoteSchema = Body(...),
                    context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_notes.edit(tenant_id=context.tenant_id, project_id=projectId, user_id=context.user_id,
                               note_id=noteId, data=data)
    if "errors" in data.keys():

@@ -391,21 +386,21 @@ async def delete_note(projectId: int, noteId: int, context: schemas.CurrentConte

@app.get('/{projectId}/notes/{noteId}/slack/{webhookId}', tags=["sessions", "notes"])
async def share_note_to_slack(projectId: int, noteId: int, webhookId: int,
                              context: schemas.CurrentContext = Depends(OR_context)):
    return sessions_notes.share_to_slack(tenant_id=context.tenant_id, project_id=projectId, user_id=context.user_id,
                                         note_id=noteId, webhook_id=webhookId)


@app.get('/{projectId}/notes/{noteId}/msteams/{webhookId}', tags=["sessions", "notes"])
async def share_note_to_msteams(projectId: int, noteId: int, webhookId: int,
                                context: schemas.CurrentContext = Depends(OR_context)):
    return sessions_notes.share_to_msteams(tenant_id=context.tenant_id, project_id=projectId, user_id=context.user_id,
                                           note_id=noteId, webhook_id=webhookId)


@app.post('/{projectId}/notes', tags=["sessions", "notes"])
async def get_all_notes(projectId: int, data: schemas.SearchNoteSchema = Body(...),
                        context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_notes.get_all_notes_by_project_id(tenant_id=context.tenant_id, project_id=projectId,
                                                      user_id=context.user_id, data=data)
    if "errors" in data:

@@ -415,5 +410,5 @@ async def get_all_notes(projectId: int, data: schemas.SearchNoteSchema = Body(..

@app.post('/{projectId}/click_maps/search', tags=["click maps"])
async def click_map_search(projectId: int, data: schemas.FlatClickMapSessionsSearch = Body(...),
                           context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": click_maps.search_short_session(user_id=context.user_id, data=data, project_id=projectId)}

3
api/run-alerts-dev.sh
Executable file
@@ -0,0 +1,3 @@
#!/bin/zsh

uvicorn app_alerts:app --reload

@@ -363,7 +363,8 @@ class AlertSchema(BaseModel):

    @root_validator(pre=True)
    def transform_alert(cls, values):
        if values.get("seriesId") is None and isinstance(values["query"]["left"], int):
        values["seriesId"] = None
        if isinstance(values["query"]["left"], int):
            values["seriesId"] = values["query"]["left"]
            values["query"]["left"] = AlertColumn.custom

@@ -82,7 +82,9 @@ ENV TZ=UTC \
    COMPRESSION_TYPE=zstd \
    CH_USERNAME="default" \
    CH_PASSWORD="" \
    CH_DATABASE="default"
    CH_DATABASE="default" \
    # Max file size to process, default to 100MB
    MAX_FILE_SIZE=100000000


RUN if [ "$SERVICE_NAME" = "http" ]; then \

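The hunk above introduces a MAX_FILE_SIZE environment variable capping how large a file the service will process (100 MB by default). As a hedged sketch only, not the repo's actual config code, this is how such a limit is commonly read from the environment in Go; the helper name is hypothetical:

package main

import (
    "fmt"
    "os"
    "strconv"
)

// maxFileSize reads MAX_FILE_SIZE from the environment,
// falling back to 100 MB when unset or malformed.
// (Illustrative helper; the real service may parse this differently.)
func maxFileSize() int64 {
    const defaultSize = 100_000_000 // matches the Dockerfile default above
    raw := os.Getenv("MAX_FILE_SIZE")
    if raw == "" {
        return defaultSize
    }
    n, err := strconv.ParseInt(raw, 10, 64)
    if err != nil || n <= 0 {
        return defaultSize
    }
    return n
}

func main() {
    fmt.Println("file size limit:", maxFileSize())
}
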
@@ -1,9 +1,7 @@
package main

import (
    "context"
    "log"
    "openreplay/backend/pkg/pprof"
    "os"
    "os/signal"
    "syscall"

@@ -13,31 +11,26 @@ import (
    "openreplay/backend/internal/assets/cacher"
    config "openreplay/backend/internal/config/assets"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/metrics"
    assetsMetrics "openreplay/backend/pkg/metrics/assets"
    "openreplay/backend/pkg/queue"
)

func main() {
    metrics := monitoring.New("assets")
    m := metrics.New()
    m.Register(assetsMetrics.List())

    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := config.New()
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }

    cacher := cacher.NewCacher(cfg, metrics)

    totalAssets, err := metrics.RegisterCounter("assets_total")
    if err != nil {
        log.Printf("can't create assets_total metric: %s", err)
    }
    cacher := cacher.NewCacher(cfg)

    msgHandler := func(msg messages.Message) {
        switch m := msg.(type) {
        case *messages.AssetCache:
            cacher.CacheURL(m.SessionID(), m.URL)
            totalAssets.Add(context.Background(), 1)
            assetsMetrics.IncreaseProcessesSessions()
            // TODO: connect to "raw" topic in order to listen for JSException
        case *messages.JSException:
            sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)

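Across the services in this commit, the per-service `monitoring.New(...)` plus ad-hoc `RegisterCounter` calls are replaced with a central registry (`m := metrics.New(); m.Register(assetsMetrics.List())`) and per-package increment helpers. The diff does not show the `pkg/metrics` implementation, so the following is only a self-contained sketch of that registry pattern with invented local types, not the repo's API:

package main

import "fmt"

// Collector mirrors the idea of a named metric that registers itself.
type Collector interface{ Name() string }

type counter struct {
    name string
    n    int64
}

func (c *counter) Name() string { return c.name }
func (c *counter) Inc()         { c.n++ }

// Registry gathers metric lists contributed by each service package.
type Registry struct{ collectors []Collector }

func New() *Registry { return &Registry{} }

func (r *Registry) Register(list []Collector) {
    r.collectors = append(r.collectors, list...)
}

// Each package exposes its metrics as a list, analogous to assetsMetrics.List().
var processedSessions = &counter{name: "assets_processed_sessions"}

func List() []Collector { return []Collector{processedSessions} }

func main() {
    m := New()
    m.Register(List())
    processedSessions.Inc() // analogous to assetsMetrics.IncreaseProcessesSessions()
    fmt.Println(len(m.collectors), "metrics registered")
}

The design benefit suggested by the diff: metric definitions live next to the code they instrument, and main() only wires lists into one registry.
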
@@ -1,171 +1,59 @@
package main

import (
    "errors"
    "log"
    types2 "openreplay/backend/pkg/db/types"
    "openreplay/backend/pkg/pprof"
    "os"
    "os/signal"
    "syscall"
    "time"

    "openreplay/backend/internal/config/db"
    config "openreplay/backend/internal/config/db"
    "openreplay/backend/internal/db"
    "openreplay/backend/internal/db/datasaver"
    "openreplay/backend/pkg/db/cache"
    "openreplay/backend/pkg/db/postgres"
    "openreplay/backend/pkg/handlers"
    custom2 "openreplay/backend/pkg/handlers/custom"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/metrics"
    databaseMetrics "openreplay/backend/pkg/metrics/database"
    "openreplay/backend/pkg/queue"
    "openreplay/backend/pkg/sessions"
    "openreplay/backend/pkg/terminator"
)

func main() {
    metrics := monitoring.New("db")
    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := db.New()
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }
    m := metrics.New()
    m.Register(databaseMetrics.List())

    cfg := config.New()

    // Init database
    pg := cache.NewPGCache(
        postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
        postgres.NewConn(cfg.Postgres.String(), cfg.BatchQueueLimit, cfg.BatchSizeLimit), cfg.ProjectExpirationTimeoutMs)
    defer pg.Close()

    // HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
    handlersFabric := func() []handlers.MessageProcessor {
        return []handlers.MessageProcessor{
            &custom2.EventMapper{},
            custom2.NewInputEventBuilder(),
            custom2.NewPageEventBuilder(),
        }
    }

    // Create handler's aggregator
    builderMap := sessions.NewBuilderMap(handlersFabric)

    // Init modules
    saver := datasaver.New(pg, cfg)
    saver.InitStats()
    // Init data saver
    saver := datasaver.New(cfg, pg)

    // Message filter
    msgFilter := []int{messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd,
        messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgClickEvent,
        messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr,
        messages.MsgJSException, messages.MsgResourceTiming,
        messages.MsgCustomEvent, messages.MsgCustomIssue, messages.MsgFetch, messages.MsgNetworkRequest, messages.MsgGraphQL,
        messages.MsgStateAction, messages.MsgSetInputTarget, messages.MsgSetInputValue, messages.MsgCreateDocument,
        messages.MsgMouseClick, messages.MsgSetPageLocation, messages.MsgPageLoadTiming, messages.MsgPageRenderTiming}

    // Handler logic
    msgHandler := func(msg messages.Message) {
        // Just save session data into db without additional checks
        if err := saver.InsertMessage(msg); err != nil {
            if !postgres.IsPkeyViolation(err) {
                log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
            }
            return
        }

        var (
            session *types2.Session
            err     error
        )
        if msg.TypeID() == messages.MsgSessionEnd {
            session, err = pg.GetSession(msg.SessionID())
        } else {
            session, err = pg.Cache.GetSession(msg.SessionID())
        }
        if session == nil {
            if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
                log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
            }
            return
        }

        // Save statistics to db
        err = saver.InsertStats(session, msg)
        if err != nil {
            log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
        }

        // Handle heuristics and save to temporary queue in memory
        builderMap.HandleMessage(msg)

        // Process saved heuristics messages as usual messages above in the code
        builderMap.IterateSessionReadyMessages(msg.SessionID(), func(msg messages.Message) {
            if err := saver.InsertMessage(msg); err != nil {
                if !postgres.IsPkeyViolation(err) {
                    log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
                }
                return
            }

            if err := saver.InsertStats(session, msg); err != nil {
                log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
            }
        })
    }
        messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr,
        messages.MsgJSException, messages.MsgResourceTiming, messages.MsgCustomEvent, messages.MsgCustomIssue,
        messages.MsgFetch, messages.MsgNetworkRequest, messages.MsgGraphQL, messages.MsgStateAction,
        messages.MsgSetInputTarget, messages.MsgSetInputValue, messages.MsgCreateDocument, messages.MsgMouseClick,
        messages.MsgSetPageLocation, messages.MsgPageLoadTiming, messages.MsgPageRenderTiming,
        messages.MsgInputEvent, messages.MsgPageEvent}

    // Init consumer
    consumer := queue.NewConsumer(
        cfg.GroupDB,
        []string{
            cfg.TopicRawWeb,    // from tracker
            cfg.TopicAnalytics, // from heuristics
            cfg.TopicRawWeb,
            cfg.TopicAnalytics,
        },
        messages.NewMessageIterator(msgHandler, msgFilter, true),
        messages.NewMessageIterator(saver.Handle, msgFilter, true),
        false,
        cfg.MessageSizeLimit,
    )

    // Run service and wait for TERM signal
    service := db.New(cfg, consumer, saver)
    log.Printf("Db service started\n")

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

    commitTick := time.Tick(cfg.CommitBatchTimeout)

    // Send collected batches to db
    commitDBUpdates := func() {
        // Commit collected batches and bulks of information to PG
        pg.Commit()
        // Commit collected batches of information to CH
        if err := saver.CommitStats(); err != nil {
            log.Printf("Error on stats commit: %v", err)
        }
        // Commit current position in queue
        if err := consumer.Commit(); err != nil {
            log.Printf("Error on consumer commit: %v", err)
        }
    }

    for {
        select {
        case sig := <-sigchan:
            log.Printf("Caught signal %s: terminating\n", sig.String())
            commitDBUpdates()
            if err := pg.Close(); err != nil {
                log.Printf("db.Close error: %s", err)
            }
            if err := saver.Close(); err != nil {
                log.Printf("saver.Close error: %s", err)
            }
            consumer.Close()
            os.Exit(0)
        case <-commitTick:
            commitDBUpdates()
            builderMap.ClearOldSessions()
        case msg := <-consumer.Rebalanced():
            log.Println(msg)
        default:
            // Handle new message from queue
            if err := consumer.ConsumeNext(); err != nil {
                log.Fatalf("Error on consumption: %v", err)
            }
        }
    }
    terminator.Wait(service)
}

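The rewrite above collapses the hand-rolled signal/select loop into a `db.New(cfg, consumer, saver)` service stopped via `terminator.Wait(service)`. The `pkg/terminator` package itself is not shown in this diff, so the sketch below only illustrates the likely shape of such a helper (block on SIGINT/SIGTERM, then stop the service); the `Stoppable` contract and `Wait` body are assumptions, not the repo's code:

package main

import (
    "log"
    "os"
    "os/signal"
    "syscall"
)

// Stoppable is a minimal, assumed contract for terminator.Wait.
type Stoppable interface{ Stop() }

// Wait blocks until SIGINT or SIGTERM arrives, then stops the service.
func Wait(s Stoppable) {
    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
    sig := <-sigchan
    log.Printf("caught signal %v: terminating", sig)
    s.Stop()
}

type fakeService struct{}

func (fakeService) Stop() { log.Println("service stopped") }

func main() { Wait(fakeService{}) }
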
@@ -2,8 +2,6 @@ package main

import (
    "log"
    "openreplay/backend/internal/storage"
    "openreplay/backend/pkg/pprof"
    "os"
    "os/signal"
    "strings"

@@ -12,27 +10,30 @@ import (

    "openreplay/backend/internal/config/ender"
    "openreplay/backend/internal/sessionender"
    "openreplay/backend/internal/storage"
    "openreplay/backend/pkg/db/cache"
    "openreplay/backend/pkg/db/postgres"
    "openreplay/backend/pkg/intervals"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/metrics"
    databaseMetrics "openreplay/backend/pkg/metrics/database"
    enderMetrics "openreplay/backend/pkg/metrics/ender"
    "openreplay/backend/pkg/queue"
)

func main() {
    metrics := monitoring.New("ender")
    m := metrics.New()
    m.Register(enderMetrics.List())
    m.Register(databaseMetrics.List())

    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := ender.New()
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }

    pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
    pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), cfg.ProjectExpirationTimeoutMs)
    defer pg.Close()

    sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
    sessions, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
    if err != nil {
        log.Printf("can't init ender service: %s", err)
        return

@@ -67,12 +68,12 @@ func main() {
            consumer.Close()
            os.Exit(0)
        case <-tick:
            failedSessionEnds := make(map[uint64]int64)
            failedSessionEnds := make(map[uint64]uint64)
            duplicatedSessionEnds := make(map[uint64]uint64)

            // Find ended sessions and send notification to other services
            sessions.HandleEndedSessions(func(sessionID uint64, timestamp int64) bool {
                msg := &messages.SessionEnd{Timestamp: uint64(timestamp)}
            sessions.HandleEndedSessions(func(sessionID uint64, timestamp uint64) bool {
                msg := &messages.SessionEnd{Timestamp: timestamp}
                currDuration, err := pg.GetSessionDuration(sessionID)
                if err != nil {
                    log.Printf("getSessionDuration failed, sessID: %d, err: %s", sessionID, err)

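The last hunk changes the ended-session callback's timestamp parameter from int64 to uint64, so the cast to `messages.SessionEnd.Timestamp` (a uint64, per the hunk) disappears at the call site. A minimal before/after sketch of just the types involved; only the `Timestamp` field is taken from the diff, the handler names are illustrative:

package main

import "fmt"

type SessionEnd struct{ Timestamp uint64 }

// Old shape: timestamps arrived as int64 and required a cast.
func oldHandler(sessionID uint64, timestamp int64) SessionEnd {
    return SessionEnd{Timestamp: uint64(timestamp)}
}

// New shape: uint64 end to end, no conversion needed.
func newHandler(sessionID uint64, timestamp uint64) SessionEnd {
    return SessionEnd{Timestamp: timestamp}
}

func main() {
    fmt.Println(oldHandler(1, 1700000000000), newHandler(1, 1700000000000))
}
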
@@ -2,90 +2,49 @@ package main

import (
    "log"
    "openreplay/backend/pkg/pprof"
    "os"
    "os/signal"
    "syscall"
    "time"

    "openreplay/backend/internal/config/heuristics"
    config "openreplay/backend/internal/config/heuristics"
    "openreplay/backend/internal/heuristics"
    "openreplay/backend/pkg/handlers"
    web2 "openreplay/backend/pkg/handlers/web"
    "openreplay/backend/pkg/intervals"
    "openreplay/backend/pkg/handlers/custom"
    "openreplay/backend/pkg/handlers/web"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/queue"
    "openreplay/backend/pkg/sessions"
    "openreplay/backend/pkg/terminator"
)

func main() {
    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := heuristics.New()
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }
    cfg := config.New()

    // HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
    handlersFabric := func() []handlers.MessageProcessor {
        return []handlers.MessageProcessor{
            // web handlers
            &web2.ClickRageDetector{},
            &web2.CpuIssueDetector{},
            &web2.DeadClickDetector{},
            &web2.MemoryIssueDetector{},
            &web2.NetworkIssueDetector{},
            &web2.PerformanceAggregator{},
            // Other handlers (you can add your custom handlers here)
            //&custom.CustomHandler{},
            custom.NewInputEventBuilder(),
            custom.NewPageEventBuilder(),
            web.NewDeadClickDetector(),
            &web.ClickRageDetector{},
            &web.CpuIssueDetector{},
            &web.MemoryIssueDetector{},
            &web.NetworkIssueDetector{},
            &web.PerformanceAggregator{},
        }
    }

    // Create handler's aggregator
    builderMap := sessions.NewBuilderMap(handlersFabric)

    // Init producer and consumer for data bus
    eventBuilder := sessions.NewBuilderMap(handlersFabric)
    producer := queue.NewProducer(cfg.MessageSizeLimit, true)

    msgHandler := func(msg messages.Message) {
        builderMap.HandleMessage(msg)
    }

    consumer := queue.NewConsumer(
        cfg.GroupHeuristics,
        []string{
            cfg.TopicRawWeb,
        },
        messages.NewMessageIterator(msgHandler, nil, true),
        messages.NewMessageIterator(eventBuilder.HandleMessage, nil, true),
        false,
        cfg.MessageSizeLimit,
    )

    // Run service and wait for TERM signal
    service := heuristics.New(cfg, producer, consumer, eventBuilder)
    log.Printf("Heuristics service started\n")

    sigchan := make(chan os.Signal, 1)
    signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

    tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)
    for {
        select {
        case sig := <-sigchan:
            log.Printf("Caught signal %v: terminating\n", sig)
            producer.Close(cfg.ProducerTimeout)
            consumer.Commit()
            consumer.Close()
            os.Exit(0)
        case <-tick:
            builderMap.IterateReadyMessages(func(sessionID uint64, readyMsg messages.Message) {
                producer.Produce(cfg.TopicAnalytics, sessionID, readyMsg.Encode())
            })
            producer.Flush(cfg.ProducerTimeout)
            consumer.Commit()
        case msg := <-consumer.Rebalanced():
            log.Println(msg)
        default:
            if err := consumer.ConsumeNext(); err != nil {
                log.Fatalf("Error on consuming: %v", err)
            }
        }
    }
    terminator.Wait(service)
}

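The handler list above is built through a factory (`handlersFabric`) rather than a shared slice, so each session gets fresh, independently stateful processor instances; the in-diff comment notes that custom handlers can be appended there. Since `handlers.MessageProcessor`'s full signature is not shown in this diff, the sketch below uses its own stand-in interface to illustrate the per-session-factory pattern only:

package main

import "fmt"

type Message interface{ TypeID() int }

// Processor stands in for handlers.MessageProcessor; the real
// interface in pkg/handlers may differ.
type Processor interface{ Handle(Message) }

// clickRageDetector is stateful, which is why a factory (not a
// shared instance) is needed per session.
type clickRageDetector struct{ clicks int }

func (d *clickRageDetector) Handle(Message) { d.clicks++ }

// handlersFabric mirrors the factory in the hunk above: a fresh
// processor list for every session.
func handlersFabric() []Processor {
    return []Processor{&clickRageDetector{}}
}

func main() {
    a, b := handlersFabric(), handlersFabric()
    fmt.Println(a[0] != b[0]) // independent state per session: true
}
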
@@ -2,43 +2,44 @@ package main

import (
    "log"
    "openreplay/backend/internal/config/http"
    "openreplay/backend/internal/http/router"
    "openreplay/backend/internal/http/server"
    "openreplay/backend/internal/http/services"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/pprof"
    "os"
    "os/signal"
    "syscall"

    "openreplay/backend/internal/config/http"
    "openreplay/backend/internal/http/router"
    "openreplay/backend/internal/http/server"
    "openreplay/backend/internal/http/services"
    "openreplay/backend/pkg/db/cache"
    "openreplay/backend/pkg/db/postgres"
    "openreplay/backend/pkg/metrics"
    databaseMetrics "openreplay/backend/pkg/metrics/database"
    httpMetrics "openreplay/backend/pkg/metrics/http"
    "openreplay/backend/pkg/queue"
)

func main() {
    metrics := monitoring.New("http")
    m := metrics.New()
    m.Register(httpMetrics.List())
    m.Register(databaseMetrics.List())

    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := http.New()
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }

    // Connect to queue
    producer := queue.NewProducer(cfg.MessageSizeLimit, true)
    defer producer.Close(15000)

    // Connect to database
    dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics), 1000*60*20)
    dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres.String(), 0, 0), 1000*60*20)
    defer dbConn.Close()

    // Build all services
    services := services.New(cfg, producer, dbConn)

    // Init server's routes
    router, err := router.NewRouter(cfg, services, metrics)
    router, err := router.NewRouter(cfg, services)
    if err != nil {
        log.Fatalf("failed while creating engine: %s", err)
    }

@@ -2,32 +2,30 @@ package main

import (
    "log"
    config "openreplay/backend/internal/config/integrations"
    "openreplay/backend/internal/integrations/clientManager"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/pprof"
    "time"

    "os"
    "os/signal"
    "syscall"
    "time"

    config "openreplay/backend/internal/config/integrations"
    "openreplay/backend/internal/integrations/clientManager"
    "openreplay/backend/pkg/db/postgres"
    "openreplay/backend/pkg/intervals"
    "openreplay/backend/pkg/metrics"
    databaseMetrics "openreplay/backend/pkg/metrics/database"
    "openreplay/backend/pkg/queue"
    "openreplay/backend/pkg/token"
)

func main() {
    metrics := monitoring.New("integrations")
    m := metrics.New()
    m.Register(databaseMetrics.List())

    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := config.New()
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }

    pg := postgres.NewConn(cfg.Postgres.String(), 0, 0, metrics)
    pg := postgres.NewConn(cfg.Postgres.String(), 0, 0)
    defer pg.Close()

    tokenizer := token.NewTokenizer(cfg.TokenSecret)

@@ -2,10 +2,8 @@ package main

import (
    "bytes"
    "context"
    "encoding/binary"
    "log"
    "openreplay/backend/pkg/pprof"
    "os"
    "os/signal"
    "syscall"

@@ -16,19 +14,18 @@ import (
    "openreplay/backend/internal/sink/sessionwriter"
    "openreplay/backend/internal/storage"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/metrics"
    sinkMetrics "openreplay/backend/pkg/metrics/sink"
    "openreplay/backend/pkg/queue"
    "openreplay/backend/pkg/url/assets"
)

func main() {
    metrics := monitoring.New("sink")
    m := metrics.New()
    m.Register(sinkMetrics.List())
    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := sink.New()
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }

    if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
        log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)

@@ -39,22 +36,8 @@ func main() {
    producer := queue.NewProducer(cfg.MessageSizeLimit, true)
    defer producer.Close(cfg.ProducerCloseTimeout)
    rewriter := assets.NewRewriter(cfg.AssetsOrigin)
    assetMessageHandler := assetscache.New(cfg, rewriter, producer, metrics)

    assetMessageHandler := assetscache.New(cfg, rewriter, producer)
    counter := storage.NewLogCounter()
    // Session message metrics
    totalMessages, err := metrics.RegisterCounter("messages_total")
    if err != nil {
        log.Printf("can't create messages_total metric: %s", err)
    }
    savedMessages, err := metrics.RegisterCounter("messages_saved")
    if err != nil {
        log.Printf("can't create messages_saved metric: %s", err)
    }
    messageSize, err := metrics.RegisterHistogram("messages_size")
    if err != nil {
        log.Printf("can't create messages_size metric: %s", err)
    }

    var (
        sessionID uint64

@@ -74,11 +57,12 @@ func main() {
        if domBuffer.Len() <= 0 && devBuffer.Len() <= 0 {
            return
        }
        sinkMetrics.RecordWrittenBytes(float64(domBuffer.Len()), "dom")
        sinkMetrics.RecordWrittenBytes(float64(devBuffer.Len()), "devtools")

        // Write buffered batches to the session
        if err := writer.Write(sessionID, domBuffer.Bytes(), devBuffer.Bytes()); err != nil {
            log.Printf("writer error: %s", err)
            return
        }

        // Prepare buffer for the next batch

@@ -88,8 +72,7 @@ func main() {
            return
        }

        // [METRICS] Increase the number of processed messages
        totalMessages.Add(context.Background(), 1)
        sinkMetrics.IncreaseTotalMessages()

        // Send SessionEnd trigger to storage service
        if msg.TypeID() == messages.MsgSessionEnd {

@@ -125,7 +108,7 @@ func main() {
            log.Printf("zero ts; sessID: %d, msgType: %d", msg.SessionID(), msg.TypeID())
        } else {
            // Log ts of last processed message
            counter.Update(msg.SessionID(), time.UnixMilli(ts))
            counter.Update(msg.SessionID(), time.UnixMilli(int64(ts)))
        }

        // Try to encode message to avoid null data inserts

@@ -187,9 +170,8 @@ func main() {
            }
        }

        // [METRICS] Increase the number of written to the files messages and the message size
        messageSize.Record(context.Background(), float64(len(msg.Encode())))
        savedMessages.Add(context.Background(), 1)
        sinkMetrics.IncreaseWrittenMessages()
        sinkMetrics.RecordMessageSize(float64(len(msg.Encode())))
    }

    consumer := queue.NewConsumer(

@@ -2,7 +2,6 @@ package main

import (
    "log"
    "openreplay/backend/pkg/pprof"
    "os"
    "os/signal"
    "syscall"

@@ -12,22 +11,22 @@ import (
    "openreplay/backend/internal/storage"
    "openreplay/backend/pkg/failover"
    "openreplay/backend/pkg/messages"
    "openreplay/backend/pkg/monitoring"
    "openreplay/backend/pkg/metrics"
    storageMetrics "openreplay/backend/pkg/metrics/storage"
    "openreplay/backend/pkg/queue"
    s3storage "openreplay/backend/pkg/storage"
    cloud "openreplay/backend/pkg/storage"
)

func main() {
    metrics := monitoring.New("storage")
    m := metrics.New()
    m.Register(storageMetrics.List())

    log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

    cfg := config.New()
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }

    s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket)
    srv, err := storage.New(cfg, s3, metrics)
    s3 := cloud.NewS3(cfg.S3Region, cfg.S3Bucket)
    srv, err := storage.New(cfg, s3)
    if err != nil {
        log.Printf("can't init storage service: %s", err)
        return

@@ -20,14 +20,11 @@ require (
    github.com/klauspost/pgzip v1.2.5
    github.com/oschwald/maxminddb-golang v1.7.0
    github.com/pkg/errors v0.9.1
    github.com/prometheus/client_golang v1.12.1
    github.com/sethvargo/go-envconfig v0.7.0
    github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
    github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
    go.opentelemetry.io/otel v1.7.0
    go.opentelemetry.io/otel/exporters/prometheus v0.30.0
    go.opentelemetry.io/otel/metric v0.30.0
    go.opentelemetry.io/otel/sdk/metric v0.30.0
    golang.org/x/net v0.0.0-20220906165146-f3363e06e74c
    golang.org/x/net v0.1.1-0.20221104162952-702349b0e862
    google.golang.org/api v0.81.0
)

@@ -38,8 +35,6 @@ require (
    cloud.google.com/go/storage v1.14.0 // indirect
    github.com/beorn7/perks v1.0.1 // indirect
    github.com/cespare/xxhash/v2 v2.1.2 // indirect
    github.com/go-logr/logr v1.2.3 // indirect
    github.com/go-logr/stdr v1.2.2 // indirect
    github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
    github.com/golang/protobuf v1.5.2 // indirect
    github.com/google/go-cmp v0.5.8 // indirect

@@ -55,20 +50,19 @@ require (
    github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
    github.com/paulmach/orb v0.7.1 // indirect
    github.com/pierrec/lz4/v4 v4.1.15 // indirect
    github.com/prometheus/client_golang v1.12.1 // indirect
    github.com/prometheus/client_model v0.2.0 // indirect
    github.com/prometheus/common v0.32.1 // indirect
    github.com/prometheus/procfs v0.7.3 // indirect
    github.com/shopspring/decimal v1.3.1 // indirect
    github.com/stretchr/testify v1.8.0 // indirect
    go.opencensus.io v0.23.0 // indirect
    go.opentelemetry.io/otel/sdk v1.7.0 // indirect
    go.opentelemetry.io/otel v1.7.0 // indirect
    go.opentelemetry.io/otel/trace v1.7.0 // indirect
    golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
    golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
    golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
    golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 // indirect
    golang.org/x/text v0.4.0 // indirect
    golang.org/x/sys v0.1.0 // indirect
    golang.org/x/text v0.7.0 // indirect
    golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
    google.golang.org/appengine v1.6.7 // indirect
    google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect

@@ -80,8 +80,6 @@ github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk5
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aws/aws-sdk-go v1.44.98 h1:fX+NxebSdO/9T6DTNOLhpC+Vv6RNkKRfsMg0a7o/yBo=
github.com/aws/aws-sdk-go v1.44.98/go.mod h1:y4AeaBuwd2Lk+GepC1E9v0qOiTws0MIWAX4oIKwKHZo=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=

@@ -156,9 +154,7 @@ github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-ole/go-ole v1.2.4/go.mod h1:XCwSNxSkXRo4vlyPy93sltvi/qJq0jqQhjqQNIwKuxM=
github.com/go-ole/go-ole v1.2.6/go.mod h1:pprOEPIfldk/42T2oK7lQ4v4JSDwmV0As9GaiUsvbm0=

@@ -489,14 +485,6 @@ go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
go.opentelemetry.io/otel/exporters/prometheus v0.30.0 h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0=
go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs=
go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c=
go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME=
go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8=
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=

@@ -601,8 +589,8 @@ golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c h1:yKufUcDwucU5urd+50/Opbt4AYpqthk7wHpHok8f1lo=
golang.org/x/net v0.0.0-20220906165146-f3363e06e74c/go.mod h1:YDH+HFinaLZZlnHAfSS6ZXJJ9M9t4Dl22yv3iI2vPwk=
golang.org/x/net v0.1.1-0.20221104162952-702349b0e862 h1:KrLJ+iz8J6j6VVr/OCfULAcK+xozUmWE43fKpMR4MlI=
golang.org/x/net v0.1.1-0.20221104162952-702349b0e862/go.mod h1:Cx3nUiGt4eDBEyega/BKRp+/AlGL8hYe7U9odMt2Cco=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=

@@ -690,7 +678,6 @@ golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=

@@ -715,8 +702,8 @@ golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBc
golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10 h1:WIoqL4EROvwiPdUtaip4VcDdpZ4kha7wBWZrbVKCIZg=
golang.org/x/sys v0.0.0-20220728004956-3c1f35247d10/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.1.0 h1:kunALQeHf1/185U1i0GOB/fy1IPRDDpuoOOqRReG57U=
golang.org/x/sys v0.1.0/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=

@@ -728,8 +715,8 @@ golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/text v0.7.0 h1:4BRB4x83lYWy72KwLD/qYDuTu7q9PjSagHvijDw7cLo=
golang.org/x/text v0.7.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=

|
|||
package cacher
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/tls"
|
||||
"fmt"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"mime"
|
||||
"net/http"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
metrics "openreplay/backend/pkg/metrics/assets"
|
||||
"path/filepath"
|
||||
"strings"
|
||||
"time"
|
||||
|
|
@ -25,30 +22,22 @@ import (
|
|||
const MAX_CACHE_DEPTH = 5
|
||||
|
||||
type cacher struct {
|
||||
timeoutMap *timeoutMap // Concurrency implemented
|
||||
s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently."
|
||||
httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
|
||||
rewriter *assets.Rewriter // Read only
|
||||
Errors chan error
|
||||
sizeLimit int
|
||||
downloadedAssets syncfloat64.Counter
|
||||
requestHeaders map[string]string
|
||||
workers *WorkerPool
|
||||
timeoutMap *timeoutMap // Concurrency implemented
|
||||
s3 *storage.S3 // AWS Docs: "These clients are safe to use concurrently."
|
||||
httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
|
||||
rewriter *assets.Rewriter // Read only
|
||||
Errors chan error
|
||||
sizeLimit int
|
||||
requestHeaders map[string]string
|
||||
workers *WorkerPool
|
||||
}
|
||||
|
||||
func (c *cacher) CanCache() bool {
|
||||
return c.workers.CanAddTask()
|
||||
}
|
||||
|
||||
func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
|
||||
func NewCacher(cfg *config.Config) *cacher {
|
||||
rewriter := assets.NewRewriter(cfg.AssetsOrigin)
|
||||
if metrics == nil {
|
||||
log.Fatalf("metrics are empty")
|
||||
}
|
||||
downloadedAssets, err := metrics.RegisterCounter("assets_downloaded")
|
||||
if err != nil {
|
||||
log.Printf("can't create downloaded_assets metric: %s", err)
|
||||
}
|
||||
c := &cacher{
|
||||
timeoutMap: newTimeoutMap(),
|
||||
s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets),
|
||||
|
|
@ -59,11 +48,10 @@ func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
|
|||
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
|
||||
},
|
||||
},
|
||||
rewriter: rewriter,
|
||||
Errors: make(chan error),
|
||||
sizeLimit: cfg.AssetsSizeLimit,
|
||||
downloadedAssets: downloadedAssets,
|
||||
requestHeaders: cfg.AssetsRequestHeaders,
|
||||
rewriter: rewriter,
|
||||
Errors: make(chan error),
|
||||
sizeLimit: cfg.AssetsSizeLimit,
|
||||
requestHeaders: cfg.AssetsRequestHeaders,
|
||||
}
|
||||
c.workers = NewPool(64, c.CacheFile)
|
||||
return c
|
||||
|
|
@ -75,6 +63,7 @@ func (c *cacher) CacheFile(task *Task) {
|
|||
|
||||
func (c *cacher) cacheURL(t *Task) {
|
||||
t.retries--
|
||||
start := time.Now()
|
||||
req, _ := http.NewRequest("GET", t.requestURL, nil)
|
||||
if t.retries%2 == 0 {
|
||||
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0")
|
||||
|
|
@ -87,6 +76,7 @@ func (c *cacher) cacheURL(t *Task) {
|
|||
c.Errors <- errors.Wrap(err, t.urlContext)
|
||||
return
|
||||
}
|
||||
metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode >= 400 {
|
||||
printErr := true
|
||||
|
|
@ -122,12 +112,15 @@ func (c *cacher) cacheURL(t *Task) {
|
|||
}
|
||||
|
||||
// TODO: implement in streams
|
||||
start = time.Now()
|
||||
err = c.s3.Upload(strings.NewReader(strData), t.cachePath, contentType, false)
|
||||
if err != nil {
|
||||
metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
|
||||
c.Errors <- errors.Wrap(err, t.urlContext)
|
||||
return
|
||||
}
|
||||
c.downloadedAssets.Add(context.Background(), 1)
|
||||
metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
|
||||
metrics.IncreaseSavedSessions()
|
||||
|
||||
if isCSS {
|
||||
if t.depth > 0 {
|
||||
|
|
|
|||
|
|
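The cacher hunks replace an injected counter field with package-level duration helpers wrapped around the download and upload calls. A self-contained sketch of that timing pattern, with the recorder body invented for illustration (only the `RecordUploadDuration(ms, failed)` shape is taken from the hunk):

package main

import (
    "fmt"
    "time"
)

// recordUploadDuration mimics the shape of metrics.RecordUploadDuration(durationMs, failed).
func recordUploadDuration(durationMs float64, failed bool) {
    fmt.Printf("upload took %.1fms (failed=%v)\n", durationMs, failed)
}

func upload() error {
    time.Sleep(10 * time.Millisecond) // stand-in for the S3 upload
    return nil
}

func main() {
    start := time.Now()
    err := upload()
    // time.Since(start) is the idiomatic spelling of time.Now().Sub(start).
    recordUploadDuration(float64(time.Since(start).Milliseconds()), err != nil)
}
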
@@ -3,6 +3,7 @@ package heuristics
import (
    "openreplay/backend/internal/config/common"
    "openreplay/backend/internal/config/configurator"
    "openreplay/backend/pkg/pprof"
)

type Config struct {

@@ -19,5 +20,8 @@ type Config struct {
func New() *Config {
    cfg := &Config{}
    configurator.Process(cfg)
    if cfg.UseProfiler {
        pprof.StartProfilingServer()
    }
    return cfg
}

@@ -1,74 +0,0 @@
package datasaver

import (
    "fmt"
    . "openreplay/backend/pkg/messages"
)

func (mi *Saver) InsertMessage(msg Message) error {
    sessionID := msg.SessionID()
    switch m := msg.(type) {
    // Common
    case *Metadata:
        if err := mi.pg.InsertMetadata(sessionID, m); err != nil {
            return fmt.Errorf("insert metadata err: %s", err)
        }
        return nil
    case *IssueEvent:
        return mi.pg.InsertIssueEvent(sessionID, m)
    //TODO: message adapter (transformer) (at the level of pkg/message) for types: *IOSMetadata, *IOSIssueEvent and others

    // Web
    case *SessionStart:
        return mi.pg.HandleWebSessionStart(sessionID, m)
    case *SessionEnd:
        return mi.pg.HandleWebSessionEnd(sessionID, m)
    case *UserID:
        return mi.pg.InsertWebUserID(sessionID, m)
    case *UserAnonymousID:
        return mi.pg.InsertWebUserAnonymousID(sessionID, m)
    case *CustomEvent:
        return mi.pg.InsertWebCustomEvent(sessionID, m)
    case *ClickEvent:
        return mi.pg.InsertWebClickEvent(sessionID, m)
    case *InputEvent:
        return mi.pg.InsertWebInputEvent(sessionID, m)

    // Unique Web messages
    case *PageEvent:
        return mi.pg.InsertWebPageEvent(sessionID, m)
    case *NetworkRequest:
        return mi.pg.InsertWebNetworkRequest(sessionID, m)
    case *GraphQL:
        return mi.pg.InsertWebGraphQL(sessionID, m)
    case *JSException:
        return mi.pg.InsertWebJSException(m)
    case *IntegrationEvent:
        return mi.pg.InsertWebIntegrationEvent(m)

    // IOS
    case *IOSSessionStart:
        return mi.pg.InsertIOSSessionStart(sessionID, m)
    case *IOSSessionEnd:
        return mi.pg.InsertIOSSessionEnd(sessionID, m)
    case *IOSUserID:
        return mi.pg.InsertIOSUserID(sessionID, m)
    case *IOSUserAnonymousID:
        return mi.pg.InsertIOSUserAnonymousID(sessionID, m)
    case *IOSCustomEvent:
        return mi.pg.InsertIOSCustomEvent(sessionID, m)
    case *IOSClickEvent:
        return mi.pg.InsertIOSClickEvent(sessionID, m)
    case *IOSInputEvent:
        return mi.pg.InsertIOSInputEvent(sessionID, m)
    // Unique IOS messages
    case *IOSNetworkCall:
        return mi.pg.InsertIOSNetworkCall(sessionID, m)
    case *IOSScreenEnter:
        return mi.pg.InsertIOSScreenEnter(sessionID, m)
    case *IOSCrash:
        return mi.pg.InsertIOSCrash(sessionID, m)

    }
    return nil // "Not implemented"
}
19
backend/internal/db/datasaver/methods.go
Normal file
@@ -0,0 +1,19 @@
package datasaver

import (
    . "openreplay/backend/pkg/messages"
)

func (s *saverImpl) init() {
    // noop
}

func (s *saverImpl) handleExtraMessage(msg Message) error {
    switch m := msg.(type) {
    case *PerformanceTrackAggr:
        return s.pg.InsertWebStatsPerformance(m)
    case *ResourceTiming:
        return s.pg.InsertWebStatsResourceEvent(m)
    }
    return nil
}

@ -1,16 +1,126 @@
package datasaver

import (
"log"

"openreplay/backend/internal/config/db"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/db/clickhouse"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/db/types"
. "openreplay/backend/pkg/messages"
queue "openreplay/backend/pkg/queue/types"
)

type Saver struct {
pg *cache.PGCache
producer types.Producer
type Saver interface {
Handle(msg Message)
Commit() error
Close() error
}

func New(pg *cache.PGCache, _ *db.Config) *Saver {
return &Saver{pg: pg, producer: nil}
type saverImpl struct {
cfg *db.Config
pg *cache.PGCache
ch clickhouse.Connector
producer queue.Producer
}

func New(cfg *db.Config, pg *cache.PGCache) Saver {
s := &saverImpl{cfg: cfg, pg: pg}
s.init()
return s
}

func (s *saverImpl) Handle(msg Message) {
if msg.TypeID() == MsgCustomEvent {
defer s.Handle(types.WrapCustomEvent(msg.(*CustomEvent)))
}
if err := s.handleMessage(msg); err != nil {
if !postgres.IsPkeyViolation(err) {
log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
}
return
}
if err := s.handleExtraMessage(msg); err != nil {
log.Printf("Stats Insertion Error %v; Session: %d, Message: %v", err, msg.SessionID(), msg)
}
return
}

func (s *saverImpl) handleMessage(msg Message) error {
switch m := msg.(type) {
case *Metadata:
return s.pg.InsertMetadata(m)
case *IssueEvent:
return s.pg.InsertIssueEvent(m)
case *SessionStart:
return s.pg.HandleWebSessionStart(m)
case *SessionEnd:
return s.pg.HandleWebSessionEnd(m)
case *UserID:
return s.pg.InsertWebUserID(m)
case *UserAnonymousID:
return s.pg.InsertWebUserAnonymousID(m)
case *CustomEvent:
return s.pg.InsertWebCustomEvent(m)
case *MouseClick:
return s.pg.InsertWebClickEvent(m)
case *InputEvent:
return s.pg.InsertWebInputEvent(m)
case *PageEvent:
return s.pg.InsertWebPageEvent(m)
case *NetworkRequest:
return s.pg.InsertWebNetworkRequest(m)
case *GraphQL:
return s.pg.InsertWebGraphQL(m)
case *JSException:
return s.pg.InsertWebJSException(m)
case *IntegrationEvent:
return s.pg.InsertWebIntegrationEvent(m)
case *IOSSessionStart:
return s.pg.InsertIOSSessionStart(m)
case *IOSSessionEnd:
return s.pg.InsertIOSSessionEnd(m)
case *IOSUserID:
return s.pg.InsertIOSUserID(m)
case *IOSUserAnonymousID:
return s.pg.InsertIOSUserAnonymousID(m)
case *IOSCustomEvent:
return s.pg.InsertIOSCustomEvent(m)
case *IOSClickEvent:
return s.pg.InsertIOSClickEvent(m)
case *IOSInputEvent:
return s.pg.InsertIOSInputEvent(m)
case *IOSNetworkCall:
return s.pg.InsertIOSNetworkCall(m)
case *IOSScreenEnter:
return s.pg.InsertIOSScreenEnter(m)
case *IOSCrash:
return s.pg.InsertIOSCrash(m)
}
return nil
}

func (s *saverImpl) Commit() error {
if s.pg != nil {
s.pg.Commit()
}
if s.ch != nil {
s.ch.Commit()
}
return nil
}

func (s *saverImpl) Close() error {
if s.pg != nil {
if err := s.pg.Close(); err != nil {
log.Printf("pg.Close error: %s", err)
}
}
if s.ch != nil {
if err := s.ch.Stop(); err != nil {
log.Printf("ch.Close error: %s", err)
}
}
return nil
}
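The exported surface of the package is now the three-method Saver interface rather than a concrete struct, so callers go through New and never see saverImpl. A minimal usage sketch, assuming a configured *db.Config, a *cache.PGCache, and a message channel (names are illustrative):

package main

import (
    "log"

    "openreplay/backend/internal/config/db"
    "openreplay/backend/internal/db/datasaver"
    "openreplay/backend/pkg/db/cache"
    "openreplay/backend/pkg/messages"
)

func run(cfg *db.Config, pg *cache.PGCache, msgs <-chan messages.Message) {
    saver := datasaver.New(cfg, pg) // returns the Saver interface
    for msg := range msgs {
        saver.Handle(msg) // insertion errors are logged inside Handle
    }
    if err := saver.Commit(); err != nil {
        log.Printf("commit error: %s", err)
    }
    if err := saver.Close(); err != nil {
        log.Printf("close error: %s", err)
    }
}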
@ -1,29 +0,0 @@
package datasaver

import (
    . "openreplay/backend/pkg/db/types"
    . "openreplay/backend/pkg/messages"
)

func (si *Saver) InitStats() {
    // noop
}

func (si *Saver) InsertStats(session *Session, msg Message) error {
    switch m := msg.(type) {
    // Web
    case *PerformanceTrackAggr:
        return si.pg.InsertWebStatsPerformance(session.SessionID, m)
    case *ResourceEvent:
        return si.pg.InsertWebStatsResourceEvent(session.SessionID, m)
    }
    return nil
}

func (si *Saver) CommitStats() error {
    return nil
}

func (si *Saver) Close() error {
    return nil
}
56 backend/internal/db/service.go Normal file
@ -0,0 +1,56 @@
package db

import (
    "log"
    "time"

    "openreplay/backend/internal/config/db"
    "openreplay/backend/internal/db/datasaver"
    "openreplay/backend/internal/service"
    "openreplay/backend/pkg/queue/types"
)

type dbImpl struct {
    cfg      *db.Config
    consumer types.Consumer
    saver    datasaver.Saver
}

func New(cfg *db.Config, consumer types.Consumer, saver datasaver.Saver) service.Interface {
    s := &dbImpl{
        cfg:      cfg,
        consumer: consumer,
        saver:    saver,
    }
    go s.run()
    return s
}

func (d *dbImpl) run() {
    commitTick := time.Tick(d.cfg.CommitBatchTimeout)
    for {
        select {
        case <-commitTick:
            d.commit()
        case msg := <-d.consumer.Rebalanced():
            log.Println(msg)
        default:
            if err := d.consumer.ConsumeNext(); err != nil {
                log.Fatalf("Error on consumption: %v", err)
            }
        }
    }
}

func (d *dbImpl) commit() {
    d.saver.Commit()
    d.consumer.Commit()
}

func (d *dbImpl) Stop() {
    d.commit()
    if err := d.saver.Close(); err != nil {
        log.Printf("saver.Close error: %s", err)
    }
    d.consumer.Close()
}
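Because New starts the consume/commit loop in its own goroutine, the enclosing binary only builds the dependencies and calls Stop on shutdown. A hedged wiring sketch (queue and cache construction are elided; the import aliases are mine, not from this commit):

package main

import (
    "os"
    "os/signal"
    "syscall"

    dbcfg "openreplay/backend/internal/config/db"
    dbsvc "openreplay/backend/internal/db"
    "openreplay/backend/internal/db/datasaver"
    "openreplay/backend/pkg/db/cache"
    "openreplay/backend/pkg/queue/types"
)

func wire(cfg *dbcfg.Config, pg *cache.PGCache, consumer types.Consumer) {
    saver := datasaver.New(cfg, pg)            // Saver interface from this diff
    service := dbsvc.New(cfg, consumer, saver) // spawns run() internally

    sigs := make(chan os.Signal, 1)
    signal.Notify(sigs, syscall.SIGINT, syscall.SIGTERM)
    <-sigs
    service.Stop() // final commit, then saver.Close and consumer.Close
}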
64 backend/internal/heuristics/service.go Normal file
@ -0,0 +1,64 @@
package heuristics

import (
    "log"
    "time"

    "openreplay/backend/internal/config/heuristics"
    "openreplay/backend/internal/service"
    "openreplay/backend/pkg/queue/types"
    "openreplay/backend/pkg/sessions"
)

type heuristicsImpl struct {
    cfg      *heuristics.Config
    producer types.Producer
    consumer types.Consumer
    events   sessions.EventBuilder
}

func New(cfg *heuristics.Config, p types.Producer, c types.Consumer, e sessions.EventBuilder) service.Interface {
    s := &heuristicsImpl{
        cfg:      cfg,
        producer: p,
        consumer: c,
        events:   e,
    }
    go s.run()
    return s
}

func (h *heuristicsImpl) run() {
    tick := time.Tick(10 * time.Second)
    for {
        select {
        case evt := <-h.events.Events():
            if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
                log.Printf("can't send new event to queue: %s", err)
            }
        case <-tick:
            h.producer.Flush(h.cfg.ProducerTimeout)
            h.consumer.Commit()
        case msg := <-h.consumer.Rebalanced():
            log.Println(msg)
        default:
            if err := h.consumer.ConsumeNext(); err != nil {
                log.Fatalf("Error on consuming: %v", err)
            }
        }
    }
}

func (h *heuristicsImpl) Stop() {
    // Stop event builder and flush all events
    log.Println("stopping heuristics service")
    h.events.Stop()
    for evt := range h.events.Events() {
        if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
            log.Printf("can't send new event to queue: %s", err)
        }
    }
    h.producer.Close(h.cfg.ProducerTimeout)
    h.consumer.Commit()
    h.consumer.Close()
}
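Stop only terminates because the EventBuilder is expected to close its Events channel after Stop is called; otherwise the drain loop would block forever. The underlying stop-then-drain idiom, reduced to plain channels (a self-contained sketch, not OpenReplay code):

package main

import "fmt"

// drain demonstrates the shutdown idiom used above: the producer side
// closes the channel, and the consumer ranges until it is empty.
func drain(events chan int, stop func()) {
    stop() // must eventually close(events), or the range below never ends
    for evt := range events {
        fmt.Println("flushing event", evt)
    }
}

func main() {
    events := make(chan int, 3)
    events <- 1
    events <- 2
    drain(events, func() { close(events) })
}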
@ -22,28 +22,28 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
req := &StartIOSSessionRequest{}

if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
return
}
body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
defer body.Close()

if err := json.NewDecoder(body).Decode(req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, 0)
return
}

if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, 0)
return
}

p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
if err != nil {
if postgres.IsNoRowsErr(err) {
ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"))
ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"), startTime, r.URL.Path, 0)
} else {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
}
return
}
@ -53,18 +53,18 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
if err != nil { // Starting the new one
dice := byte(rand.Intn(100)) // [0, 100)
if dice >= p.SampleRate {
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, 0)
return
}

ua := e.services.UaParser.ParseFromHTTPRequest(r)
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, 0)
return
}
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err)
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0)
return
}
// TODO: if EXPIRED => send message for two sessions association
@ -94,22 +94,24 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
UserUUID: userUUID,
SessionID: strconv.FormatUint(tokenData.ID, 10),
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
})
}, startTime, r.URL.Path, 0)
}

func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil {
ResponseWithError(w, http.StatusUnauthorized, err)
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
return
}
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
}

func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil && err != token.EXPIRED {
ResponseWithError(w, http.StatusUnauthorized, err)
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
return
}
// Check timestamps here?
@ -117,16 +119,17 @@ func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Reque
}

func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
log.Printf("received image request")

sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil { // Should accept expired token?
ResponseWithError(w, http.StatusUnauthorized, err)
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, 0)
return
}

if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, 0)
return
}
r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
@ -134,21 +137,21 @@ func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request)

err = r.ParseMultipartForm(1e6) // ~1Mb
if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
ResponseWithError(w, http.StatusUnsupportedMediaType, err)
ResponseWithError(w, http.StatusUnsupportedMediaType, err, startTime, r.URL.Path, 0)
return
// } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
} else if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, 0) // TODO: send error here only on staging
return
}

if r.MultipartForm == nil {
ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"))
ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"), startTime, r.URL.Path, 0)
return
}

if len(r.MultipartForm.Value["projectKey"]) == 0 {
ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing")) // status for missing/wrong parameter?
ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing"), startTime, r.URL.Path, 0) // status for missing/wrong parameter?
return
}
@ -3,18 +3,17 @@ package router
import (
"encoding/json"
"errors"
"github.com/Masterminds/semver"
"go.opentelemetry.io/otel/attribute"
"io"
"log"
"math/rand"
"net/http"
"openreplay/backend/internal/http/uuid"
"openreplay/backend/pkg/flakeid"
"strconv"
"time"

"github.com/Masterminds/semver"
"openreplay/backend/internal/http/uuid"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/flakeid"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/token"
)
@ -28,13 +27,6 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) (
if err != nil {
return nil, err
}

reqSize := len(bodyBytes)
e.requestSize.Record(
r.Context(),
float64(reqSize),
[]attribute.KeyValue{attribute.String("method", r.URL.Path)}...,
)
return bodyBytes, nil
}
@ -56,40 +48,43 @@ func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) (ts uint

func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0

// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
return
}

bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)

// Parse request body
req := &StartSessionRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}

// Handler's logic
if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"), startTime, r.URL.Path, bodySize)
return
}

p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
if err != nil {
if postgres.IsNoRowsErr(err) {
ResponseWithError(w, http.StatusNotFound, errors.New("project doesn't exist or capture limit has been reached"))
ResponseWithError(w, http.StatusNotFound,
errors.New("project doesn't exist or capture limit has been reached"), startTime, r.URL.Path, bodySize)
} else {
log.Printf("can't get project by key: %s", err)
ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"))
ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"), startTime, r.URL.Path, bodySize)
}
return
}
@ -99,19 +94,19 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
if err != nil || req.Reset { // Starting the new one
dice := byte(rand.Intn(100)) // [0, 100)
if dice >= p.SampleRate {
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"), startTime, r.URL.Path, bodySize)
return
}

ua := e.services.UaParser.ParseFromHTTPRequest(r)
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
return
}
startTimeMili := startTime.UnixMilli()
sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili))
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err)
ResponseWithError(w, http.StatusInternalServerError, err, startTime, r.URL.Path, bodySize)
return
}
// TODO: if EXPIRED => send message for two sessions association
@ -163,29 +158,33 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
BeaconSizeLimit: e.getBeaconSize(tokenData.ID),
StartTimestamp: int64(flakeid.ExtractTimestamp(tokenData.ID)),
Delay: tokenData.Delay,
})
}, startTime, r.URL.Path, bodySize)
}

func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0

// Check authorization
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil {
ResponseWithError(w, http.StatusUnauthorized, err)
ResponseWithError(w, http.StatusUnauthorized, err, startTime, r.URL.Path, bodySize)
return
}

// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
return
}

bodyBytes, err := e.readBody(w, r, e.getBeaconSize(sessionData.ID))
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)

// Send processed messages to queue as array of bytes
// TODO: check bytes for nonsense crap
@ -194,39 +193,43 @@ func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request)
log.Printf("can't send processed messages to queue: %s", err)
}

w.WriteHeader(http.StatusOK)
ResponseOK(w, startTime, r.URL.Path, bodySize)
}

func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
bodySize := 0

// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"), startTime, r.URL.Path, bodySize)
return
}

bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
return
}
bodySize = len(bodyBytes)

// Parse request body
req := &NotStartedRequest{}

if err := json.Unmarshal(bodyBytes, req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
ResponseWithError(w, http.StatusBadRequest, err, startTime, r.URL.Path, bodySize)
return
}

// Handler's logic
if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"))
ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"), startTime, r.URL.Path, bodySize)
return
}
ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"), startTime, r.URL.Path, bodySize)
return
}
country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
@ -248,5 +251,5 @@ func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
log.Printf("Unable to insert Unstarted Session: %v\n", err)
}

w.WriteHeader(http.StatusOK)
ResponseOK(w, startTime, r.URL.Path, bodySize)
}
@ -6,9 +6,11 @@ import (
"io/ioutil"
"log"
"net/http"
"time"
)

func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
start := time.Now()
body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit)
defer body.Close()

@ -21,7 +23,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID

reader, err = gzip.NewReader(body)
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent response
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: stage-dependent response
return
}
//log.Println("Gzip reader init", reader)
@ -32,7 +34,7 @@ func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID
//log.Println("Reader after switch:", reader)
buf, err := ioutil.ReadAll(reader)
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
ResponseWithError(w, http.StatusInternalServerError, err, start, r.URL.Path, 0) // TODO: send error here only on staging
return
}
e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send?
@ -4,21 +4,44 @@ import (
"encoding/json"
"log"
"net/http"
"time"

metrics "openreplay/backend/pkg/metrics/http"
)

func ResponseWithJSON(w http.ResponseWriter, res interface{}) {
func recordMetrics(requestStart time.Time, url string, code, bodySize int) {
if bodySize > 0 {
metrics.RecordRequestSize(float64(bodySize), url, code)
}
metrics.IncreaseTotalRequests()
metrics.RecordRequestDuration(float64(time.Now().Sub(requestStart).Milliseconds()), url, code)
}

func ResponseOK(w http.ResponseWriter, requestStart time.Time, url string, bodySize int) {
w.WriteHeader(http.StatusOK)
recordMetrics(requestStart, url, http.StatusOK, bodySize)
}

func ResponseWithJSON(w http.ResponseWriter, res interface{}, requestStart time.Time, url string, bodySize int) {
body, err := json.Marshal(res)
if err != nil {
log.Println(err)
}
w.Header().Set("Content-Type", "application/json")
w.Write(body)
recordMetrics(requestStart, url, http.StatusOK, bodySize)
}

func ResponseWithError(w http.ResponseWriter, code int, err error) {
type response struct {
Error string `json:"error"`
type response struct {
Error string `json:"error"`
}

func ResponseWithError(w http.ResponseWriter, code int, err error, requestStart time.Time, url string, bodySize int) {
body, err := json.Marshal(&response{err.Error()})
if err != nil {
log.Println(err)
}
w.WriteHeader(code)
ResponseWithJSON(w, &response{err.Error()})
w.Write(body)
recordMetrics(requestStart, url, code, bodySize)
}
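Every response path now records its own latency and body size, so handlers thread startTime, the request path, and bodySize through ResponseWithError and ResponseOK instead of relying on middleware. A condensed sketch of the resulting handler shape (exampleHandler is illustrative; the helper signatures match this diff):

func (e *Router) exampleHandler(w http.ResponseWriter, r *http.Request) {
    startTime := time.Now()
    bodySize := 0

    bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
    if err != nil {
        // bodySize is still 0 here, so no request-size metric is recorded
        ResponseWithError(w, http.StatusRequestEntityTooLarge, err, startTime, r.URL.Path, bodySize)
        return
    }
    bodySize = len(bodyBytes)

    // ... handler logic ...

    ResponseOK(w, startTime, r.URL.Path, bodySize) // 200 plus duration/size metrics
}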
@ -1,19 +1,16 @@
package router

import (
"context"
"fmt"
"github.com/gorilla/mux"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"log"
"net/http"
"sync"
"time"

"github.com/gorilla/mux"
http3 "openreplay/backend/internal/config/http"
http2 "openreplay/backend/internal/http/services"
"openreplay/backend/internal/http/util"
"openreplay/backend/pkg/monitoring"
"sync"
"time"
)

type BeaconSize struct {
@ -25,21 +22,16 @@ type Router struct {
router *mux.Router
cfg *http3.Config
services *http2.ServicesBuilder
requestSize syncfloat64.Histogram
requestDuration syncfloat64.Histogram
totalRequests syncfloat64.Counter
mutex *sync.RWMutex
beaconSizeCache map[uint64]*BeaconSize // Cache for session's beaconSize
}

func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *monitoring.Metrics) (*Router, error) {
func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder) (*Router, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case services == nil:
return nil, fmt.Errorf("services is empty")
case metrics == nil:
return nil, fmt.Errorf("metrics is empty")
}
e := &Router{
cfg: cfg,
@ -47,7 +39,6 @@ func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *moni
mutex: &sync.RWMutex{},
beaconSizeCache: make(map[uint64]*BeaconSize),
}
e.initMetrics(metrics)
e.init()
go e.clearBeaconSizes()
return e, nil
@ -115,22 +106,6 @@ func (e *Router) init() {
e.router.Use(e.corsMiddleware)
}

func (e *Router) initMetrics(metrics *monitoring.Metrics) {
var err error
e.requestSize, err = metrics.RegisterHistogram("requests_body_size")
if err != nil {
log.Printf("can't create requests_body_size metric: %s", err)
}
e.requestDuration, err = metrics.RegisterHistogram("requests_duration")
if err != nil {
log.Printf("can't create requests_duration metric: %s", err)
}
e.totalRequests, err = metrics.RegisterCounter("requests_total")
if err != nil {
log.Printf("can't create requests_total metric: %s", err)
}
}

func (e *Router) root(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
@ -149,17 +124,8 @@ func (e *Router) corsMiddleware(next http.Handler) http.Handler {

log.Printf("Request: %v - %v ", r.Method, util.SafeString(r.URL.Path))

requestStart := time.Now()

// Serve request
next.ServeHTTP(w, r)

metricsContext, _ := context.WithTimeout(context.Background(), time.Millisecond*100)
e.totalRequests.Add(metricsContext, 1)
e.requestDuration.Record(metricsContext,
float64(time.Now().Sub(requestStart).Milliseconds()),
[]attribute.KeyValue{attribute.String("method", r.URL.Path)}...,
)
})
}
5 backend/internal/service/service.go Normal file
@ -0,0 +1,5 @@
package service

type Interface interface {
    Stop()
}
@ -1,54 +1,36 @@
package sessionender

import (
"context"
"fmt"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
"time"

"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics/ender"
)

// EndedSessionHandler handler for ended sessions
type EndedSessionHandler func(sessionID uint64, timestamp int64) bool
type EndedSessionHandler func(sessionID uint64, timestamp uint64) bool

// session holds information about user's session live status
type session struct {
lastTimestamp int64
lastUpdate int64
lastUserTime int64
lastUserTime uint64
isEnded bool
}

// SessionEnder updates timestamp of last message for each session
type SessionEnder struct {
timeout int64
sessions map[uint64]*session // map[sessionID]session
timeCtrl *timeController
activeSessions syncfloat64.UpDownCounter
totalSessions syncfloat64.Counter
timeout int64
sessions map[uint64]*session // map[sessionID]session
timeCtrl *timeController
}

func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder, error) {
if metrics == nil {
return nil, fmt.Errorf("metrics module is empty")
}
activeSessions, err := metrics.RegisterUpDownCounter("sessions_active")
if err != nil {
return nil, fmt.Errorf("can't register session.active metric: %s", err)
}
totalSessions, err := metrics.RegisterCounter("sessions_total")
if err != nil {
return nil, fmt.Errorf("can't register session.total metric: %s", err)
}

func New(timeout int64, parts int) (*SessionEnder, error) {
return &SessionEnder{
timeout: timeout,
sessions: make(map[uint64]*session),
timeCtrl: NewTimeController(parts),
activeSessions: activeSessions,
totalSessions: totalSessions,
timeout: timeout,
sessions: make(map[uint64]*session),
timeCtrl: NewTimeController(parts),
}, nil
}
@ -74,8 +56,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
lastUserTime: msgTimestamp, // last timestamp from user's machine
isEnded: false,
}
se.activeSessions.Add(context.Background(), 1)
se.totalSessions.Add(context.Background(), 1)
ender.IncreaseActiveSessions()
ender.IncreaseTotalSessions()
return
}
// Keep the highest user's timestamp for correct session duration value
@ -100,7 +82,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
sess.isEnded = true
if handler(sessID, sess.lastUserTime) {
delete(se.sessions, sessID)
se.activeSessions.Add(context.Background(), -1)
ender.DecreaseActiveSessions()
ender.IncreaseClosedSessions()
removedSessions++
} else {
log.Printf("sessID: %d, userTime: %d", sessID, sess.lastUserTime)
@ -1,20 +1,19 @@
package assetscache

import (
"context"
"crypto/md5"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"io"
"log"
"net/url"
"openreplay/backend/internal/config/sink"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/url/assets"
metrics "openreplay/backend/pkg/metrics/sink"
"strings"
"sync"
"time"

"openreplay/backend/internal/config/sink"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/url/assets"
)

type CachedAsset struct {
@ -23,52 +22,21 @@ type CachedAsset struct {
}

type AssetsCache struct {
mutex sync.RWMutex
cfg *sink.Config
rewriter *assets.Rewriter
producer types.Producer
cache map[string]*CachedAsset
blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
totalAssets syncfloat64.Counter
cachedAssets syncfloat64.Counter
skippedAssets syncfloat64.Counter
assetSize syncfloat64.Histogram
assetDuration syncfloat64.Histogram
mutex sync.RWMutex
cfg *sink.Config
rewriter *assets.Rewriter
producer types.Producer
cache map[string]*CachedAsset
blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
}

func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics *monitoring.Metrics) *AssetsCache {
// Assets metrics
totalAssets, err := metrics.RegisterCounter("assets_total")
if err != nil {
log.Printf("can't create assets_total metric: %s", err)
}
cachedAssets, err := metrics.RegisterCounter("assets_cached")
if err != nil {
log.Printf("can't create assets_cached metric: %s", err)
}
skippedAssets, err := metrics.RegisterCounter("assets_skipped")
if err != nil {
log.Printf("can't create assets_skipped metric: %s", err)
}
assetSize, err := metrics.RegisterHistogram("asset_size")
if err != nil {
log.Printf("can't create asset_size metric: %s", err)
}
assetDuration, err := metrics.RegisterHistogram("asset_duration")
if err != nil {
log.Printf("can't create asset_duration metric: %s", err)
}
func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
assetsCache := &AssetsCache{
cfg: cfg,
rewriter: rewriter,
producer: producer,
cache: make(map[string]*CachedAsset, 64),
blackList: make([]string, 0),
totalAssets: totalAssets,
cachedAssets: cachedAssets,
skippedAssets: skippedAssets,
assetSize: assetSize,
assetDuration: assetDuration,
cfg: cfg,
rewriter: rewriter,
producer: producer,
cache: make(map[string]*CachedAsset, 64),
blackList: make([]string, 0),
}
// Parse black list for cache layer
if len(cfg.CacheBlackList) > 0 {
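AssetsCache likewise loses the monitoring parameter; the counters it used to register are now package-level metrics in pkg/metrics/sink. Construction becomes a plain three-argument call; a hedged wiring sketch (assets.NewRewriter and the AssetsOrigin field are assumptions about surrounding code, not shown in this diff):

// buildAssetsCache is hypothetical glue for the sink service.
func buildAssetsCache(cfg *sink.Config, producer types.Producer) *assetscache.AssetsCache {
    rewriter := assets.NewRewriter(cfg.AssetsOrigin) // assumed constructor and field
    return assetscache.New(cfg, rewriter, producer)  // metrics argument removed in this diff
}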
@ -84,7 +52,7 @@ func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, m
}

func (e *AssetsCache) cleaner() {
cleanTick := time.Tick(time.Minute * 30)
cleanTick := time.Tick(time.Minute * 3)
for {
select {
case <-cleanTick:
@ -105,6 +73,7 @@ func (e *AssetsCache) clearCache() {
if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
deleted++
delete(e.cache, id)
metrics.DecreaseCachedAssets()
}
}
log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize)
@ -232,8 +201,7 @@ func parseHost(baseURL string) (string, error) {
}

func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
ctx := context.Background()
e.totalAssets.Add(ctx, 1)
metrics.IncreaseTotalAssets()
// Try to find asset in cache
h := md5.New()
// Cut first part of url (scheme + host)
@ -255,7 +223,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
e.mutex.RUnlock()
if ok {
if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
e.skippedAssets.Add(ctx, 1)
metrics.IncreaseSkippedAssets()
return cachedAsset.msg
}
}
@ -267,8 +235,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
start := time.Now()
res := e.getRewrittenCSS(sessionID, baseURL, css)
duration := time.Now().Sub(start).Milliseconds()
e.assetSize.Record(ctx, float64(len(res)))
e.assetDuration.Record(ctx, float64(duration))
metrics.RecordAssetSize(float64(len(res)))
metrics.RecordProcessAssetDuration(float64(duration))
// Save asset to cache if we spent more than threshold
if duration > e.cfg.CacheThreshold {
e.mutex.Lock()
@ -277,7 +245,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
ts: time.Now(),
}
e.mutex.Unlock()
e.cachedAssets.Add(ctx, 1)
metrics.IncreaseCachedAssets()
}
// Return rewritten asset
return res
@ -2,20 +2,20 @@ package storage

import (
"bytes"
"context"
"fmt"
gzip "github.com/klauspost/pgzip"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"log"
config "openreplay/backend/internal/config/storage"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/storage"
"os"
"strconv"
"strings"
"sync"
"time"

config "openreplay/backend/internal/config/storage"
"openreplay/backend/pkg/messages"
metrics "openreplay/backend/pkg/metrics/storage"
"openreplay/backend/pkg/storage"

gzip "github.com/klauspost/pgzip"
)

type FileType string
@ -25,6 +25,13 @@ const (
DEV FileType = "/devtools.mob"
)

func (t FileType) String() string {
if t == DOM {
return "dom"
}
return "devtools"
}

type Task struct {
id string
doms *bytes.Buffer
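The new String method exists so that one labeled metric series can replace the per-type histogram fields deleted below: every Record* call in pkg/metrics/storage takes tp.String() as its label. A tiny self-contained illustration of the mapping:

package main

import "fmt"

type FileType string

const (
    DOM FileType = "/dom.mob"
    DEV FileType = "/devtools.mob"
)

func (t FileType) String() string {
    if t == DOM {
        return "dom"
    }
    return "devtools"
}

func main() {
    fmt.Println(DOM, "->", DOM.String()) // /dom.mob -> dom
    fmt.Println(DEV, "->", DEV.String()) // /devtools.mob -> devtools
}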
@ -36,92 +43,23 @@ type Storage struct {
cfg *config.Config
s3 *storage.S3
startBytes []byte

totalSessions syncfloat64.Counter
sessionDOMSize syncfloat64.Histogram
sessionDEVSize syncfloat64.Histogram
readingDOMTime syncfloat64.Histogram
readingDEVTime syncfloat64.Histogram
sortingDOMTime syncfloat64.Histogram
sortingDEVTime syncfloat64.Histogram
archivingDOMTime syncfloat64.Histogram
archivingDEVTime syncfloat64.Histogram
uploadingDOMTime syncfloat64.Histogram
uploadingDEVTime syncfloat64.Histogram

tasks chan *Task
ready chan struct{}
tasks chan *Task
ready chan struct{}
}

func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) {
func New(cfg *config.Config, s3 *storage.S3) (*Storage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case s3 == nil:
return nil, fmt.Errorf("s3 storage is empty")
}
// Create metrics
totalSessions, err := metrics.RegisterCounter("sessions_total")
if err != nil {
log.Printf("can't create sessions_total metric: %s", err)
}
sessionDOMSize, err := metrics.RegisterHistogram("sessions_size")
if err != nil {
log.Printf("can't create session_size metric: %s", err)
}
sessionDevtoolsSize, err := metrics.RegisterHistogram("sessions_dt_size")
if err != nil {
log.Printf("can't create sessions_dt_size metric: %s", err)
}
readingDOMTime, err := metrics.RegisterHistogram("reading_duration")
if err != nil {
log.Printf("can't create reading_duration metric: %s", err)
}
readingDEVTime, err := metrics.RegisterHistogram("reading_dt_duration")
if err != nil {
log.Printf("can't create reading_duration metric: %s", err)
}
sortingDOMTime, err := metrics.RegisterHistogram("sorting_duration")
if err != nil {
log.Printf("can't create reading_duration metric: %s", err)
}
sortingDEVTime, err := metrics.RegisterHistogram("sorting_dt_duration")
if err != nil {
log.Printf("can't create reading_duration metric: %s", err)
}
archivingDOMTime, err := metrics.RegisterHistogram("archiving_duration")
if err != nil {
log.Printf("can't create archiving_duration metric: %s", err)
}
archivingDEVTime, err := metrics.RegisterHistogram("archiving_dt_duration")
if err != nil {
log.Printf("can't create archiving_duration metric: %s", err)
}
uploadingDOMTime, err := metrics.RegisterHistogram("uploading_duration")
if err != nil {
log.Printf("can't create uploading_duration metric: %s", err)
}
uploadingDEVTime, err := metrics.RegisterHistogram("uploading_dt_duration")
if err != nil {
log.Printf("can't create uploading_duration metric: %s", err)
}
newStorage := &Storage{
cfg: cfg,
s3: s3,
startBytes: make([]byte, cfg.FileSplitSize),
totalSessions: totalSessions,
sessionDOMSize: sessionDOMSize,
sessionDEVSize: sessionDevtoolsSize,
readingDOMTime: readingDOMTime,
readingDEVTime: readingDEVTime,
sortingDOMTime: sortingDOMTime,
sortingDEVTime: sortingDEVTime,
archivingDOMTime: archivingDOMTime,
archivingDEVTime: archivingDEVTime,
uploadingDOMTime: uploadingDOMTime,
uploadingDEVTime: uploadingDEVTime,
tasks: make(chan *Task, 1),
ready: make(chan struct{}),
cfg: cfg,
s3: s3,
startBytes: make([]byte, cfg.FileSplitSize),
tasks: make(chan *Task, 1),
ready: make(chan struct{}),
}
go newStorage.worker()
return newStorage, nil
@ -187,11 +125,7 @@ func (s *Storage) openSession(filePath string, tp FileType) ([]byte, error) {
if err != nil {
return nil, fmt.Errorf("can't sort session, err: %s", err)
}
if tp == DOM {
s.sortingDOMTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
} else {
s.sortingDEVTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
}
metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
return res, nil
}

@ -215,26 +149,19 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
if err != nil {
return err
}
durRead := time.Now().Sub(startRead).Milliseconds()
// Send metrics
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
if tp == DOM {
s.sessionDOMSize.Record(ctx, float64(len(mob)))
s.readingDOMTime.Record(ctx, float64(durRead))
} else {
s.sessionDEVSize.Record(ctx, float64(len(mob)))
s.readingDEVTime.Record(ctx, float64(durRead))
}
metrics.RecordSessionSize(float64(len(mob)), tp.String())
metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())

// Encode and compress session
if tp == DEV {
startCompress := time.Now()
start := time.Now()
task.dev = s.compressSession(mob)
s.archivingDEVTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds()))
metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
} else {
if len(mob) <= s.cfg.FileSplitSize {
startCompress := time.Now()
start := time.Now()
task.doms = s.compressSession(mob)
s.archivingDOMTime.Record(ctx, float64(time.Now().Sub(startCompress).Milliseconds()))
metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
return nil
}
wg := &sync.WaitGroup{}
@ -253,7 +180,7 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
wg.Done()
}()
wg.Wait()
s.archivingDOMTime.Record(ctx, float64(firstPart+secondPart))
metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
}
return nil
}
@ -324,11 +251,9 @@ func (s *Storage) uploadSession(task *Task) {
wg.Done()
}()
wg.Wait()
// Record metrics
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
s.uploadingDOMTime.Record(ctx, float64(uploadDoms+uploadDome))
s.uploadingDEVTime.Record(ctx, float64(uploadDev))
s.totalSessions.Add(ctx, 1)
metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
metrics.IncreaseStorageTotalSessions()
}

func (s *Storage) worker() {
6 backend/pkg/db/cache/messages-common.go vendored
@ -21,7 +21,8 @@ func (c *PGCache) HandleSessionEnd(sessionID uint64) error {
return nil
}

func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
func (c *PGCache) InsertIssueEvent(crash *IssueEvent) error {
sessionID := crash.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
@ -29,7 +30,8 @@ func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
return c.Conn.InsertIssueEvent(sessionID, session.ProjectID, crash)
}

func (c *PGCache) InsertMetadata(sessionID uint64, metadata *Metadata) error {
func (c *PGCache) InsertMetadata(metadata *Metadata) error {
sessionID := metadata.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
30 backend/pkg/db/cache/messages-ios.go vendored
@ -6,7 +6,8 @@ import (
. "openreplay/backend/pkg/messages"
)

func (c *PGCache) InsertIOSSessionStart(sessionID uint64, s *IOSSessionStart) error {
func (c *PGCache) InsertIOSSessionStart(s *IOSSessionStart) error {
sessionID := s.SessionID()
if c.Cache.HasSession(sessionID) {
return fmt.Errorf("session %d already in cache", sessionID)
}
@ -33,13 +34,15 @@ func (c *PGCache) InsertIOSSessionStart(sessionID uint64, s *IOSSessionStart) er
return nil
}

func (c *PGCache) InsertIOSSessionEnd(sessionID uint64, e *IOSSessionEnd) error {
func (c *PGCache) InsertIOSSessionEnd(e *IOSSessionEnd) error {
sessionID := e.SessionID()
_, err := c.InsertSessionEnd(sessionID, e.Timestamp)
return err
}

func (c *PGCache) InsertIOSScreenEnter(sessionID uint64, screenEnter *IOSScreenEnter) error {
if err := c.Conn.InsertIOSScreenEnter(sessionID, screenEnter); err != nil {
func (c *PGCache) InsertIOSScreenEnter(screenEnter *IOSScreenEnter) error {
sessionID := screenEnter.SessionID()
if err := c.Conn.InsertIOSScreenEnter(screenEnter); err != nil {
return err
}
session, err := c.Cache.GetSession(sessionID)
@ -50,8 +53,9 @@ func (c *PGCache) InsertIOSScreenEnter(sessionID uint64, screenEnter *IOSScreenE
return nil
}

func (c *PGCache) InsertIOSClickEvent(sessionID uint64, clickEvent *IOSClickEvent) error {
if err := c.Conn.InsertIOSClickEvent(sessionID, clickEvent); err != nil {
func (c *PGCache) InsertIOSClickEvent(clickEvent *IOSClickEvent) error {
sessionID := clickEvent.SessionID()
if err := c.Conn.InsertIOSClickEvent(clickEvent); err != nil {
return err
}
session, err := c.Cache.GetSession(sessionID)
@ -62,8 +66,9 @@ func (c *PGCache) InsertIOSClickEvent(sessionID uint64, clickEvent *IOSClickEven
return nil
}

func (c *PGCache) InsertIOSInputEvent(sessionID uint64, inputEvent *IOSInputEvent) error {
if err := c.Conn.InsertIOSInputEvent(sessionID, inputEvent); err != nil {
func (c *PGCache) InsertIOSInputEvent(inputEvent *IOSInputEvent) error {
sessionID := inputEvent.SessionID()
if err := c.Conn.InsertIOSInputEvent(inputEvent); err != nil {
return err
}
session, err := c.Cache.GetSession(sessionID)
@ -74,18 +79,15 @@ func (c *PGCache) InsertIOSInputEvent(sessionID uint64, inputEvent *IOSInputEven
return nil
}

func (c *PGCache) InsertIOSCrash(sessionID uint64, crash *IOSCrash) error {
func (c *PGCache) InsertIOSCrash(crash *IOSCrash) error {
sessionID := crash.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
}
if err := c.Conn.InsertIOSCrash(sessionID, session.ProjectID, crash); err != nil {
if err := c.Conn.InsertIOSCrash(session.ProjectID, crash); err != nil {
return err
}
session.ErrorsCount += 1
return nil
}

func (c *PGCache) InsertIOSIssueEvent(sessionID uint64, issueEvent *IOSIssueEvent) error {
return nil
}
30 backend/pkg/db/cache/messages-web.go vendored
@ -30,7 +30,8 @@ func (c *PGCache) InsertWebSessionStart(sessionID uint64, s *SessionStart) error
})
}

func (c *PGCache) HandleWebSessionStart(sessionID uint64, s *SessionStart) error {
func (c *PGCache) HandleWebSessionStart(s *SessionStart) error {
sessionID := s.SessionID()
if c.Cache.HasSession(sessionID) {
return fmt.Errorf("session %d already in cache", sessionID)
}
@ -69,7 +70,8 @@ func (c *PGCache) InsertWebSessionEnd(sessionID uint64, e *SessionEnd) error {
return err
}

func (c *PGCache) HandleWebSessionEnd(sessionID uint64, e *SessionEnd) error {
func (c *PGCache) HandleWebSessionEnd(e *SessionEnd) error {
sessionID := e.SessionID()
return c.HandleSessionEnd(sessionID)
}

@ -99,7 +101,8 @@ func (c *PGCache) InsertSessionReferrer(sessionID uint64, referrer string) error
return c.Conn.InsertSessionReferrer(sessionID, referrer)
}

func (c *PGCache) InsertWebNetworkRequest(sessionID uint64, e *NetworkRequest) error {
func (c *PGCache) InsertWebNetworkRequest(e *NetworkRequest) error {
sessionID := e.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
@ -111,7 +114,8 @@ func (c *PGCache) InsertWebNetworkRequest(sessionID uint64, e *NetworkRequest) e
return c.Conn.InsertWebNetworkRequest(sessionID, session.ProjectID, project.SaveRequestPayloads, e)
}

func (c *PGCache) InsertWebGraphQL(sessionID uint64, e *GraphQL) error {
func (c *PGCache) InsertWebGraphQL(e *GraphQL) error {
sessionID := e.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
@ -123,7 +127,8 @@ func (c *PGCache) InsertWebGraphQL(sessionID uint64, e *GraphQL) error {
return c.Conn.InsertWebGraphQL(sessionID, session.ProjectID, project.SaveRequestPayloads, e)
}

func (c *PGCache) InsertWebCustomEvent(sessionID uint64, e *CustomEvent) error {
func (c *PGCache) InsertWebCustomEvent(e *CustomEvent) error {
sessionID := e.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
@ -131,7 +136,8 @@ func (c *PGCache) InsertWebCustomEvent(sessionID uint64, e *CustomEvent) error {
return c.Conn.InsertWebCustomEvent(sessionID, session.ProjectID, e)
}

func (c *PGCache) InsertWebUserID(sessionID uint64, userID *UserID) error {
func (c *PGCache) InsertWebUserID(userID *UserID) error {
sessionID := userID.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
@ -139,7 +145,8 @@ func (c *PGCache) InsertWebUserID(sessionID uint64, userID *UserID) error {
return c.Conn.InsertWebUserID(sessionID, session.ProjectID, userID)
}

func (c *PGCache) InsertWebUserAnonymousID(sessionID uint64, userAnonymousID *UserAnonymousID) error {
func (c *PGCache) InsertWebUserAnonymousID(userAnonymousID *UserAnonymousID) error {
sessionID := userAnonymousID.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
@ -147,7 +154,8 @@ func (c *PGCache) InsertWebUserAnonymousID(sessionID uint64, userAnonymousID *Us
return c.Conn.InsertWebUserAnonymousID(sessionID, session.ProjectID, userAnonymousID)
}

func (c *PGCache) InsertWebPageEvent(sessionID uint64, e *PageEvent) error {
func (c *PGCache) InsertWebPageEvent(e *PageEvent) error {
sessionID := e.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
@ -155,7 +163,8 @@ func (c *PGCache) InsertWebPageEvent(sessionID uint64, e *PageEvent) error {
return c.Conn.InsertWebPageEvent(sessionID, session.ProjectID, e)
}

func (c *PGCache) InsertWebClickEvent(sessionID uint64, e *ClickEvent) error {
func (c *PGCache) InsertWebClickEvent(e *MouseClick) error {
sessionID := e.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
@ -163,7 +172,8 @@ func (c *PGCache) InsertWebClickEvent(sessionID uint64, e *ClickEvent) error {
return c.Conn.InsertWebClickEvent(sessionID, session.ProjectID, e)
}

func (c *PGCache) InsertWebInputEvent(sessionID uint64, e *InputEvent) error {
func (c *PGCache) InsertWebInputEvent(e *InputEvent) error {
sessionID := e.SessionID()
session, err := c.Cache.GetSession(sessionID)
if err != nil {
return err
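The mechanical change across all three cache files is identical: the explicit sessionID parameter disappears and is recovered from the message itself, which is what lets datasaver's handleMessage pass bare messages around. In caller terms (saveClick is illustrative; the method signature matches this diff):

// No session ID argument any more; the message carries its own.
func saveClick(pg *cache.PGCache, click *messages.MouseClick) error {
    return pg.InsertWebClickEvent(click) // sessionID := click.SessionID() inside
}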
24 backend/pkg/db/clickhouse/connector.go Normal file
@ -0,0 +1,24 @@
package clickhouse

import (
    "openreplay/backend/pkg/db/types"
    "openreplay/backend/pkg/messages"
)

type Connector interface {
    Prepare() error
    Commit() error
    Stop() error
    InsertWebSession(session *types.Session) error
    InsertWebResourceEvent(session *types.Session, msg *messages.ResourceTiming) error
    InsertWebPageEvent(session *types.Session, msg *messages.PageEvent) error
    InsertWebClickEvent(session *types.Session, msg *messages.MouseClick) error
    InsertWebInputEvent(session *types.Session, msg *messages.InputEvent) error
    InsertWebErrorEvent(session *types.Session, msg *types.ErrorEvent) error
    InsertWebPerformanceTrackAggr(session *types.Session, msg *messages.PerformanceTrackAggr) error
    InsertAutocomplete(session *types.Session, msgType, msgValue string) error
    InsertRequest(session *types.Session, msg *messages.NetworkRequest, savePayload bool) error
    InsertCustom(session *types.Session, msg *messages.CustomEvent) error
    InsertGraphQL(session *types.Session, msg *messages.GraphQL) error
    InsertIssue(session *types.Session, msg *messages.IssueEvent) error
}
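Connector is a plain interface, so a deployment without ClickHouse can leave saverImpl.ch nil (saver.go nil-checks it), while tests can plug in a no-op implementation. A hedged sketch of such a stub (NopConnector is hypothetical, not part of this commit; the signatures mirror the interface above):

package clickhouse

import (
    "openreplay/backend/pkg/db/types"
    "openreplay/backend/pkg/messages"
)

// NopConnector satisfies Connector and discards everything.
type NopConnector struct{}

func (NopConnector) Prepare() error { return nil }
func (NopConnector) Commit() error  { return nil }
func (NopConnector) Stop() error    { return nil }
func (NopConnector) InsertWebSession(*types.Session) error { return nil }
func (NopConnector) InsertWebResourceEvent(*types.Session, *messages.ResourceTiming) error { return nil }
func (NopConnector) InsertWebPageEvent(*types.Session, *messages.PageEvent) error { return nil }
func (NopConnector) InsertWebClickEvent(*types.Session, *messages.MouseClick) error { return nil }
func (NopConnector) InsertWebInputEvent(*types.Session, *messages.InputEvent) error { return nil }
func (NopConnector) InsertWebErrorEvent(*types.Session, *types.ErrorEvent) error { return nil }
func (NopConnector) InsertWebPerformanceTrackAggr(*types.Session, *messages.PerformanceTrackAggr) error { return nil }
func (NopConnector) InsertAutocomplete(*types.Session, string, string) error { return nil }
func (NopConnector) InsertRequest(*types.Session, *messages.NetworkRequest, bool) error { return nil }
func (NopConnector) InsertCustom(*types.Session, *messages.CustomEvent) error { return nil }
func (NopConnector) InsertGraphQL(*types.Session, *messages.GraphQL) error { return nil }
func (NopConnector) InsertIssue(*types.Session, *messages.IssueEvent) error { return nil }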
@ -1,14 +1,13 @@
package postgres

import (
"context"
"github.com/jackc/pgx/v4"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"log"
"openreplay/backend/pkg/monitoring"
"strings"
"time"

"openreplay/backend/pkg/metrics/database"

"github.com/jackc/pgx/v4"
)

type batchItem struct {
@ -78,21 +77,17 @@ func NewBatchesTask(size int) *batchesTask {
}

type BatchSet struct {
c Pool
batches map[uint64]*SessionBatch
batchQueueLimit int
batchSizeLimit int
batchSizeBytes syncfloat64.Histogram
batchSizeLines syncfloat64.Histogram
sqlRequestTime syncfloat64.Histogram
sqlRequestCounter syncfloat64.Counter
updates map[uint64]*sessionUpdates
workerTask chan *batchesTask
done chan struct{}
finished chan struct{}
c Pool
batches map[uint64]*SessionBatch
batchQueueLimit int
batchSizeLimit int
updates map[uint64]*sessionUpdates
workerTask chan *batchesTask
done chan struct{}
finished chan struct{}
}

func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *BatchSet {
func NewBatchSet(c Pool, queueLimit, sizeLimit int) *BatchSet {
bs := &BatchSet{
c: c,
batches: make(map[uint64]*SessionBatch),
@ -103,31 +98,10 @@ func NewBatchSet(c Pool, queueLimit, sizeLimit int, metrics *monitoring.Metrics)
finished: make(chan struct{}),
updates: make(map[uint64]*sessionUpdates),
}
bs.initMetrics(metrics)
go bs.worker()
return bs
}

func (conn *BatchSet) initMetrics(metrics *monitoring.Metrics) {
var err error
conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes")
if err != nil {
log.Printf("can't create batchSizeBytes metric: %s", err)
}
conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines")
if err != nil {
log.Printf("can't create batchSizeLines metric: %s", err)
}
conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time")
if err != nil {
log.Printf("can't create sqlRequestTime metric: %s", err)
}
conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number")
if err != nil {
log.Printf("can't create sqlRequestNumber metric: %s", err)
}
}

func (conn *BatchSet) getBatch(sessionID uint64) *SessionBatch {
sessionID = sessionID % 10
if _, ok := conn.batches[sessionID]; !ok {
@ -194,11 +168,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
// Append session update sql request to the end of batch
batch.Prepare()
// Record batch size in bytes and number of lines
conn.batchSizeBytes.Record(context.Background(), float64(batch.Size()))
conn.batchSizeLines.Record(context.Background(), float64(batch.Len()))
database.RecordBatchSize(float64(batch.Size()))
database.RecordBatchElements(float64(batch.Len()))

start := time.Now()
isFailed := false

// Send batch to db and execute
br := conn.c.SendBatch(batch.batch)
@ -209,15 +182,10 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
failedSql := batch.items[i]
query := strings.ReplaceAll(failedSql.query, "\n", " ")
log.Println("failed sql req:", query, failedSql.arguments)
isFailed = true
}
}
br.Close() // returns err
dur := time.Now().Sub(start).Milliseconds()
conn.sqlRequestTime.Record(context.Background(), float64(dur),
attribute.String("method", "batch"), attribute.Bool("failed", isFailed))
conn.sqlRequestCounter.Add(context.Background(), 1,
attribute.String("method", "batch"), attribute.Bool("failed", isFailed))
database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
}
}

@ -225,9 +193,7 @@ func (conn *BatchSet) worker() {
for {
select {
case t := <-conn.workerTask:
start := time.Now()
conn.sendBatches(t)
|
||||
log.Printf("pg batches dur: %d", time.Now().Sub(start).Milliseconds())
|
||||
case <-conn.done:
|
||||
if len(conn.workerTask) > 0 {
|
||||
for t := range conn.workerTask {
|
||||
|
|
|
|||
|
|
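Throughout this file, per-struct OpenTelemetry instruments are replaced by package-level recorders from openreplay/backend/pkg/metrics/database. That package's implementation is not part of this diff; purely as an assumption, a recorder of this shape could be backed by Prometheus:

```go
// Hypothetical sketch only; the real pkg/metrics/database may differ.
package database

import "github.com/prometheus/client_golang/prometheus"

var batchSize = prometheus.NewHistogram(prometheus.HistogramOpts{
	Namespace: "db",
	Name:      "batch_size_bytes",
	Help:      "Size of an SQL batch in bytes.",
	Buckets:   prometheus.ExponentialBuckets(1024, 4, 10),
})

func init() {
	prometheus.MustRegister(batchSize)
}

// RecordBatchSize mirrors the call sites above: a bare package function,
// so BatchSet no longer carries histogram fields or a metrics dependency.
func RecordBatchSize(size float64) {
	batchSize.Observe(size)
}
```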
@@ -2,13 +2,9 @@ package postgres
 
 import (
 	"bytes"
-	"context"
 	"errors"
 	"fmt"
-	"go.opentelemetry.io/otel/attribute"
-	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
-	"log"
-	"openreplay/backend/pkg/monitoring"
+	"openreplay/backend/pkg/metrics/database"
 	"time"
 )
 
@@ -25,15 +21,13 @@ type Bulk interface {
 }
 
 type bulkImpl struct {
-	conn         Pool
-	table        string
-	columns      string
-	template     string
-	setSize      int
-	sizeLimit    int
-	values       []interface{}
-	bulkSize     syncfloat64.Histogram
-	bulkDuration syncfloat64.Histogram
+	conn      Pool
+	table     string
+	columns   string
+	template  string
+	setSize   int
+	sizeLimit int
+	values    []interface{}
 }
 
 func (b *bulkImpl) Append(args ...interface{}) error {
@@ -79,18 +73,15 @@ func (b *bulkImpl) send() error {
 		return fmt.Errorf("send bulk err: %s", err)
 	}
 	// Save bulk metrics
-	ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
-	b.bulkDuration.Record(ctx, float64(time.Now().Sub(start).Milliseconds()), attribute.String("table", b.table))
-	b.bulkSize.Record(ctx, float64(size), attribute.String("table", b.table))
+	database.RecordBulkElements(float64(size), "pg", b.table)
+	database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
 	return nil
 }
 
-func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
+func NewBulk(conn Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
 	switch {
 	case conn == nil:
 		return nil, errors.New("db conn is empty")
-	case metrics == nil:
-		return nil, errors.New("metrics is empty")
 	case table == "":
 		return nil, errors.New("table is empty")
 	case columns == "":
@@ -102,23 +93,13 @@ func NewBulk(conn Pool, metrics *monitoring.Metrics, table, columns, template st
 	case sizeLimit <= 0:
 		return nil, errors.New("size limit is wrong")
 	}
-	messagesInBulk, err := metrics.RegisterHistogram("messages_in_bulk")
-	if err != nil {
-		log.Printf("can't create messages_size metric: %s", err)
-	}
-	bulkInsertDuration, err := metrics.RegisterHistogram("bulk_insert_duration")
-	if err != nil {
-		log.Printf("can't create messages_size metric: %s", err)
-	}
 	return &bulkImpl{
-		conn:         conn,
-		table:        table,
-		columns:      columns,
-		template:     template,
-		setSize:      setSize,
-		sizeLimit:    sizeLimit,
-		values:       make([]interface{}, 0, setSize*sizeLimit),
-		bulkSize:     messagesInBulk,
-		bulkDuration: bulkInsertDuration,
+		conn:      conn,
+		table:     table,
+		columns:   columns,
+		template:  template,
+		setSize:   setSize,
+		sizeLimit: sizeLimit,
+		values:    make([]interface{}, 0, setSize*sizeLimit),
 	}, nil
 }
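With the *monitoring.Metrics parameter gone, a bulk needs only the pool and the SQL shape. A sketch of wiring one up under the new signature, inside package postgres, assuming a connected Pool; setSize must match the column count:

```go
package postgres

import "log"

// Example wiring for the metrics-free NewBulk signature; the Pool is
// assumed to be connected already, and the limits are placeholders.
func exampleRequestsBulk(pool Pool, sessionID, ts, seqIndex uint64, url string, duration uint64) {
	bulk, err := NewBulk(pool,
		"events_common.requests",
		"(session_id, timestamp, seq_index, url, duration, success)",
		"($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
		6, 100)
	if err != nil {
		log.Fatalf("can't create requests bulk: %s", err)
	}
	// One row per Append; the bulk flushes itself after sizeLimit sets.
	if err := bulk.Append(sessionID, ts, seqIndex, url, duration, true); err != nil {
		log.Printf("append err: %s", err)
	}
}
```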
@@ -2,8 +2,6 @@ package postgres
 
 import (
 	"log"
-	"openreplay/backend/pkg/monitoring"
-	"time"
 )
 
 type bulksTask struct {
@@ -30,16 +28,14 @@ type BulkSet struct {
 	webCustomEvents   Bulk
 	webClickEvents    Bulk
 	webNetworkRequest Bulk
-	metrics           *monitoring.Metrics
 	workerTask        chan *bulksTask
 	done              chan struct{}
 	finished          chan struct{}
 }
 
-func NewBulkSet(c Pool, metrics *monitoring.Metrics) *BulkSet {
+func NewBulkSet(c Pool) *BulkSet {
 	bs := &BulkSet{
 		c:          c,
-		metrics:    metrics,
 		workerTask: make(chan *bulksTask, 1),
 		done:       make(chan struct{}),
 		finished:   make(chan struct{}),
@@ -86,7 +82,7 @@ func (conn *BulkSet) Get(name string) Bulk {
 
 func (conn *BulkSet) initBulks() {
 	var err error
-	conn.autocompletes, err = NewBulk(conn.c, conn.metrics,
+	conn.autocompletes, err = NewBulk(conn.c,
 		"autocomplete",
 		"(value, type, project_id)",
 		"($%d, $%d, $%d)",
@@ -94,7 +90,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create autocomplete bulk: %s", err)
 	}
-	conn.requests, err = NewBulk(conn.c, conn.metrics,
+	conn.requests, err = NewBulk(conn.c,
 		"events_common.requests",
 		"(session_id, timestamp, seq_index, url, duration, success)",
 		"($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
@@ -102,7 +98,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create requests bulk: %s", err)
 	}
-	conn.customEvents, err = NewBulk(conn.c, conn.metrics,
+	conn.customEvents, err = NewBulk(conn.c,
 		"events_common.customs",
 		"(session_id, timestamp, seq_index, name, payload)",
 		"($%d, $%d, $%d, LEFT($%d, 2000), $%d)",
@@ -110,7 +106,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create customEvents bulk: %s", err)
 	}
-	conn.webPageEvents, err = NewBulk(conn.c, conn.metrics,
+	conn.webPageEvents, err = NewBulk(conn.c,
 		"events.pages",
 		"(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
 			"load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
@@ -122,7 +118,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webPageEvents bulk: %s", err)
 	}
-	conn.webInputEvents, err = NewBulk(conn.c, conn.metrics,
+	conn.webInputEvents, err = NewBulk(conn.c,
 		"events.inputs",
 		"(session_id, message_id, timestamp, value, label)",
 		"($%d, $%d, $%d, LEFT($%d, 2000), NULLIF(LEFT($%d, 2000),''))",
@@ -130,7 +126,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webPageEvents bulk: %s", err)
 	}
-	conn.webGraphQL, err = NewBulk(conn.c, conn.metrics,
+	conn.webGraphQL, err = NewBulk(conn.c,
 		"events.graphql",
 		"(session_id, timestamp, message_id, name, request_body, response_body)",
 		"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@@ -138,7 +134,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webPageEvents bulk: %s", err)
 	}
-	conn.webErrors, err = NewBulk(conn.c, conn.metrics,
+	conn.webErrors, err = NewBulk(conn.c,
 		"errors",
 		"(error_id, project_id, source, name, message, payload)",
 		"($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
@@ -146,7 +142,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webErrors bulk: %s", err)
 	}
-	conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics,
+	conn.webErrorEvents, err = NewBulk(conn.c,
 		"events.errors",
 		"(session_id, message_id, timestamp, error_id)",
 		"($%d, $%d, $%d, $%d)",
@@ -154,7 +150,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webErrorEvents bulk: %s", err)
 	}
-	conn.webErrorTags, err = NewBulk(conn.c, conn.metrics,
+	conn.webErrorTags, err = NewBulk(conn.c,
 		"public.errors_tags",
 		"(session_id, message_id, error_id, key, value)",
 		"($%d, $%d, $%d, $%d, $%d)",
@@ -162,7 +158,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webErrorEvents bulk: %s", err)
 	}
-	conn.webIssues, err = NewBulk(conn.c, conn.metrics,
+	conn.webIssues, err = NewBulk(conn.c,
 		"issues",
 		"(project_id, issue_id, type, context_string)",
 		"($%d, $%d, $%d, $%d)",
@@ -170,7 +166,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webIssues bulk: %s", err)
 	}
-	conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics,
+	conn.webIssueEvents, err = NewBulk(conn.c,
 		"events_common.issues",
 		"(session_id, issue_id, timestamp, seq_index, payload)",
 		"($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
@@ -178,7 +174,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webIssueEvents bulk: %s", err)
 	}
-	conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics,
+	conn.webCustomEvents, err = NewBulk(conn.c,
 		"events_common.customs",
 		"(session_id, seq_index, timestamp, name, payload, level)",
 		"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
@@ -186,7 +182,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webCustomEvents bulk: %s", err)
 	}
-	conn.webClickEvents, err = NewBulk(conn.c, conn.metrics,
+	conn.webClickEvents, err = NewBulk(conn.c,
 		"events.clicks",
 		"(session_id, message_id, timestamp, label, selector, url, path)",
 		"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000))",
@@ -194,7 +190,7 @@ func (conn *BulkSet) initBulks() {
 	if err != nil {
 		log.Fatalf("can't create webClickEvents bulk: %s", err)
 	}
-	conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics,
+	conn.webNetworkRequest, err = NewBulk(conn.c,
 		"events_common.requests",
 		"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success)",
 		"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d)",
@@ -246,9 +242,7 @@ func (conn *BulkSet) worker() {
 	for {
 		select {
 		case t := <-conn.workerTask:
-			start := time.Now()
 			conn.sendBulks(t)
-			log.Printf("pg bulks dur: %d", time.Now().Sub(start).Milliseconds())
 		case <-conn.done:
 			if len(conn.workerTask) > 0 {
 				for t := range conn.workerTask {
@@ -2,11 +2,10 @@ package postgres
 
 import (
 	"context"
-	"github.com/jackc/pgx/v4/pgxpool"
-	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
 	"log"
+
+	"github.com/jackc/pgx/v4/pgxpool"
 	"openreplay/backend/pkg/db/types"
-	"openreplay/backend/pkg/monitoring"
 )
 
 type CH interface {
@@ -15,36 +14,28 @@ type CH interface {
 
 // Conn contains batches, bulks and cache for all sessions
 type Conn struct {
-	c                 Pool
-	batches           *BatchSet
-	bulks             *BulkSet
-	batchSizeBytes    syncfloat64.Histogram
-	batchSizeLines    syncfloat64.Histogram
-	sqlRequestTime    syncfloat64.Histogram
-	sqlRequestCounter syncfloat64.Counter
-	chConn            CH
+	c       Pool
+	batches *BatchSet
+	bulks   *BulkSet
+	chConn  CH // hack for autocomplete inserts, TODO: rewrite
 }
 
 func (conn *Conn) SetClickHouse(ch CH) {
 	conn.chConn = ch
 }
 
-func NewConn(url string, queueLimit, sizeLimit int, metrics *monitoring.Metrics) *Conn {
-	if metrics == nil {
-		log.Fatalf("metrics is nil")
-	}
+func NewConn(url string, queueLimit, sizeLimit int) *Conn {
 	c, err := pgxpool.Connect(context.Background(), url)
 	if err != nil {
 		log.Fatalf("pgxpool.Connect err: %s", err)
 	}
 	conn := &Conn{}
-	conn.initMetrics(metrics)
-	conn.c, err = NewPool(c, conn.sqlRequestTime, conn.sqlRequestCounter)
+	conn.c, err = NewPool(c)
 	if err != nil {
 		log.Fatalf("can't create new pool wrapper: %s", err)
 	}
-	conn.bulks = NewBulkSet(conn.c, metrics)
-	conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit, metrics)
+	conn.bulks = NewBulkSet(conn.c)
+	conn.batches = NewBatchSet(conn.c, queueLimit, sizeLimit)
 	return conn
 }
 
@@ -55,26 +46,6 @@ func (conn *Conn) Close() error {
 	return nil
 }
 
-func (conn *Conn) initMetrics(metrics *monitoring.Metrics) {
-	var err error
-	conn.batchSizeBytes, err = metrics.RegisterHistogram("batch_size_bytes")
-	if err != nil {
-		log.Printf("can't create batchSizeBytes metric: %s", err)
-	}
-	conn.batchSizeLines, err = metrics.RegisterHistogram("batch_size_lines")
-	if err != nil {
-		log.Printf("can't create batchSizeLines metric: %s", err)
-	}
-	conn.sqlRequestTime, err = metrics.RegisterHistogram("sql_request_time")
-	if err != nil {
-		log.Printf("can't create sqlRequestTime metric: %s", err)
-	}
-	conn.sqlRequestCounter, err = metrics.RegisterCounter("sql_request_number")
-	if err != nil {
-		log.Printf("can't create sqlRequestNumber metric: %s", err)
-	}
-}
-
 func (conn *Conn) insertAutocompleteValue(sessionID uint64, projectID uint32, tp string, value string) {
 	if len(value) == 0 {
 		return
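After this change the whole PG layer is wired from one call. A usage sketch; the import path and connection URL are assumptions, not taken from this diff:

```go
package main

import "openreplay/backend/pkg/db/postgres"

func main() {
	// URL and limits are placeholders; NewConn now builds the pool, bulks,
	// and batches itself, with no *monitoring.Metrics argument.
	conn := postgres.NewConn("postgres://user:pass@localhost:5432/db", 20, 1_000_000)
	defer conn.Close()
}
```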
@@ -6,7 +6,8 @@ import (
 	"openreplay/backend/pkg/url"
 )
 
-func (conn *Conn) InsertIOSCustomEvent(sessionID uint64, e *messages.IOSCustomEvent) error {
+func (conn *Conn) InsertIOSCustomEvent(e *messages.IOSCustomEvent) error {
+	sessionID := e.SessionID()
 	err := conn.InsertCustomEvent(sessionID, e.Timestamp, truncSqIdx(e.Index), e.Name, e.Payload)
 	if err == nil {
 		conn.insertAutocompleteValue(sessionID, 0, "CUSTOM_IOS", e.Name)
@@ -14,7 +15,8 @@ func (conn *Conn) InsertIOSCustomEvent(sessionID uint64, e *messages.IOSCustomEv
 	return err
 }
 
-func (conn *Conn) InsertIOSUserID(sessionID uint64, userID *messages.IOSUserID) error {
+func (conn *Conn) InsertIOSUserID(userID *messages.IOSUserID) error {
+	sessionID := userID.SessionID()
 	err := conn.InsertUserID(sessionID, userID.Value)
 	if err == nil {
 		conn.insertAutocompleteValue(sessionID, 0, "USERID_IOS", userID.Value)
@@ -22,7 +24,8 @@ func (conn *Conn) InsertIOSUserID(sessionID uint64, userID *messages.IOSUserID)
 	return err
 }
 
-func (conn *Conn) InsertIOSUserAnonymousID(sessionID uint64, userAnonymousID *messages.IOSUserAnonymousID) error {
+func (conn *Conn) InsertIOSUserAnonymousID(userAnonymousID *messages.IOSUserAnonymousID) error {
+	sessionID := userAnonymousID.SessionID()
 	err := conn.InsertUserAnonymousID(sessionID, userAnonymousID.Value)
 	if err == nil {
 		conn.insertAutocompleteValue(sessionID, 0, "USERANONYMOUSID_IOS", userAnonymousID.Value)
@@ -30,7 +33,8 @@ func (conn *Conn) InsertIOSUserAnonymousID(sessionID uint64, userAnonymousID *me
 	return err
 }
 
-func (conn *Conn) InsertIOSNetworkCall(sessionID uint64, e *messages.IOSNetworkCall) error {
+func (conn *Conn) InsertIOSNetworkCall(e *messages.IOSNetworkCall) error {
+	sessionID := e.SessionID()
 	err := conn.InsertRequest(sessionID, e.Timestamp, truncSqIdx(e.Index), e.URL, e.Duration, e.Success)
 	if err == nil {
 		conn.insertAutocompleteValue(sessionID, 0, "REQUEST_IOS", url.DiscardURLQuery(e.URL))
@@ -38,7 +42,8 @@ func (conn *Conn) InsertIOSNetworkCall(sessionID uint64, e *messages.IOSNetworkC
 	return err
 }
 
-func (conn *Conn) InsertIOSScreenEnter(sessionID uint64, screenEnter *messages.IOSScreenEnter) error {
+func (conn *Conn) InsertIOSScreenEnter(screenEnter *messages.IOSScreenEnter) error {
+	sessionID := screenEnter.SessionID()
 	tx, err := conn.c.Begin()
 	if err != nil {
 		return err
@@ -69,7 +74,8 @@ func (conn *Conn) InsertIOSScreenEnter(sessionID uint64, screenEnter *messages.I
 	return nil
 }
 
-func (conn *Conn) InsertIOSClickEvent(sessionID uint64, clickEvent *messages.IOSClickEvent) error {
+func (conn *Conn) InsertIOSClickEvent(clickEvent *messages.IOSClickEvent) error {
+	sessionID := clickEvent.SessionID()
 	tx, err := conn.c.Begin()
 	if err != nil {
 		return err
@@ -100,7 +106,8 @@ func (conn *Conn) InsertIOSClickEvent(sessionID uint64, clickEvent *messages.IOS
 	return nil
 }
 
-func (conn *Conn) InsertIOSInputEvent(sessionID uint64, inputEvent *messages.IOSInputEvent) error {
+func (conn *Conn) InsertIOSInputEvent(inputEvent *messages.IOSInputEvent) error {
+	sessionID := inputEvent.SessionID()
 	tx, err := conn.c.Begin()
 	if err != nil {
 		return err
@@ -137,7 +144,8 @@ func (conn *Conn) InsertIOSInputEvent(sessionID uint64, inputEvent *messages.IOS
 	return nil
 }
 
-func (conn *Conn) InsertIOSCrash(sessionID uint64, projectID uint32, crash *messages.IOSCrash) error {
+func (conn *Conn) InsertIOSCrash(projectID uint32, crash *messages.IOSCrash) error {
+	sessionID := crash.SessionID()
 	tx, err := conn.c.Begin()
 	if err != nil {
 		return err
@@ -5,7 +5,8 @@ import (
 	"openreplay/backend/pkg/url"
 )
 
-func (conn *Conn) InsertWebStatsPerformance(sessionID uint64, p *PerformanceTrackAggr) error {
+func (conn *Conn) InsertWebStatsPerformance(p *PerformanceTrackAggr) error {
+	sessionID := p.SessionID()
 	timestamp := (p.TimestampEnd + p.TimestampStart) / 2
 
 	sqlRequest := `
@@ -35,40 +36,37 @@ func (conn *Conn) InsertWebStatsPerformance(sessionID uint64, p *PerformanceTrac
 	return nil
 }
 
-func (conn *Conn) InsertWebStatsResourceEvent(sessionID uint64, e *ResourceEvent) error {
+func (conn *Conn) InsertWebStatsResourceEvent(e *ResourceTiming) error {
+	sessionID := e.SessionID()
 	host, _, _, err := url.GetURLParts(e.URL)
 	if err != nil {
 		return err
 	}
 
+	msgType := url.GetResourceType(e.Initiator, e.URL)
 	sqlRequest := `
 		INSERT INTO events.resources (
 			session_id, timestamp, message_id,
 			type,
 			url, url_host, url_hostpath,
 			success, status,
-			method,
 			duration, ttfb, header_size, encoded_body_size, decoded_body_size
 		) VALUES (
 			$1, $2, $3,
 			$4,
 			LEFT($5, 8000), LEFT($6, 300), LEFT($7, 2000),
 			$8, $9,
-			NULLIF($10, '')::events.resource_method,
-			NULLIF($11, 0), NULLIF($12, 0), NULLIF($13, 0), NULLIF($14, 0), NULLIF($15, 0)
+			NULLIF($10, 0), NULLIF($11, 0), NULLIF($12, 0), NULLIF($13, 0), NULLIF($14, 0)
 		)`
 	urlQuery := url.DiscardURLQuery(e.URL)
-	urlMethod := url.EnsureMethod(e.Method)
 	conn.batchQueue(sessionID, sqlRequest,
-		sessionID, e.Timestamp, truncSqIdx(e.MessageID),
-		e.Type,
+		sessionID, e.Timestamp, truncSqIdx(e.MsgID()),
+		msgType,
 		e.URL, host, urlQuery,
-		e.Success, e.Status,
-		urlMethod,
+		e.Duration != 0, 0,
 		e.Duration, e.TTFB, e.HeaderSize, e.EncodedBodySize, e.DecodedBodySize,
 	)
 
 	// Record approximate message size
-	conn.updateBatchSize(sessionID, len(sqlRequest)+len(e.Type)+len(e.URL)+len(host)+len(urlQuery)+len(urlMethod)+8*9+1)
+	conn.updateBatchSize(sessionID, len(sqlRequest)+len(msgType)+len(e.URL)+len(host)+len(urlQuery)+8*9+1)
 	return nil
 }
@@ -57,10 +57,13 @@ func (conn *Conn) InsertWebPageEvent(sessionID uint64, projectID uint32, e *Page
 	return nil
 }
 
-func (conn *Conn) InsertWebClickEvent(sessionID uint64, projectID uint32, e *ClickEvent) error {
+func (conn *Conn) InsertWebClickEvent(sessionID uint64, projectID uint32, e *MouseClick) error {
+	if e.Label == "" {
+		return nil
+	}
 	var host, path string
 	host, path, _, _ = url.GetURLParts(e.Url)
-	if err := conn.bulks.Get("webClickEvents").Append(sessionID, truncSqIdx(e.MessageID), e.Timestamp, e.Label, e.Selector, host+path, path); err != nil {
+	if err := conn.bulks.Get("webClickEvents").Append(sessionID, truncSqIdx(e.MsgID()), e.Timestamp, e.Label, e.Selector, host+path, path); err != nil {
 		log.Printf("insert web click err: %s", err)
 	}
 	// Accumulate session updates and exec inside batch with another sql commands
@ -3,12 +3,12 @@ package postgres
|
|||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/jackc/pgx/v4/pgxpool"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
"github.com/jackc/pgx/v4"
|
||||
"github.com/jackc/pgx/v4/pgxpool"
|
||||
"openreplay/backend/pkg/metrics/database"
|
||||
)
|
||||
|
||||
// Pool is a pgx.Pool wrapper with metrics integration
|
||||
|
|
@ -22,19 +22,15 @@ type Pool interface {
|
|||
}
|
||||
|
||||
type poolImpl struct {
|
||||
conn *pgxpool.Pool
|
||||
sqlRequestTime syncfloat64.Histogram
|
||||
sqlRequestCounter syncfloat64.Counter
|
||||
conn *pgxpool.Pool
|
||||
}
|
||||
|
||||
func (p *poolImpl) Query(sql string, args ...interface{}) (pgx.Rows, error) {
|
||||
start := time.Now()
|
||||
res, err := p.conn.Query(getTimeoutContext(), sql, args...)
|
||||
method, table := methodName(sql)
|
||||
p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
|
||||
attribute.String("method", method), attribute.String("table", table))
|
||||
p.sqlRequestCounter.Add(context.Background(), 1,
|
||||
attribute.String("method", method), attribute.String("table", table))
|
||||
database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
|
||||
database.IncreaseTotalRequests(method, table)
|
||||
return res, err
|
||||
}
|
||||
|
||||
|
|
@ -42,10 +38,8 @@ func (p *poolImpl) QueryRow(sql string, args ...interface{}) pgx.Row {
|
|||
start := time.Now()
|
||||
res := p.conn.QueryRow(getTimeoutContext(), sql, args...)
|
||||
method, table := methodName(sql)
|
||||
p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
|
||||
attribute.String("method", method), attribute.String("table", table))
|
||||
p.sqlRequestCounter.Add(context.Background(), 1,
|
||||
attribute.String("method", method), attribute.String("table", table))
|
||||
database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
|
||||
database.IncreaseTotalRequests(method, table)
|
||||
return res
|
||||
}
|
||||
|
||||
|
|
@ -53,45 +47,37 @@ func (p *poolImpl) Exec(sql string, arguments ...interface{}) error {
|
|||
start := time.Now()
|
||||
_, err := p.conn.Exec(getTimeoutContext(), sql, arguments...)
|
||||
method, table := methodName(sql)
|
||||
p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
|
||||
attribute.String("method", method), attribute.String("table", table))
|
||||
p.sqlRequestCounter.Add(context.Background(), 1,
|
||||
attribute.String("method", method), attribute.String("table", table))
|
||||
database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
|
||||
database.IncreaseTotalRequests(method, table)
|
||||
return err
|
||||
}
|
||||
|
||||
func (p *poolImpl) SendBatch(b *pgx.Batch) pgx.BatchResults {
|
||||
start := time.Now()
|
||||
res := p.conn.SendBatch(getTimeoutContext(), b)
|
||||
p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
|
||||
attribute.String("method", "sendBatch"))
|
||||
p.sqlRequestCounter.Add(context.Background(), 1,
|
||||
attribute.String("method", "sendBatch"))
|
||||
database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "sendBatch", "")
|
||||
database.IncreaseTotalRequests("sendBatch", "")
|
||||
return res
|
||||
}
|
||||
|
||||
func (p *poolImpl) Begin() (*_Tx, error) {
|
||||
start := time.Now()
|
||||
tx, err := p.conn.Begin(context.Background())
|
||||
p.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
|
||||
attribute.String("method", "begin"))
|
||||
p.sqlRequestCounter.Add(context.Background(), 1,
|
||||
attribute.String("method", "begin"))
|
||||
return &_Tx{tx, p.sqlRequestTime, p.sqlRequestCounter}, err
|
||||
database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "begin", "")
|
||||
database.IncreaseTotalRequests("begin", "")
|
||||
return &_Tx{tx}, err
|
||||
}
|
||||
|
||||
func (p *poolImpl) Close() {
|
||||
p.conn.Close()
|
||||
}
|
||||
|
||||
func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlRequestCounter syncfloat64.Counter) (Pool, error) {
|
||||
func NewPool(conn *pgxpool.Pool) (Pool, error) {
|
||||
if conn == nil {
|
||||
return nil, errors.New("conn is empty")
|
||||
}
|
||||
return &poolImpl{
|
||||
conn: conn,
|
||||
sqlRequestTime: sqlRequestTime,
|
||||
sqlRequestCounter: sqlRequestCounter,
|
||||
conn: conn,
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -99,38 +85,30 @@ func NewPool(conn *pgxpool.Pool, sqlRequestTime syncfloat64.Histogram, sqlReques
|
|||
|
||||
type _Tx struct {
|
||||
pgx.Tx
|
||||
sqlRequestTime syncfloat64.Histogram
|
||||
sqlRequestCounter syncfloat64.Counter
|
||||
}
|
||||
|
||||
func (tx *_Tx) exec(sql string, args ...interface{}) error {
|
||||
start := time.Now()
|
||||
_, err := tx.Exec(context.Background(), sql, args...)
|
||||
method, table := methodName(sql)
|
||||
tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
|
||||
attribute.String("method", method), attribute.String("table", table))
|
||||
tx.sqlRequestCounter.Add(context.Background(), 1,
|
||||
attribute.String("method", method), attribute.String("table", table))
|
||||
database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), method, table)
|
||||
database.IncreaseTotalRequests(method, table)
|
||||
return err
|
||||
}
|
||||
|
||||
func (tx *_Tx) rollback() error {
|
||||
start := time.Now()
|
||||
err := tx.Rollback(context.Background())
|
||||
tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
|
||||
attribute.String("method", "rollback"))
|
||||
tx.sqlRequestCounter.Add(context.Background(), 1,
|
||||
attribute.String("method", "rollback"))
|
||||
database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "rollback", "")
|
||||
database.IncreaseTotalRequests("rollback", "")
|
||||
return err
|
||||
}
|
||||
|
||||
func (tx *_Tx) commit() error {
|
||||
start := time.Now()
|
||||
err := tx.Commit(context.Background())
|
||||
tx.sqlRequestTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()),
|
||||
attribute.String("method", "commit"))
|
||||
tx.sqlRequestCounter.Add(context.Background(), 1,
|
||||
attribute.String("method", "commit"))
|
||||
database.RecordRequestDuration(float64(time.Now().Sub(start).Milliseconds()), "commit", "")
|
||||
database.IncreaseTotalRequests("commit", "")
|
||||
return err
|
||||
}
|
||||
|
||||
|
|
@ -169,7 +147,8 @@ func methodName(sql string) (string, string) {
|
|||
case "update":
|
||||
table = strings.TrimSpace(parts[1])
|
||||
case "insert":
|
||||
table = strings.TrimSpace(parts[2])
|
||||
tableNameParts := strings.Split(strings.TrimSpace(parts[2]), "(")
|
||||
table = tableNameParts[0]
|
||||
}
|
||||
return cmd, table
|
||||
}
|
||||
|
|
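The last hunk fixes the table tag parsed from INSERT statements: `INSERT INTO events.pages(session_id, ...)` previously yielded `events.pages(session_id,` as the tag. A standalone re-creation of the fixed branch (the tokenization here is a simplification of the real methodName):

```go
package main

import (
	"fmt"
	"strings"
)

// tableFromInsert cuts the table token at the first "(", so
// "events.pages(session_id," becomes "events.pages".
func tableFromInsert(sql string) string {
	parts := strings.SplitN(strings.ToLower(strings.TrimSpace(sql)), " ", 4)
	tableNameParts := strings.Split(strings.TrimSpace(parts[2]), "(")
	return tableNameParts[0]
}

func main() {
	fmt.Println(tableFromInsert("INSERT INTO events.pages(session_id, message_id) VALUES ($1, $2)"))
	// Output: events.pages
}
```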
@@ -120,3 +120,15 @@ func (e *ErrorEvent) ID(projectID uint32) string {
 	}
 	return strconv.FormatUint(uint64(projectID), 16) + hex.EncodeToString(hash.Sum(nil))
 }
+
+func WrapCustomEvent(m *CustomEvent) *IssueEvent {
+	msg := &IssueEvent{
+		Type:          "custom",
+		Timestamp:     m.Time(),
+		MessageID:     m.MsgID(),
+		ContextString: m.Name,
+		Payload:       m.Payload,
+	}
+	msg.Meta().SetMeta(m.Meta())
+	return msg
+}
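WrapCustomEvent promotes a tracker-supplied CustomEvent into the same IssueEvent shape the detectors emit. An assumed usage fragment, inside a handler that already holds the message:

```go
// m is a *messages.CustomEvent received by the handler.
issue := messages.WrapCustomEvent(m)
// issue.Type == "custom"; Timestamp, MessageID, and the session meta are
// carried over via m.Time(), m.MsgID(), and SetMeta(m.Meta()).
```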
@@ -1,82 +0,0 @@
-package custom
-
-import (
-	"net/url"
-	"strings"
-
-	. "openreplay/backend/pkg/messages"
-)
-
-func getURLExtention(URL string) string {
-	u, err := url.Parse(URL)
-	if err != nil {
-		return ""
-	}
-	i := strings.LastIndex(u.Path, ".")
-	return u.Path[i+1:]
-}
-
-func getResourceType(initiator string, URL string) string {
-	switch initiator {
-	case "xmlhttprequest", "fetch":
-		return "fetch"
-	case "img":
-		return "img"
-	default:
-		switch getURLExtention(URL) {
-		case "css":
-			return "stylesheet"
-		case "js":
-			return "script"
-		case "png", "gif", "jpg", "jpeg", "svg":
-			return "img"
-		case "mp4", "mkv", "ogg", "webm", "avi", "mp3":
-			return "media"
-		default:
-			return "other"
-		}
-	}
-}
-
-type EventMapper struct{}
-
-func (b *EventMapper) Build() Message {
-	return nil
-}
-
-func (b *EventMapper) Handle(message Message, messageID uint64, timestamp uint64) Message {
-	switch msg := message.(type) {
-	case *MouseClick:
-		if msg.Label != "" {
-			return &ClickEvent{
-				MessageID:      messageID,
-				Label:          msg.Label,
-				HesitationTime: msg.HesitationTime,
-				Timestamp:      timestamp,
-				Selector:       msg.Selector,
-			}
-		}
-	case *ResourceTiming:
-		return &ResourceEvent{
-			MessageID:       messageID,
-			Timestamp:       msg.Timestamp,
-			Duration:        msg.Duration,
-			TTFB:            msg.TTFB,
-			HeaderSize:      msg.HeaderSize,
-			EncodedBodySize: msg.EncodedBodySize,
-			DecodedBodySize: msg.DecodedBodySize,
-			URL:             msg.URL,
-			Type:            getResourceType(msg.Initiator, msg.URL),
-			Success:         msg.Duration != 0,
-		}
-	case *CustomIssue:
-		return &IssueEvent{
-			Type:          "custom",
-			Timestamp:     timestamp,
-			MessageID:     messageID,
-			ContextString: msg.Name,
-			Payload:       msg.Payload,
-		}
-	}
-	return nil
-}
@@ -4,7 +4,7 @@ import (
 	. "openreplay/backend/pkg/messages"
 )
 
-const INPUT_EVENT_TIMEOUT = 1 * 60 * 1000
+const InputEventTimeout = 1 * 60 * 1000
 
 type inputLabels map[uint64]string
 
@@ -24,7 +24,7 @@ func (b *inputEventBuilder) clearLabels() {
 	b.inputLabels = make(inputLabels)
 }
 
-func (b *inputEventBuilder) Handle(message Message, messageID uint64, timestamp uint64) Message {
+func (b *inputEventBuilder) Handle(message Message, timestamp uint64) Message {
 	var inputEvent Message = nil
 	switch msg := message.(type) {
 	case *SetInputTarget:
@@ -41,7 +41,7 @@ func (b *inputEventBuilder) Handle(message Message, messageID uint64, timestamp
 	}
 	if b.inputEvent == nil {
 		b.inputEvent = &InputEvent{
-			MessageID:   messageID,
+			MessageID:   message.MsgID(),
 			Timestamp:   timestamp,
 			Value:       msg.Value,
 			ValueMasked: msg.Mask > 0,
@@ -59,7 +59,7 @@ func (b *inputEventBuilder) Handle(message Message, messageID uint64, timestamp
 		return b.Build()
 	}
 
-	if b.inputEvent != nil && b.inputEvent.Timestamp+INPUT_EVENT_TIMEOUT < timestamp {
+	if b.inputEvent != nil && b.inputEvent.Timestamp+InputEventTimeout < timestamp {
 		return b.Build()
 	}
 	return nil
@@ -4,7 +4,7 @@ import (
 	. "openreplay/backend/pkg/messages"
 )
 
-const PAGE_EVENT_TIMEOUT = 1 * 60 * 1000
+const PageEventTimeout = 1 * 60 * 1000
 
 type pageEventBuilder struct {
 	pageEvent *PageEvent
@@ -16,7 +16,7 @@ func NewPageEventBuilder() *pageEventBuilder {
 	return ieBuilder
 }
 
-func (b *pageEventBuilder) Handle(message Message, messageID uint64, timestamp uint64) Message {
+func (b *pageEventBuilder) Handle(message Message, timestamp uint64) Message {
 	switch msg := message.(type) {
 	case *SetPageLocation:
 		if msg.NavigationStart == 0 { // routing without new page loading
@@ -24,7 +24,7 @@ func (b *pageEventBuilder) Handle(message Message, messageID uint64, timestamp u
 				URL:       msg.URL,
 				Referrer:  msg.Referrer,
 				Loaded:    false,
-				MessageID: messageID,
+				MessageID: message.MsgID(),
 				Timestamp: timestamp,
 			}
 		} else {
@@ -33,7 +33,7 @@ func (b *pageEventBuilder) Handle(message Message, messageID uint64, timestamp u
 				URL:       msg.URL,
 				Referrer:  msg.Referrer,
 				Loaded:    true,
-				MessageID: messageID,
+				MessageID: message.MsgID(),
 				Timestamp: timestamp,
 			}
 			return pageEvent
@@ -81,7 +81,7 @@ func (b *pageEventBuilder) Handle(message Message, messageID uint64, timestamp u
 
 	}
 
-	if b.pageEvent != nil && b.pageEvent.Timestamp+PAGE_EVENT_TIMEOUT < timestamp {
+	if b.pageEvent != nil && b.pageEvent.Timestamp+PageEventTimeout < timestamp {
 		return b.Build()
 	}
 	return nil
@@ -48,7 +48,7 @@ func (h *ClickRageDetector) Handle(message Message, messageID uint64, timestamp
 }
 
 func (h *ClickRageDetector) Build() Message {
-	if h.countsInARow >= web.MIN_CLICKS_IN_A_ROW {
+	if h.countsInARow >= web.MinClicksInARow {
 		event := &IOSIssueEvent{
 			Type:          "click_rage",
 			ContextString: h.lastLabel,
@@ -6,6 +6,6 @@ import . "openreplay/backend/pkg/messages"
 // U can create your own message handler and easily connect to heuristics service
 
 type MessageProcessor interface {
-	Handle(message Message, messageID uint64, timestamp uint64) Message
+	Handle(message Message, timestamp uint64) Message
 	Build() Message
 }
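Every detector below is migrated to this two-argument Handle. A self-contained sketch of a processor written against the new contract, with local stand-ins for the real Message interface:

```go
package main

import "fmt"

// Local stand-ins; the real Message interface lives in
// openreplay/backend/pkg/messages and also carries MsgID().
type Message interface{ MsgID() uint64 }

type MessageProcessor interface {
	Handle(message Message, timestamp uint64) Message
	Build() Message
}

type mouseClick struct{ id uint64 }

func (m *mouseClick) MsgID() uint64 { return m.id }

// countingProcessor shows the new contract: the message ID is read from
// the message itself, not passed as a separate argument.
type countingProcessor struct{ clicks int }

func (p *countingProcessor) Handle(msg Message, ts uint64) Message {
	if _, ok := msg.(*mouseClick); ok {
		p.clicks++
	}
	return nil // emit nothing until Build
}

func (p *countingProcessor) Build() Message { return nil }

func main() {
	p := &countingProcessor{}
	p.Handle(&mouseClick{id: 7}, 1000)
	fmt.Println("clicks:", p.clicks)
}
```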
@@ -7,14 +7,8 @@ import (
 	. "openreplay/backend/pkg/messages"
 )
 
-/*
-	Handler name: ClickRage
-	Input event: MouseClick
-	Output event: IssueEvent
-*/
-
-const MAX_TIME_DIFF = 300
-const MIN_CLICKS_IN_A_ROW = 3
+const MaxTimeDiff = 300
+const MinClicksInARow = 3
 
 type ClickRageDetector struct {
 	lastTimestamp uint64
@@ -34,46 +28,54 @@ func (crd *ClickRageDetector) reset() {
 	crd.url = ""
 }
 
-func (crd *ClickRageDetector) Build() Message {
-	defer crd.reset()
-	if crd.countsInARow >= MIN_CLICKS_IN_A_ROW {
-		payload, err := json.Marshal(struct{ Count int }{crd.countsInARow})
-		if err != nil {
-			log.Printf("can't marshal ClickRage payload to json: %s", err)
-		}
-		event := &IssueEvent{
-			Type:          "click_rage",
-			ContextString: crd.lastLabel,
-			Payload:       string(payload),
-			Timestamp:     crd.firstInARawTimestamp,
-			MessageID:     crd.firstInARawMessageId,
-			URL:           crd.url,
-		}
-		return event
+func (crd *ClickRageDetector) createPayload() string {
+	p, err := json.Marshal(struct{ Count int }{crd.countsInARow})
+	if err != nil {
+		log.Printf("can't marshal ClickRage payload to json: %s", err)
+		return ""
 	}
-	return nil
+	return string(p)
+}
+
+func (crd *ClickRageDetector) Build() Message {
+	defer crd.reset()
+	if crd.countsInARow < MinClicksInARow {
+		return nil
+	}
+	return &IssueEvent{
+		Type:          "click_rage",
+		ContextString: crd.lastLabel,
+		Payload:       crd.createPayload(),
+		Timestamp:     crd.firstInARawTimestamp,
+		MessageID:     crd.firstInARawMessageId,
+		URL:           crd.url,
+	}
 }
 
-func (crd *ClickRageDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
+func (crd *ClickRageDetector) Handle(message Message, timestamp uint64) Message {
 	switch msg := message.(type) {
 	case *MouseClick:
+		// Set click url
+		if crd.url == "" && msg.Url != "" {
+			crd.url = msg.Url
+		}
+		// TODO: check if we it is ok to capture clickRage event without the connected ClickEvent in db.
+		// Click on different object -> build if we can and reset the builder
 		if msg.Label == "" {
 			return crd.Build()
 		}
-		if crd.lastLabel == msg.Label && timestamp-crd.lastTimestamp < MAX_TIME_DIFF {
+		// Update builder with last information
+		if crd.lastLabel == msg.Label && timestamp-crd.lastTimestamp < MaxTimeDiff {
 			crd.lastTimestamp = timestamp
 			crd.countsInARow += 1
 			return nil
 		}
+		// Try to build event
 		event := crd.Build()
+		// Use current message as init values for new event
 		crd.lastTimestamp = timestamp
 		crd.lastLabel = msg.Label
 		crd.firstInARawTimestamp = timestamp
-		crd.firstInARawMessageId = messageID
+		crd.firstInARawMessageId = message.MsgID()
 		crd.countsInARow = 1
-		if crd.url == "" && msg.Url != "" {
-			crd.url = msg.Url
@@ -15,8 +15,8 @@ import (
 	Output event: IssueEvent
 */
 
-const CPU_THRESHOLD = 70 // % out of 100
-const CPU_MIN_DURATION_TRIGGER = 6 * 1000
+const CpuThreshold = 70 // % out of 100
+const CpuMinDurationTrigger = 6 * 1000
 
 type CpuIssueDetector struct {
 	startTimestamp uint64
@@ -26,65 +26,61 @@ type CpuIssueDetector struct {
 	contextString string
 }
 
-func (f *CpuIssueDetector) Build() Message {
-	if f.startTimestamp == 0 {
-		return nil
-	}
-	duration := f.lastTimestamp - f.startTimestamp
-	timestamp := f.startTimestamp
-	messageID := f.startMessageID
-	maxRate := f.maxRate
-
-	f.startTimestamp = 0
-	f.startMessageID = 0
-	f.maxRate = 0
-	if duration < CPU_MIN_DURATION_TRIGGER {
-		return nil
-	}
-
-	payload, err := json.Marshal(struct {
+func (f *CpuIssueDetector) createPayload() string {
+	p, err := json.Marshal(struct {
 		Duration uint64
 		Rate     uint64
-	}{duration, maxRate})
+	}{f.duration(), f.maxRate})
 	if err != nil {
 		log.Printf("can't marshal CpuIssue payload to json: %s", err)
 	}
+	return string(p)
+}
+
+func (f *CpuIssueDetector) duration() uint64 {
+	return f.lastTimestamp - f.startTimestamp
+}
+
+func (f *CpuIssueDetector) reset() {
+	f.startTimestamp = 0
+	f.startMessageID = 0
+	f.maxRate = 0
+}
+
+func (f *CpuIssueDetector) Build() Message {
+	defer f.reset()
+	if f.startTimestamp == 0 || f.duration() < CpuMinDurationTrigger {
+		return nil
+	}
 	return &IssueEvent{
 		Type:          "cpu",
-		Timestamp:     timestamp,
-		MessageID:     messageID,
+		Timestamp:     f.startTimestamp,
+		MessageID:     f.startMessageID,
 		ContextString: f.contextString,
-		Payload:       string(payload),
+		Payload:       f.createPayload(),
 	}
 }
 
-func (f *CpuIssueDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
+func (f *CpuIssueDetector) Handle(message Message, timestamp uint64) Message {
 	switch msg := message.(type) {
 	case *PerformanceTrack:
-		dt := performance.TimeDiff(timestamp, f.lastTimestamp)
-		if dt == 0 {
-			return nil // TODO: handle error
+		// Ignore if it's a wrong message order
+		if timestamp < f.lastTimestamp {
+			return nil
 		}
-
 		f.lastTimestamp = timestamp
-
-		if msg.Frames == -1 || msg.Ticks == -1 {
+		cpuRate := performance.CPURate(msg.Ticks, performance.TimeDiff(timestamp, f.lastTimestamp))
+		// Build event if cpu issue have gone
+		if msg.Frames == -1 || msg.Ticks == -1 || cpuRate < CpuThreshold {
 			return f.Build()
 		}
-
-		cpuRate := performance.CPURate(msg.Ticks, dt)
-
-		if cpuRate >= CPU_THRESHOLD {
-			if f.startTimestamp == 0 {
-				f.startTimestamp = timestamp
-				f.startMessageID = messageID
-			}
-			if f.maxRate < cpuRate {
-				f.maxRate = cpuRate
-			}
-		} else {
-			return f.Build()
+		// Update values
+		if f.startTimestamp == 0 {
+			f.startTimestamp = timestamp
+			f.startMessageID = message.MsgID()
+		}
+		if f.maxRate < cpuRate {
+			f.maxRate = cpuRate
 		}
 	case *SetPageLocation:
 		f.contextString = msg.URL
@@ -4,43 +4,39 @@ import (
 	. "openreplay/backend/pkg/messages"
 )
 
-/*
-	Handler name: DeadClick
-	Input events: SetInputTarget,
-				  CreateDocument,
-				  MouseClick,
-				  SetNodeAttribute,
-				  RemoveNodeAttribute,
-				  CreateElementNode,
-				  CreateTextNode,
-				  MoveNode,
-				  RemoveNode,
-				  SetCSSData,
-				  CSSInsertRule,
-				  CSSDeleteRule
-	Output event: IssueEvent
-*/
-
-const CLICK_RELATION_TIME = 1234
+const ClickRelationTime = 1234
 
 type DeadClickDetector struct {
-	lastTimestamp      uint64
 	lastMouseClick     *MouseClick
+	lastTimestamp      uint64
 	lastClickTimestamp uint64
 	lastMessageID      uint64
 	inputIDSet         map[uint64]bool
 }
 
+func NewDeadClickDetector() *DeadClickDetector {
+	return &DeadClickDetector{inputIDSet: make(map[uint64]bool)}
+}
+
+func (d *DeadClickDetector) addInputID(id uint64) {
+	d.inputIDSet[id] = true
+}
+
+func (d *DeadClickDetector) clearInputIDs() {
+	d.inputIDSet = make(map[uint64]bool)
+}
+
 func (d *DeadClickDetector) reset() {
-	d.inputIDSet = nil
 	d.lastMouseClick = nil
 	d.lastClickTimestamp = 0
 	d.lastMessageID = 0
+	d.clearInputIDs()
 }
 
-func (d *DeadClickDetector) build(timestamp uint64) Message {
+func (d *DeadClickDetector) Build() Message {
+	// remove reset from external Build call
 	defer d.reset()
-	if d.lastMouseClick == nil || d.lastClickTimestamp+CLICK_RELATION_TIME > timestamp { // reaction is instant
+	if d.lastMouseClick == nil || d.lastClickTimestamp+ClickRelationTime > d.lastTimestamp { // reaction is instant
 		return nil
 	}
 	event := &IssueEvent{
@@ -52,42 +48,37 @@ func (d *DeadClickDetector) build(timestamp uint64) Message {
 	return event
 }
 
-func (d *DeadClickDetector) Build() Message {
-	return d.build(d.lastTimestamp)
-}
-
-func (d *DeadClickDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
+func (d *DeadClickDetector) Handle(message Message, timestamp uint64) Message {
 	d.lastTimestamp = timestamp
 	switch msg := message.(type) {
 	case *SetInputTarget:
-		if d.inputIDSet == nil {
-			d.inputIDSet = make(map[uint64]bool)
-		}
-		d.inputIDSet[msg.ID] = true
+		d.addInputID(msg.ID)
 	case *CreateDocument:
-		d.inputIDSet = nil
+		d.clearInputIDs()
 	case *MouseClick:
 		if msg.Label == "" {
 			return nil
 		}
-		event := d.build(timestamp)
-		if d.inputIDSet[msg.ID] { // ignore if input
+		isInputEvent := d.inputIDSet[msg.ID]
+		event := d.Build()
+		if isInputEvent {
 			return event
 		}
 		d.lastMouseClick = msg
 		d.lastClickTimestamp = timestamp
-		d.lastMessageID = messageID
+		d.lastMessageID = message.MsgID()
 		return event
 	case *SetNodeAttribute,
 		*RemoveNodeAttribute,
 		*CreateElementNode,
 		*CreateTextNode,
 		*SetNodeFocus,
 		*MoveNode,
 		*RemoveNode,
 		*SetCSSData,
 		*CSSInsertRule,
 		*CSSDeleteRule:
-		return d.build(timestamp)
+		return d.Build()
 	}
 	return nil
 }
@@ -1,55 +0,0 @@
-package web
-
-import (
-	. "openreplay/backend/pkg/messages"
-)
-
-/*
-	Handler name: DomDrop
-	Input events: CreateElementNode,
-				  CreateTextNode,
-				  RemoveNode
-	Output event: DOMDrop
-*/
-
-const DROP_WINDOW = 200 //ms
-const CRITICAL_COUNT = 1 // Our login page contains 20. But on crush it removes only roots (1-3 nodes).
-// TODO: smart detection (making whole DOM tree would eat all memory)
-
-type domDropDetector struct {
-	removedCount      int
-	lastDropTimestamp uint64
-}
-
-func (dd *domDropDetector) reset() {
-	dd.removedCount = 0
-	dd.lastDropTimestamp = 0
-}
-
-func (dd *domDropDetector) Handle(message Message, _ uint64, timestamp uint64) Message {
-	switch message.(type) {
-	case *CreateElementNode,
-		*CreateTextNode:
-		dd.removedCount = 0
-		dd.lastDropTimestamp = 0
-	case *RemoveNode:
-		if dd.lastDropTimestamp+DROP_WINDOW > timestamp {
-			dd.removedCount += 1
-		} else {
-			dd.removedCount = 1
-		}
-		dd.lastDropTimestamp = timestamp
-	}
-	return nil
-}
-
-func (dd *domDropDetector) Build() Message {
-	defer dd.reset()
-	if dd.removedCount >= CRITICAL_COUNT {
-		domDrop := &DOMDrop{
-			Timestamp: dd.lastDropTimestamp,
-		}
-		return domDrop
-	}
-	return nil
-}
@@ -8,13 +8,6 @@ import (
 	. "openreplay/backend/pkg/messages"
 )
 
-/*
-	Handler name: MemoryIssue
-	Input events: PerformanceTrack,
-				  SetPageLocation
-	Output event: IssueEvent
-*/
-
 const MIN_COUNT = 3
 const MEM_RATE_THRESHOLD = 300 // % to average
 
@@ -52,7 +45,7 @@ func (f *MemoryIssueDetector) Build() Message {
 	return event
 }
 
-func (f *MemoryIssueDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
+func (f *MemoryIssueDetector) Handle(message Message, timestamp uint64) Message {
 	switch msg := message.(type) {
 	case *PerformanceTrack:
 		if f.count < MIN_COUNT {
@@ -70,7 +63,7 @@ func (f *MemoryIssueDetector) Handle(message Message, messageID uint64, timestam
 		if rate >= MEM_RATE_THRESHOLD {
 			if f.startTimestamp == 0 {
 				f.startTimestamp = timestamp
-				f.startMessageID = messageID
+				f.startMessageID = message.MsgID()
 			}
 			if f.rate < rate {
 				f.rate = rate
@@ -4,26 +4,19 @@ import (
 	. "openreplay/backend/pkg/messages"
 )
 
-/*
-	Handler name: NetworkIssue
-	Input events: ResourceTiming,
-				  NetworkRequest
-	Output event: IssueEvent
-*/
-
 type NetworkIssueDetector struct{}
 
 func (f *NetworkIssueDetector) Build() Message {
 	return nil
 }
 
-func (f *NetworkIssueDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
+func (f *NetworkIssueDetector) Handle(message Message, timestamp uint64) Message {
 	switch msg := message.(type) {
 	case *NetworkRequest:
 		if msg.Status >= 400 {
 			return &IssueEvent{
 				Type:          "bad_request",
-				MessageID:     messageID,
+				MessageID:     message.MsgID(),
 				Timestamp:     msg.Timestamp,
 				ContextString: msg.URL,
 			}
@@ -7,13 +7,7 @@ import (
 	"openreplay/backend/pkg/messages/performance"
 )
 
-/*
-	Handler name: PerformanceAggregator
-	Input event: PerformanceTrack
-	Output event: PerformanceTrackAggr
-*/
-
-const AGGREGATION_WINDOW = 2 * 60 * 1000
+const AggregationWindow = 2 * 60 * 1000
 
 type PerformanceAggregator struct {
 	*PerformanceTrackAggr
@@ -42,7 +36,7 @@ func (b *PerformanceAggregator) reset() {
 	b.lastTimestamp = 0
 }
 
-func (b *PerformanceAggregator) Handle(message Message, _ uint64, timestamp uint64) Message {
+func (b *PerformanceAggregator) Handle(message Message, timestamp uint64) Message {
 	switch msg := message.(type) {
 	case *PerformanceTrack:
 		if b.PerformanceTrackAggr == nil || msg.Frames == -1 || msg.Ticks == -1 {
@@ -93,7 +87,7 @@ func (b *PerformanceAggregator) Handle(message Message, _ uint64, timestamp uint
 		b.lastTimestamp = timestamp
 	}
 	if b.PerformanceTrackAggr != nil &&
-		timestamp-b.PerformanceTrackAggr.TimestampStart >= AGGREGATION_WINDOW {
+		timestamp-b.PerformanceTrackAggr.TimestampStart >= AggregationWindow {
 		return b.Build()
 	}
 	return nil
@@ -12,3 +12,4 @@ func IsIOSType(id int) bool {
 func IsDOMType(id int) bool {
 	return 0 == id || 4 == id || 5 == id || 6 == id || 7 == id || 8 == id || 9 == id || 10 == id || 11 == id || 12 == id || 13 == id || 14 == id || 15 == id || 16 == id || 18 == id || 19 == id || 20 == id || 37 == id || 38 == id || 49 == id || 50 == id || 51 == id || 54 == id || 55 == id || 57 == id || 58 == id || 59 == id || 60 == id || 61 == id || 67 == id || 69 == id || 70 == id || 71 == id || 72 == id || 73 == id || 74 == id || 75 == id || 76 == id || 77 == id || 113 == id || 114 == id || 90 == id || 93 == id || 96 == id || 100 == id || 102 == id || 103 == id || 105 == id
 }
+
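The comparison chain in IsDOMType is hard to scan. An equivalent set-based lookup, shown only as a sketch of an alternative, not how the repo implements it:

```go
package main

import "fmt"

// Same ID set as the chain above, kept in a map for O(1) lookup.
var domTypeIDs = map[int]struct{}{}

func init() {
	for _, id := range []int{0, 4, 5, 6, 7, 8, 9, 10, 11, 12, 13, 14, 15, 16,
		18, 19, 20, 37, 38, 49, 50, 51, 54, 55, 57, 58, 59, 60, 61, 67, 69,
		70, 71, 72, 73, 74, 75, 76, 77, 90, 93, 96, 100, 102, 103, 105, 113, 114} {
		domTypeIDs[id] = struct{}{}
	}
}

func IsDOMType(id int) bool {
	_, ok := domTypeIDs[id]
	return ok
}

func main() {
	fmt.Println(IsDOMType(37), IsDOMType(39)) // true false
}
```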
@@ -126,7 +126,7 @@ func (i *enderMessageIteratorImpl) preprocessing(msg Message) error {
 		return fmt.Errorf("incorrect batch version: %d, skip current batch, info: %s", i.version, i.batchInfo.Info())
 	}
 	i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
-	i.messageInfo.Timestamp = m.Timestamp
+	i.messageInfo.Timestamp = uint64(m.Timestamp)
 	if m.Timestamp == 0 {
 		i.zeroTsLog("BatchMetadata")
 	}
@@ -139,7 +139,7 @@ func (i *enderMessageIteratorImpl) preprocessing(msg Message) error {
 			return fmt.Errorf("batchMeta found at the end of the batch, info: %s", i.batchInfo.Info())
 		}
 		i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
-		i.messageInfo.Timestamp = m.Timestamp
+		i.messageInfo.Timestamp = uint64(m.Timestamp)
 		if m.Timestamp == 0 {
 			i.zeroTsLog("BatchMeta")
 		}
@@ -149,13 +149,13 @@ func (i *enderMessageIteratorImpl) preprocessing(msg Message) error {
 		}
 
 	case *Timestamp:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("Timestamp")
 		}
 
 	case *SessionStart:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("SessionStart")
 			log.Printf("zero session start, project: %d, UA: %s, tracker: %s, info: %s",
@@ -163,7 +163,7 @@ func (i *enderMessageIteratorImpl) preprocessing(msg Message) error {
 	}
 
 	case *SessionEnd:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("SessionEnd")
 		}
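These hunks flip the stored timestamp to uint64: Timestamp, SessionStart, and SessionEnd already carry uint64 and now assign directly, while BatchMetadata's signed field gets an explicit cast. The zero check catches missing timestamps but not negative ones, which would silently wrap; a tiny demonstration of that concern (an editorial observation, not behavior claimed by the diff):

```go
package main

import "fmt"

func main() {
	// A negative signed timestamp cast to uint64 wraps to a huge value,
	// so the `== 0` guard alone would not flag it.
	var batchTs int64 = -1
	fmt.Println(uint64(batchTs)) // 18446744073709551615
}
```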
|
|
@ -3,6 +3,7 @@ package messages
|
|||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"openreplay/backend/pkg/metrics/sink"
|
||||
)
|
||||
|
||||
type sinkMessageIteratorImpl struct {
|
||||
|
|
@ -53,6 +54,8 @@ func (i *sinkMessageIteratorImpl) sendBatchEnd() {
|
|||
}
|
||||
|
||||
func (i *sinkMessageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
|
||||
sink.RecordBatchSize(float64(len(batchData)))
|
||||
sink.IncreaseTotalBatches()
|
||||
// Create new message reader
|
||||
reader := NewMessageReader(batchData)
|
||||
|
||||
|
|
@ -125,7 +128,7 @@ func (i *sinkMessageIteratorImpl) preprocessing(msg Message) error {
|
|||
return fmt.Errorf("incorrect batch version: %d, skip current batch, info: %s", i.version, i.batchInfo.Info())
|
||||
}
|
||||
i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
|
||||
i.messageInfo.Timestamp = m.Timestamp
|
||||
i.messageInfo.Timestamp = uint64(m.Timestamp)
|
||||
if m.Timestamp == 0 {
|
||||
i.zeroTsLog("BatchMetadata")
|
||||
}
|
||||
|
|
@ -138,7 +141,7 @@ func (i *sinkMessageIteratorImpl) preprocessing(msg Message) error {
|
|||
return fmt.Errorf("batchMeta found at the end of the batch, info: %s", i.batchInfo.Info())
|
||||
}
|
||||
i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
|
||||
i.messageInfo.Timestamp = m.Timestamp
|
||||
i.messageInfo.Timestamp = uint64(m.Timestamp)
|
||||
if m.Timestamp == 0 {
|
||||
i.zeroTsLog("BatchMeta")
|
||||
}
|
||||
|
|
@@ -148,13 +151,13 @@ func (i *sinkMessageIteratorImpl) preprocessing(msg Message) error {
 		}

 	case *Timestamp:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("Timestamp")
 		}

 	case *SessionStart:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("SessionStart")
 			log.Printf("zero session start, project: %d, UA: %s, tracker: %s, info: %s",
@@ -162,7 +165,7 @@ func (i *sinkMessageIteratorImpl) preprocessing(msg Message) error {
 		}

 	case *SessionEnd:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("SessionEnd")
 		}
@@ -74,12 +74,13 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
 		i.messageInfo.Index++

 		msg := reader.Message()
+		msgType := msg.TypeID()

 		// Preprocess "system" messages
 		if _, ok := i.preFilter[msg.TypeID()]; ok {
 			msg = msg.Decode()
 			if msg == nil {
-				log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info())
+				log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
 				return
 			}
 			msg = transformDeprecated(msg)
@@ -99,7 +100,7 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
 		if i.autoDecode {
 			msg = msg.Decode()
 			if msg == nil {
-				log.Printf("decode error, type: %d, info: %s", msg.TypeID(), i.batchInfo.Info())
+				log.Printf("decode error, type: %d, info: %s", msgType, i.batchInfo.Info())
 				return
 			}
 		}
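The two log changes above read like a safety fix as much as a cleanup: the old code called msg.TypeID() after msg had been reassigned to the result of Decode(), which is nil exactly on the failure path being logged. A minimal standalone sketch of that pitfall, under this reading of the diff (the decodable and raw types below are illustrative, not from the repo):

package main

import "log"

// decodable mimics the iterator's Message: Decode may fail and return nil.
type decodable interface {
	TypeID() int
	Decode() decodable
}

type raw struct{ id int }

func (r *raw) TypeID() int       { return r.id }
func (r *raw) Decode() decodable { return nil } // simulate a decode failure

func process(msg decodable) {
	msgType := msg.TypeID() // captured up front, as in the patch
	if msg = msg.Decode(); msg == nil {
		// Calling msg.TypeID() here would panic on the nil interface;
		// the saved msgType is safe to log.
		log.Printf("decode error, type: %d", msgType)
		return
	}
}

func main() {
	process(&raw{id: 80})
}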
@@ -107,11 +108,20 @@ func (i *messageIteratorImpl) Iterate(batchData []byte, batchInfo *BatchInfo) {
 		// Set meta information for message
 		msg.Meta().SetMeta(i.messageInfo)

+		// Update timestamp value for iOS message types
+		if IsIOSType(msgType) {
+			msg.Meta().Timestamp = i.getIOSTimestamp(msg)
+		}
+
 		// Process message
 		i.handler(msg)
 	}
 }

+func (i *messageIteratorImpl) getIOSTimestamp(msg Message) uint64 {
+	return GetTimestamp(msg)
+}
+
 func (i *messageIteratorImpl) zeroTsLog(msgType string) {
 	log.Printf("zero timestamp in %s, info: %s", msgType, i.batchInfo.Info())
 }
@@ -126,7 +136,7 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
 			return fmt.Errorf("incorrect batch version: %d, skip current batch, info: %s", i.version, i.batchInfo.Info())
 		}
 		i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
-		i.messageInfo.Timestamp = m.Timestamp
+		i.messageInfo.Timestamp = uint64(m.Timestamp)
 		if m.Timestamp == 0 {
 			i.zeroTsLog("BatchMetadata")
 		}
@@ -139,7 +149,7 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
 			return fmt.Errorf("batchMeta found at the end of the batch, info: %s", i.batchInfo.Info())
 		}
 		i.messageInfo.Index = m.PageNo<<32 + m.FirstIndex // 2^32 is the maximum count of messages per page (ha-ha)
-		i.messageInfo.Timestamp = m.Timestamp
+		i.messageInfo.Timestamp = uint64(m.Timestamp)
 		if m.Timestamp == 0 {
 			i.zeroTsLog("BatchMeta")
 		}
@@ -149,13 +159,13 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
 		}

 	case *Timestamp:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("Timestamp")
 		}

 	case *SessionStart:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("SessionStart")
 			log.Printf("zero session start, project: %d, UA: %s, tracker: %s, info: %s",
@@ -163,7 +173,7 @@ func (i *messageIteratorImpl) preprocessing(msg Message) error {
 		}

 	case *SessionEnd:
-		i.messageInfo.Timestamp = int64(m.Timestamp)
+		i.messageInfo.Timestamp = m.Timestamp
 		if m.Timestamp == 0 {
 			i.zeroTsLog("SessionEnd")
 		}
@@ -8,6 +8,8 @@ type Message interface {
 	TypeID() int
 	Meta() *message
 	SessionID() uint64
+	MsgID() uint64
+	Time() uint64
 }

 // BatchInfo represents common information for all messages inside data batch
@@ -47,7 +49,7 @@ func (b *BatchInfo) Info() string {
 }

 type message struct {
-	Timestamp int64
+	Timestamp uint64
 	Index     uint64
 	Url       string
 	batch     *BatchInfo
@@ -72,6 +74,14 @@ func (m *message) SessionID() uint64 {
 	return m.batch.sessionID
 }

+func (m *message) MsgID() uint64 {
+	return m.Meta().Index
+}
+
+func (m *message) Time() uint64 {
+	return m.Meta().Timestamp
+}
+
 func (m *message) SetSessionID(sessID uint64) {
 	if m.batch == nil {
 		m.batch = &BatchInfo{}
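With MsgID() and Time() promoted onto the Message interface, and message.Timestamp widened to uint64, consumers can do ordering and staleness checks without reaching into the concrete meta struct. A hypothetical helper one could write inside the messages package (isStale is illustrative, not part of this commit):

// isStale reports whether a message arrives out of order relative to the
// last processed ID and timestamp; only the interface methods are needed.
func isStale(m Message, lastID, lastTS uint64) bool {
	return m.MsgID() < lastID || m.Time() < lastTS
}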
File diff suppressed because it is too large
@@ -42,3 +42,17 @@ func (m *RawMessage) SessionID() uint64 {
 	}
 	return 0
 }
+
+func (m *RawMessage) MsgID() uint64 {
+	if m.meta != nil {
+		return m.meta.Index
+	}
+	return 0
+}
+
+func (m *RawMessage) Time() uint64 {
+	if m.meta != nil {
+		return m.meta.Timestamp
+	}
+	return 0
+}
File diff suppressed because it is too large
@@ -40,13 +40,6 @@ func SplitMessages(data []byte) ([]*msgInfo, error) {
 			return nil, fmt.Errorf("read message type err: %s", err)
 		}

-		if msgType == MsgRedux {
-			log.Printf("redux")
-		}
-		if msgType == MsgFetch {
-			log.Printf("fetch")
-		}
-
 		// Read message body
 		_, err = ReadMessage(msgType, reader)
 		if err != nil {
72
backend/pkg/metrics/assets/metrics.go
Normal file
@@ -0,0 +1,72 @@
+package assets
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+	"strconv"
+)
+
+var assetsProcessedSessions = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "assets",
+		Name:      "processed_total",
+		Help:      "A counter displaying the total count of processed assets.",
+	},
+)
+
+func IncreaseProcessesSessions() {
+	assetsProcessedSessions.Inc()
+}
+
+var assetsSavedSessions = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "assets",
+		Name:      "saved_total",
+		Help:      "A counter displaying the total number of cached assets.",
+	},
+)
+
+func IncreaseSavedSessions() {
+	assetsSavedSessions.Inc()
+}
+
+var assetsDownloadDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "assets",
+		Name:      "download_duration_seconds",
+		Help:      "A histogram displaying the duration of downloading for each asset in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"response_code"},
+)
+
+func RecordDownloadDuration(durMillis float64, code int) {
+	assetsDownloadDuration.WithLabelValues(strconv.Itoa(code)).Observe(durMillis / 1000.0)
+}
+
+var assetsUploadDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "assets",
+		Name:      "upload_s3_duration_seconds",
+		Help:      "A histogram displaying the duration of uploading to s3 for each asset in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"failed"},
+)
+
+func RecordUploadDuration(durMillis float64, isFailed bool) {
+	failed := "false"
+	if isFailed {
+		failed = "true"
+	}
+	assetsUploadDuration.WithLabelValues(failed).Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		assetsProcessedSessions,
+		assetsSavedSessions,
+		assetsDownloadDuration,
+		assetsUploadDuration,
+	}
+}
11
backend/pkg/metrics/common/metrics.go
Normal file
@@ -0,0 +1,11 @@
+package common
+
+// DefaultDurationBuckets is a set of buckets from 5 milliseconds to 1000 seconds (16.6667 minutes)
+var DefaultDurationBuckets = []float64{.005, .01, .025, .05, .1, .25, .5, 1, 2.5, 5, 10, 25, 50, 100, 250, 500, 1000}
+
+// DefaultSizeBuckets is a set of buckets from 1 byte to 1_000_000_000 bytes (~1 GB)
+var DefaultSizeBuckets = []float64{1, 10, 50, 100, 250, 500, 1000, 2500, 5000, 10000, 25000, 50000, 100_000, 250_000,
+	500_000, 1_000_000, 10_000_000, 100_000_000, 1_000_000_000}
+
+// DefaultBuckets is a set of buckets from 1 to 1_000_000 elements
+var DefaultBuckets = []float64{1, 2, 5, 10, 20, 50, 100, 200, 500, 1000, 2000, 5000, 10_000, 50_000, 100_000, 1_000_000}
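Note the convention shared by every Record* helper in these new packages: callers pass durations in milliseconds and the helper divides by 1000 before Observe, so the DefaultDurationBuckets above are denominated in seconds. A tiny standalone check:

package main

import "fmt"

func main() {
	// A 250 ms sample is observed as 0.25 s, matching the .25 bucket
	// boundary in common.DefaultDurationBuckets.
	durMillis := 250.0
	fmt.Println(durMillis / 1000.0) // 0.25
}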
127
backend/pkg/metrics/database/metrics.go
Normal file
@@ -0,0 +1,127 @@
+package database
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+)
+
+var dbBatchSize = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "db",
+		Name:      "batch_size_bytes",
+		Help:      "A histogram displaying the batch size in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+)
+
+func RecordBatchSize(size float64) {
+	dbBatchSize.Observe(size)
+}
+
+var dbBatchElements = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "db",
+		Name:      "batch_size_elements",
+		Help:      "A histogram displaying the number of SQL commands in each batch.",
+		Buckets:   common.DefaultBuckets,
+	},
+)
+
+func RecordBatchElements(number float64) {
+	dbBatchElements.Observe(number)
+}
+
+var dbBatchInsertDuration = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "db",
+		Name:      "batch_insert_duration_seconds",
+		Help:      "A histogram displaying the duration of batch inserts in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+)
+
+func RecordBatchInsertDuration(durMillis float64) {
+	dbBatchInsertDuration.Observe(durMillis / 1000.0)
+}
+
+var dbBulkSize = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "db",
+		Name:      "bulk_size_bytes",
+		Help:      "A histogram displaying the bulk size in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+	[]string{"db", "table"},
+)
+
+func RecordBulkSize(size float64, db, table string) {
+	dbBulkSize.WithLabelValues(db, table).Observe(size)
+}
+
+var dbBulkElements = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "db",
+		Name:      "bulk_size_elements",
+		Help:      "A histogram displaying the size of data set in each bulk.",
+		Buckets:   common.DefaultBuckets,
+	},
+	[]string{"db", "table"},
+)
+
+func RecordBulkElements(size float64, db, table string) {
+	dbBulkElements.WithLabelValues(db, table).Observe(size)
+}
+
+var dbBulkInsertDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "db",
+		Name:      "bulk_insert_duration_seconds",
+		Help:      "A histogram displaying the duration of bulk inserts in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"db", "table"},
+)
+
+func RecordBulkInsertDuration(durMillis float64, db, table string) {
+	dbBulkInsertDuration.WithLabelValues(db, table).Observe(durMillis / 1000.0)
+}
+
+var dbRequestDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "db",
+		Name:      "request_duration_seconds",
+		Help:      "A histogram displaying the duration of each sql request in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"method", "table"},
+)
+
+func RecordRequestDuration(durMillis float64, method, table string) {
+	dbRequestDuration.WithLabelValues(method, table).Observe(durMillis / 1000.0)
+}
+
+var dbTotalRequests = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Namespace: "db",
+		Name:      "requests_total",
+		Help:      "A counter showing the total number of all SQL requests.",
+	},
+	[]string{"method", "table"},
+)
+
+func IncreaseTotalRequests(method, table string) {
+	dbTotalRequests.WithLabelValues(method, table).Inc()
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		dbBatchSize,
+		dbBatchElements,
+		dbBatchInsertDuration,
+		dbBulkSize,
+		dbBulkElements,
+		dbBulkInsertDuration,
+		dbRequestDuration,
+		dbTotalRequests,
+	}
+}
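A hypothetical call-site sketch for the labeled helpers above; the import path follows the pattern of the other metrics packages in this commit, and the label values ("postgres", "sessions", "insert") are illustrative only:

package main

import "openreplay/backend/pkg/metrics/database"

func main() {
	// A 12 ms bulk insert into the sessions table: the helper converts
	// milliseconds to seconds and attaches the db/table labels.
	database.RecordBulkInsertDuration(12.0, "postgres", "sessions")
	database.IncreaseTotalRequests("insert", "sessions")
}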
51
backend/pkg/metrics/ender/metrics.go
Normal file
@@ -0,0 +1,51 @@
+package ender
+
+import "github.com/prometheus/client_golang/prometheus"
+
+var enderActiveSessions = prometheus.NewGauge(
+	prometheus.GaugeOpts{
+		Namespace: "ender",
+		Name:      "sessions_active",
+		Help:      "A gauge displaying the number of active (live) sessions.",
+	},
+)
+
+func IncreaseActiveSessions() {
+	enderActiveSessions.Inc()
+}
+
+func DecreaseActiveSessions() {
+	enderActiveSessions.Dec()
+}
+
+var enderClosedSessions = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "ender",
+		Name:      "sessions_closed",
+		Help:      "A counter displaying the number of closed sessions (sent SessionEnd).",
+	},
+)
+
+func IncreaseClosedSessions() {
+	enderClosedSessions.Inc()
+}
+
+var enderTotalSessions = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "ender",
+		Name:      "sessions_total",
+		Help:      "A counter displaying the number of all processed sessions.",
+	},
+)
+
+func IncreaseTotalSessions() {
+	enderTotalSessions.Inc()
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		enderActiveSessions,
+		enderClosedSessions,
+		enderTotalSessions,
+	}
+}
55
backend/pkg/metrics/http/metrics.go
Normal file
@@ -0,0 +1,55 @@
+package http
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+	"strconv"
+)
+
+var httpRequestSize = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "http",
+		Name:      "request_size_bytes",
+		Help:      "A histogram displaying the size of each HTTP request in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+	[]string{"url", "response_code"},
+)
+
+func RecordRequestSize(size float64, url string, code int) {
+	httpRequestSize.WithLabelValues(url, strconv.Itoa(code)).Observe(size)
+}
+
+var httpRequestDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "http",
+		Name:      "request_duration_seconds",
+		Help:      "A histogram displaying the duration of each HTTP request in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"url", "response_code"},
+)
+
+func RecordRequestDuration(durMillis float64, url string, code int) {
+	httpRequestDuration.WithLabelValues(url, strconv.Itoa(code)).Observe(durMillis / 1000.0)
+}
+
+var httpTotalRequests = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "http",
+		Name:      "requests_total",
+		Help:      "A counter displaying the number of all HTTP requests.",
+	},
+)
+
+func IncreaseTotalRequests() {
+	httpTotalRequests.Inc()
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		httpRequestSize,
+		httpRequestDuration,
+		httpTotalRequests,
+	}
+}
40
backend/pkg/metrics/server.go
Normal file
@@ -0,0 +1,40 @@
+package metrics
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"github.com/prometheus/client_golang/prometheus/collectors"
+	"github.com/prometheus/client_golang/prometheus/promhttp"
+	"log"
+	"net/http"
+)
+
+type MetricServer struct {
+	registry *prometheus.Registry
+}
+
+func New() *MetricServer {
+	registry := prometheus.NewRegistry()
+	// Add go runtime metrics and process collectors.
+	registry.MustRegister(
+		collectors.NewGoCollector(),
+		collectors.NewProcessCollector(collectors.ProcessCollectorOpts{}),
+	)
+	// Expose /metrics HTTP endpoint using the created custom registry.
+	http.Handle(
+		"/metrics", promhttp.HandlerFor(
+			registry,
+			promhttp.HandlerOpts{
+				EnableOpenMetrics: true,
+			}),
+	)
+	go func() {
+		log.Println(http.ListenAndServe(":8888", nil))
+	}()
+	return &MetricServer{
+		registry: registry,
+	}
+}
+
+func (s *MetricServer) Register(cs []prometheus.Collector) {
+	s.registry.MustRegister(cs...)
+}
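How the pieces appear intended to fit together, as a sketch inferred from this commit rather than a verbatim service main: a service constructs the MetricServer once, registers a package's collectors via its List(), then records values through that package's helpers. The import paths follow the backend/pkg/metrics file layout added here:

package main

import (
	"openreplay/backend/pkg/metrics"
	"openreplay/backend/pkg/metrics/sink"
)

func main() {
	m := metrics.New()         // also starts the /metrics endpoint on :8888
	m.Register(sink.List())    // expose every sink collector on that endpoint
	sink.RecordBatchSize(2048) // helpers feed the registered collectors
	select {}                  // keep the process alive for scraping
}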
185
backend/pkg/metrics/sink/metrics.go
Normal file
@@ -0,0 +1,185 @@
+package sink
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+)
+
+var sinkMessageSize = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "sink",
+		Name:      "message_size_bytes",
+		Help:      "A histogram displaying the size of each message in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+)
+
+func RecordMessageSize(size float64) {
+	sinkMessageSize.Observe(size)
+}
+
+var sinkWrittenMessages = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "messages_written",
+		Help:      "A counter displaying the total number of all written messages.",
+	},
+)
+
+func IncreaseWrittenMessages() {
+	sinkWrittenMessages.Inc()
+}
+
+var sinkTotalMessages = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "messages_total",
+		Help:      "A counter displaying the total number of all processed messages.",
+	},
+)
+
+func IncreaseTotalMessages() {
+	sinkTotalMessages.Inc()
+}
+
+var sinkBatchSize = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "sink",
+		Name:      "batch_size_bytes",
+		Help:      "A histogram displaying the size of each batch in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+)
+
+func RecordBatchSize(size float64) {
+	sinkBatchSize.Observe(size)
+}
+
+var sinkTotalBatches = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "batches_total",
+		Help:      "A counter displaying the total number of all written batches.",
+	},
+)
+
+func IncreaseTotalBatches() {
+	sinkTotalBatches.Inc()
+}
+
+var sinkWrittenBytes = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "sink",
+		Name:      "written_bytes",
+		Help:      "A histogram displaying the size of buffer in bytes written to session file.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordWrittenBytes(size float64, fileType string) {
+	if size == 0 {
+		return
+	}
+	sinkWrittenBytes.WithLabelValues(fileType).Observe(size)
+	IncreaseTotalWrittenBytes(size, fileType)
+}
+
+var sinkTotalWrittenBytes = prometheus.NewCounterVec(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "written_bytes_total",
+		Help:      "A counter displaying the total number of bytes written to all session files.",
+	},
+	[]string{"file_type"},
+)
+
+func IncreaseTotalWrittenBytes(size float64, fileType string) {
+	if size == 0 {
+		return
+	}
+	sinkTotalWrittenBytes.WithLabelValues(fileType).Add(size)
+}
+
+var sinkCachedAssets = prometheus.NewGauge(
+	prometheus.GaugeOpts{
+		Namespace: "sink",
+		Name:      "assets_cached",
+		Help:      "A gauge displaying the current number of cached assets.",
+	},
+)
+
+func IncreaseCachedAssets() {
+	sinkCachedAssets.Inc()
+}
+
+func DecreaseCachedAssets() {
+	sinkCachedAssets.Dec()
+}
+
+var sinkSkippedAssets = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "assets_skipped",
+		Help:      "A counter displaying the total number of all skipped assets.",
+	},
+)
+
+func IncreaseSkippedAssets() {
+	sinkSkippedAssets.Inc()
+}
+
+var sinkTotalAssets = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "sink",
+		Name:      "assets_total",
+		Help:      "A counter displaying the total number of all processed assets.",
+	},
+)
+
+func IncreaseTotalAssets() {
+	sinkTotalAssets.Inc()
+}
+
+var sinkAssetSize = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "sink",
+		Name:      "asset_size_bytes",
+		Help:      "A histogram displaying the size of each asset in bytes.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+)
+
+func RecordAssetSize(size float64) {
+	sinkAssetSize.Observe(size)
+}
+
+var sinkProcessAssetDuration = prometheus.NewHistogram(
+	prometheus.HistogramOpts{
+		Namespace: "sink",
+		Name:      "asset_process_duration_seconds",
+		Help:      "A histogram displaying the duration of processing for each asset in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+)
+
+func RecordProcessAssetDuration(durMillis float64) {
+	sinkProcessAssetDuration.Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		sinkMessageSize,
+		sinkWrittenMessages,
+		sinkTotalMessages,
+		sinkBatchSize,
+		sinkTotalBatches,
+		sinkWrittenBytes,
+		sinkTotalWrittenBytes,
+		sinkCachedAssets,
+		sinkSkippedAssets,
+		sinkTotalAssets,
+		sinkAssetSize,
+		sinkProcessAssetDuration,
+	}
+}
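One detail of the sink helpers worth noting: RecordWrittenBytes feeds both the per-write histogram and, through IncreaseTotalWrittenBytes, the cumulative counter, and both skip zero-sized writes. A hypothetical call (the "dom" file type is illustrative):

package main

import "openreplay/backend/pkg/metrics/sink"

func main() {
	// Observes 4096 bytes in sink_written_bytes{file_type="dom"} and adds
	// the same amount to sink_written_bytes_total{file_type="dom"}.
	sink.RecordWrittenBytes(4096, "dom")
}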
114
backend/pkg/metrics/storage/metrics.go
Normal file
@@ -0,0 +1,114 @@
+package storage
+
+import (
+	"github.com/prometheus/client_golang/prometheus"
+	"openreplay/backend/pkg/metrics/common"
+)
+
+var storageSessionSize = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "storage",
+		Name:      "session_size_bytes",
+		Help:      "A histogram displaying the size of each session file in bytes prior to any manipulation.",
+		Buckets:   common.DefaultSizeBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordSessionSize(fileSize float64, fileType string) {
+	storageSessionSize.WithLabelValues(fileType).Observe(fileSize)
+}
+
+var storageTotalSessions = prometheus.NewCounter(
+	prometheus.CounterOpts{
+		Namespace: "storage",
+		Name:      "sessions_total",
+		Help:      "A counter displaying the total number of all processed sessions.",
+	},
+)
+
+func IncreaseStorageTotalSessions() {
+	storageTotalSessions.Inc()
+}
+
+var storageSessionReadDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "storage",
+		Name:      "read_duration_seconds",
+		Help:      "A histogram displaying the duration of reading for each session in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordSessionReadDuration(durMillis float64, fileType string) {
+	storageSessionReadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionSortDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "storage",
+		Name:      "sort_duration_seconds",
+		Help:      "A histogram displaying the duration of sorting for each session in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordSessionSortDuration(durMillis float64, fileType string) {
+	storageSessionSortDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionEncodeDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "storage",
+		Name:      "encode_duration_seconds",
+		Help:      "A histogram displaying the duration of encoding for each session in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordSessionEncodeDuration(durMillis float64, fileType string) {
+	storageSessionEncodeDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionCompressDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "storage",
+		Name:      "compress_duration_seconds",
+		Help:      "A histogram displaying the duration of compressing for each session in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordSessionCompressDuration(durMillis float64, fileType string) {
+	storageSessionCompressDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+var storageSessionUploadDuration = prometheus.NewHistogramVec(
+	prometheus.HistogramOpts{
+		Namespace: "storage",
+		Name:      "upload_duration_seconds",
+		Help:      "A histogram displaying the duration of uploading to s3 for each session in seconds.",
+		Buckets:   common.DefaultDurationBuckets,
+	},
+	[]string{"file_type"},
+)
+
+func RecordSessionUploadDuration(durMillis float64, fileType string) {
+	storageSessionUploadDuration.WithLabelValues(fileType).Observe(durMillis / 1000.0)
+}
+
+func List() []prometheus.Collector {
+	return []prometheus.Collector{
+		storageSessionSize,
+		storageTotalSessions,
+		storageSessionReadDuration,
+		storageSessionSortDuration,
+		storageSessionEncodeDuration,
+		storageSessionCompressDuration,
+		storageSessionUploadDuration,
+	}
+}
@@ -1,138 +0,0 @@
-package monitoring
-
-import (
-	"fmt"
-	"log"
-	"net/http"
-
-	"go.opentelemetry.io/otel/exporters/prometheus"
-	"go.opentelemetry.io/otel/metric"
-	"go.opentelemetry.io/otel/metric/global"
-	"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
-	"go.opentelemetry.io/otel/sdk/metric/aggregator/histogram"
-	controller "go.opentelemetry.io/otel/sdk/metric/controller/basic"
-	"go.opentelemetry.io/otel/sdk/metric/export/aggregation"
-	processor "go.opentelemetry.io/otel/sdk/metric/processor/basic"
-	selector "go.opentelemetry.io/otel/sdk/metric/selector/simple"
-)
-
-// Metrics stores all collected metrics
-type Metrics struct {
-	meter          metric.Meter
-	counters       map[string]syncfloat64.Counter
-	upDownCounters map[string]syncfloat64.UpDownCounter
-	histograms     map[string]syncfloat64.Histogram
-}
-
-func New(name string) *Metrics {
-	m := &Metrics{
-		counters:       make(map[string]syncfloat64.Counter),
-		upDownCounters: make(map[string]syncfloat64.UpDownCounter),
-		histograms:     make(map[string]syncfloat64.Histogram),
-	}
-	m.initPrometheusDataExporter()
-	m.initMetrics(name)
-	return m
-}
-
-// initPrometheusDataExporter allows to use collected metrics in prometheus
-func (m *Metrics) initPrometheusDataExporter() {
-	config := prometheus.Config{
-		DefaultHistogramBoundaries: []float64{1, 2, 5, 10, 20, 50, 100, 250, 500, 1000},
-	}
-	c := controller.New(
-		processor.NewFactory(
-			selector.NewWithHistogramDistribution(
-				histogram.WithExplicitBoundaries(config.DefaultHistogramBoundaries),
-			),
-			aggregation.CumulativeTemporalitySelector(),
-			processor.WithMemory(true),
-		),
-	)
-	exporter, err := prometheus.New(config, c)
-	if err != nil {
-		log.Panicf("failed to initialize prometheus exporter %v", err)
-	}
-
-	global.SetMeterProvider(exporter.MeterProvider())
-
-	http.HandleFunc("/metrics", exporter.ServeHTTP)
-	go func() {
-		_ = http.ListenAndServe(":8888", nil)
-	}()
-
-	fmt.Println("Prometheus server running on :8888")
-}
-
-func (m *Metrics) initMetrics(name string) {
-	m.meter = global.Meter(name)
-}
-
-/*
-Counter is a synchronous instrument that measures additive non-decreasing values, for example, the number of:
-- processed requests
-- received bytes
-- disk reads
-*/
-
-func (m *Metrics) RegisterCounter(name string) (syncfloat64.Counter, error) {
-	if counter, ok := m.counters[name]; ok {
-		return counter, nil
-	}
-	counter, err := m.meter.SyncFloat64().Counter(name)
-	if err != nil {
-		return nil, fmt.Errorf("failed to initialize counter: %v", err)
-	}
-	m.counters[name] = counter
-	return counter, nil
-}
-
-func (m *Metrics) GetCounter(name string) syncfloat64.Counter {
-	return m.counters[name]
-}
-
-/*
-UpDownCounter is a synchronous instrument which measures additive values that increase or decrease with time,
-for example, the number of:
-- active requests
-- open connections
-- memory in use (megabytes)
-*/
-
-func (m *Metrics) RegisterUpDownCounter(name string) (syncfloat64.UpDownCounter, error) {
-	if counter, ok := m.upDownCounters[name]; ok {
-		return counter, nil
-	}
-	counter, err := m.meter.SyncFloat64().UpDownCounter(name)
-	if err != nil {
-		return nil, fmt.Errorf("failed to initialize upDownCounter: %v", err)
-	}
-	m.upDownCounters[name] = counter
-	return counter, nil
-}
-
-func (m *Metrics) GetUpDownCounter(name string) syncfloat64.UpDownCounter {
-	return m.upDownCounters[name]
-}
-
-/*
-Histogram is a synchronous instrument that produces a histogram from recorded values, for example:
-- request latency
-- request size
-*/
-
-func (m *Metrics) RegisterHistogram(name string) (syncfloat64.Histogram, error) {
-	if hist, ok := m.histograms[name]; ok {
-		return hist, nil
-	}
-	hist, err := m.meter.SyncFloat64().Histogram(name)
-	if err != nil {
-		return nil, fmt.Errorf("failed to initialize histogram: %v", err)
-	}
-	m.histograms[name] = hist
-	return hist, nil
-}
-
-func (m *Metrics) GetHistogram(name string) syncfloat64.Histogram {
-	return m.histograms[name]
-}
@@ -10,7 +10,7 @@ import (

 type builder struct {
 	sessionID      uint64
-	readyMsgs      []Message
+	readyMsgs      chan Message
 	timestamp      uint64
 	lastMessageID  uint64
 	lastSystemTime time.Time
@@ -18,20 +18,14 @@ type builder struct {
 	ended          bool
 }

-func NewBuilder(sessionID uint64, handlers ...handlers.MessageProcessor) *builder {
+func NewBuilder(sessionID uint64, events chan Message, handlers ...handlers.MessageProcessor) *builder {
 	return &builder{
 		sessionID:  sessionID,
 		processors: handlers,
+		readyMsgs:  events,
 	}
 }

-func (b *builder) iterateReadyMessages(iter func(msg Message)) {
-	for _, readyMsg := range b.readyMsgs {
-		iter(readyMsg)
-	}
-	b.readyMsgs = nil
-}
-
 func (b *builder) checkSessionEnd(message Message) {
 	if _, isEnd := message.(*IOSSessionEnd); isEnd {
 		b.ended = true
@@ -41,34 +35,31 @@ func (b *builder) checkSessionEnd(message Message) {
 	}
 }

-func (b *builder) handleMessage(message Message, messageID uint64) {
-	if messageID < b.lastMessageID {
+func (b *builder) handleMessage(m Message) {
+	if m.MsgID() < b.lastMessageID {
 		// May happen in case of duplicated messages in kafka (if `idempotence: false`)
-		log.Printf("skip message with wrong msgID, sessID: %d, msgID: %d, lastID: %d", b.sessionID, messageID, b.lastMessageID)
+		log.Printf("skip message with wrong msgID, sessID: %d, msgID: %d, lastID: %d", b.sessionID, m.MsgID(), b.lastMessageID)
 		return
 	}
-	timestamp := GetTimestamp(message)
-	if timestamp == 0 {
-		switch message.(type) {
+	if m.Time() <= 0 {
+		switch m.(type) {
 		case *IssueEvent, *PerformanceTrackAggr:
 			break
 		default:
-			log.Printf("skip message with empty timestamp, sessID: %d, msgID: %d, msgType: %d", b.sessionID, messageID, message.TypeID())
+			log.Printf("skip message with incorrect timestamp, sessID: %d, msgID: %d, msgType: %d", b.sessionID, m.MsgID(), m.TypeID())
 		}
 		return
 	}
-	if timestamp < b.timestamp {
-		//log.Printf("skip message with wrong timestamp, sessID: %d, msgID: %d, type: %d, msgTS: %d, lastTS: %d", b.sessionID, messageID, message.TypeID(), timestamp, b.timestamp)
-	} else {
-		b.timestamp = timestamp
+	if m.Time() > b.timestamp {
+		b.timestamp = m.Time()
 	}

 	b.lastSystemTime = time.Now()
 	// Process current message
 	for _, p := range b.processors {
-		if rm := p.Handle(message, messageID, b.timestamp); rm != nil {
-			rm.Meta().SetMeta(message.Meta())
-			b.readyMsgs = append(b.readyMsgs, rm)
+		if rm := p.Handle(m, b.timestamp); rm != nil {
+			rm.Meta().SetMeta(m.Meta())
+			b.readyMsgs <- rm
 		}
 	}
-	b.checkSessionEnd(message)
+	b.checkSessionEnd(m)
 }
@@ -2,92 +2,98 @@ package sessions

 import (
 	"log"
-	"openreplay/backend/pkg/handlers"
 	"sync"
 	"time"

+	"openreplay/backend/pkg/handlers"
 	. "openreplay/backend/pkg/messages"
 )

-const FORCE_DELETE_TIMEOUT = 4 * time.Hour
+const ForceDeleteTimeout = 30 * time.Minute

 type builderMap struct {
 	handlersFabric func() []handlers.MessageProcessor
 	sessions       map[uint64]*builder
+	mutex          *sync.Mutex
+	events         chan Message
+	done           chan struct{}
 }

-func NewBuilderMap(handlersFabric func() []handlers.MessageProcessor) *builderMap {
-	return &builderMap{
+type EventBuilder interface {
+	Events() chan Message
+	HandleMessage(msg Message)
+	Stop()
+}
+
+func NewBuilderMap(handlersFabric func() []handlers.MessageProcessor) EventBuilder {
+	b := &builderMap{
 		handlersFabric: handlersFabric,
 		sessions:       make(map[uint64]*builder),
+		mutex:          &sync.Mutex{},
+		events:         make(chan Message, 1024*10),
+		done:           make(chan struct{}),
 	}
-}
-
-func (m *builderMap) GetBuilder(sessionID uint64) *builder {
-	b := m.sessions[sessionID]
-	if b == nil {
-		b = NewBuilder(sessionID, m.handlersFabric()...) // Should create new instances
-		m.sessions[sessionID] = b
-	}
+	go b.worker()
 	return b
 }

-func (m *builderMap) HandleMessage(msg Message) {
-	sessionID := msg.SessionID()
-	messageID := msg.Meta().Index
-	b := m.GetBuilder(sessionID)
-	b.handleMessage(msg, messageID)
+func (m *builderMap) getBuilder(sessionID uint64) *builder {
+	m.mutex.Lock()
+	b := m.sessions[sessionID]
+	if b == nil {
+		b = NewBuilder(sessionID, m.events, m.handlersFabric()...)
+		m.sessions[sessionID] = b
+	}
+	m.mutex.Unlock()
+	return b
 }

-func (m *builderMap) ClearOldSessions() {
+func (m *builderMap) Events() chan Message {
+	return m.events
+}
+
+func (m *builderMap) HandleMessage(msg Message) {
+	m.getBuilder(msg.SessionID()).handleMessage(msg)
+}
+
+func (m *builderMap) worker() {
+	tick := time.Tick(10 * time.Second)
+	for {
+		select {
+		case <-tick:
+			m.checkSessions()
+		case <-m.done:
+			return
+		}
+	}
+}
+
+func (m *builderMap) checkSessions() {
+	m.mutex.Lock()
 	deleted := 0
 	now := time.Now()
-	for id, sess := range m.sessions {
-		if sess.lastSystemTime.Add(FORCE_DELETE_TIMEOUT).Before(now) {
-			// Should delete zombie session
-			delete(m.sessions, id)
+	for sessID, b := range m.sessions {
+		// Check session's events
+		if b.ended || b.lastSystemTime.Add(ForceDeleteTimeout).Before(now) {
+			// Build rest of messages
+			for _, p := range b.processors {
+				if rm := p.Build(); rm != nil {
+					rm.Meta().SetSessionID(sessID)
+					m.events <- rm
+				}
+			}
+			delete(m.sessions, sessID)
 			deleted++
 		}
 	}
+	m.mutex.Unlock()
 	if deleted > 0 {
 		log.Printf("deleted %d sessions from message builder", deleted)
 	}
 }

-func (m *builderMap) iterateSessionReadyMessages(sessionID uint64, b *builder, iter func(msg Message)) {
-	if b.ended || b.lastSystemTime.Add(FORCE_DELETE_TIMEOUT).Before(time.Now()) {
-		for _, p := range b.processors {
-			if rm := p.Build(); rm != nil {
-				rm.Meta().SetSessionID(sessionID)
-				b.readyMsgs = append(b.readyMsgs, rm)
-			}
-		}
-	}
-	b.iterateReadyMessages(iter)
-	if b.ended {
-		delete(m.sessions, sessionID)
-	}
-}
-
-func (m *builderMap) IterateReadyMessages(iter func(sessionID uint64, msg Message)) {
-	for sessionID, session := range m.sessions {
-		m.iterateSessionReadyMessages(
-			sessionID,
-			session,
-			func(msg Message) {
-				iter(sessionID, msg)
-			},
-		)
-	}
-}
-
-func (m *builderMap) IterateSessionReadyMessages(sessionID uint64, iter func(msg Message)) {
-	session, ok := m.sessions[sessionID]
-	if !ok {
-		return
-	}
-	m.iterateSessionReadyMessages(
-		sessionID,
-		session,
-		iter,
-	)
+func (m *builderMap) Stop() {
+	m.done <- struct{}{}
+	m.checkSessions()
+	close(m.events)
 }
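The builder refactor above replaces pull-style iteration (IterateReadyMessages) with a buffered channel that the background worker also feeds during its 10-second cleanup pass. A hypothetical consumer sketch, assuming the package lives at openreplay/backend/pkg/sessions (the handlers import path comes from the diff itself):

package main

import (
	"openreplay/backend/pkg/handlers"
	"openreplay/backend/pkg/sessions"
)

func main() {
	// handlersFabric must build fresh processors per session; a nil-returning
	// stub is used here for illustration only.
	fabric := func() []handlers.MessageProcessor { return nil }

	b := sessions.NewBuilderMap(fabric)
	go func() {
		for msg := range b.Events() { // drain ready events as they are built
			_ = msg // hand each event to the downstream queue here
		}
	}()
	// ...feed incoming messages via b.HandleMessage(msg)...
	b.Stop() // flushes remaining sessions, then closes Events()
}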
Some files were not shown because too many files have changed in this diff