Compare commits
378 commits
add_webrtc ... main
2533 changed files with 87080 additions and 50278 deletions
@@ -1,4 +1,4 @@
# This action will push the peers changes to aws
# This action will push the assist changes to aws
on:
workflow_dispatch:
inputs:
@@ -10,12 +10,9 @@ on:
branches:
- dev
paths:
- "ee/peers/**"
- "peers/**"
- "!peers/.gitignore"
- "!peers/*-dev.sh"
- "ee/assist-server/**"

name: Build and Deploy Peers EE
name: Build and Deploy Assist-Server EE

jobs:
deploy:
@@ -56,12 +53,7 @@ jobs:
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext

# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

- name: Building and Pushing peers image
- name: Building and Pushing Assist-Server image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
@@ -69,11 +61,11 @@ jobs:
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd peers
cd assist-server
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("peers")
images=("assist-server")
for image in ${images[*]};do
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
@@ -84,7 +76,7 @@ jobs:
} && {
echo "Skipping Security Checks"
}
images=("peers")
images=("assist-server")
for image in ${images[*]};do
docker push $DOCKER_REPO/$image:$IMAGE_TAG
done
@@ -108,43 +100,23 @@ jobs:
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done

- name: Deploy to kubernetes
run: |
pwd
cd scripts/helmcharts/

# Update changed image tag
sed -i "/peers/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
sed -i "/assist-server/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mkdir -p /tmp/charts
mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
mv openreplay/charts/{ingress-nginx,assist-server,quickwit,connector} /tmp/charts/
rm -rf openreplay/charts/*
mv /tmp/charts/* openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks --kube-version=$k_version | kubectl apply -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: "Build failed :bomb:"

# - name: Debug Job
# # if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
# with:
# iimit-access-to-actor: true
189
.github/workflows/patch-build-old.yaml
vendored
Normal file
|
|
@ -0,0 +1,189 @@
|
|||
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
services:
|
||||
description: 'Comma separated names of services to build(in small letters).'
|
||||
required: true
|
||||
default: 'chalice,frontend'
|
||||
tag:
|
||||
description: 'Tag to update.'
|
||||
required: true
|
||||
type: string
|
||||
branch:
|
||||
description: 'Branch to build patches from. Make sure the branch is uptodate with tag. Else itll cause missing commits.'
|
||||
required: true
|
||||
type: string
|
||||
|
||||
name: Build patches from tag, rewrite commit HEAD to older timestamp, and Push the tag
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Build Patch from old tag
|
||||
runs-on: ubuntu-latest
|
||||
env:
|
||||
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
|
||||
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
fetch-depth: 4
|
||||
ref: ${{ github.event.inputs.tag }}
|
||||
|
||||
- name: Set Remote with GITHUB_TOKEN
|
||||
run: |
|
||||
git config --unset http.https://github.com/.extraheader
|
||||
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
|
||||
|
||||
- name: Create backup tag with timestamp
|
||||
run: |
|
||||
set -e # Exit immediately if a command exits with a non-zero status
|
||||
TIMESTAMP=$(date +%Y%m%d%H%M%S)
|
||||
BACKUP_TAG="${{ github.event.inputs.tag }}-backup-${TIMESTAMP}"
|
||||
echo "BACKUP_TAG=${BACKUP_TAG}" >> $GITHUB_ENV
|
||||
echo "INPUT_TAG=${{ github.event.inputs.tag }}" >> $GITHUB_ENV
|
||||
git tag $BACKUP_TAG || { echo "Failed to create backup tag"; exit 1; }
|
||||
git push origin $BACKUP_TAG || { echo "Failed to push backup tag"; exit 1; }
|
||||
echo "Created backup tag: $BACKUP_TAG"
|
||||
|
||||
# Get the oldest commit date from the last 3 commits in raw format
|
||||
OLDEST_COMMIT_TIMESTAMP=$(git log -3 --pretty=format:"%at" | tail -1)
|
||||
echo "Oldest commit timestamp: $OLDEST_COMMIT_TIMESTAMP"
|
||||
# Add 1 second to the timestamp
|
||||
NEW_TIMESTAMP=$((OLDEST_COMMIT_TIMESTAMP + 1))
|
||||
echo "NEW_TIMESTAMP=$NEW_TIMESTAMP" >> $GITHUB_ENV
|
||||
|
||||
|
||||
- name: Setup yq
|
||||
uses: mikefarah/yq@master
|
||||
|
||||
# Configure AWS credentials for the first registry
|
||||
- name: Configure AWS credentials for RELEASE_ARM_REGISTRY
|
||||
uses: aws-actions/configure-aws-credentials@v1
|
||||
with:
|
||||
aws-access-key-id: ${{ secrets.AWS_DEPOT_ACCESS_KEY }}
|
||||
aws-secret-access-key: ${{ secrets.AWS_DEPOT_SECRET_KEY }}
|
||||
aws-region: ${{ secrets.AWS_DEPOT_DEFAULT_REGION }}
|
||||
|
||||
- name: Login to Amazon ECR for RELEASE_ARM_REGISTRY
|
||||
id: login-ecr-arm
|
||||
run: |
|
||||
aws ecr get-login-password --region ${{ secrets.AWS_DEPOT_DEFAULT_REGION }} | docker login --username AWS --password-stdin ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
|
||||
- uses: depot/setup-action@v1
|
||||
- name: Get HEAD Commit ID
|
||||
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
|
||||
- name: Define Branch Name
|
||||
run: echo "BRANCH_NAME=${{inputs.branch}}" >> $GITHUB_ENV
|
||||
|
||||
- name: Build
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
|
||||
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
|
||||
MSAAS_REPO_FOLDER: /tmp/msaas
|
||||
run: |
|
||||
set -exo pipefail
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git checkout -b $BRANCH_NAME
|
||||
working_dir=$(pwd)
|
||||
function image_version(){
|
||||
local service=$1
|
||||
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
current_version=$(yq eval '.AppVersion' $chart_path)
|
||||
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
|
||||
echo $new_version
|
||||
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
|
||||
}
|
||||
function clone_msaas() {
|
||||
[ -d $MSAAS_REPO_FOLDER ] || {
|
||||
git clone -b $INPUT_TAG --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
|
||||
cd $MSAAS_REPO_FOLDER
|
||||
cd openreplay && git fetch origin && git checkout $INPUT_TAG
|
||||
git log -1
|
||||
cd $MSAAS_REPO_FOLDER
|
||||
bash git-init.sh
|
||||
git checkout
|
||||
}
|
||||
}
|
||||
function build_managed() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
echo building managed
|
||||
clone_msaas
|
||||
if [[ $service == 'chalice' ]]; then
|
||||
cd $MSAAS_REPO_FOLDER/openreplay/api
|
||||
else
|
||||
cd $MSAAS_REPO_FOLDER/openreplay/$service
|
||||
fi
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
|
||||
}
|
||||
# Checking for backend images
|
||||
ls backend/cmd >> /tmp/backend.txt
|
||||
echo Services: "${{ github.event.inputs.services }}"
|
||||
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
|
||||
BUILD_SCRIPT_NAME="build.sh"
|
||||
# Build FOSS
|
||||
for SERVICE in "${SERVICES[@]}"; do
|
||||
# Check if service is backend
|
||||
if grep -q $SERVICE /tmp/backend.txt; then
|
||||
cd backend
|
||||
foss_build_args="nil $SERVICE"
|
||||
ee_build_args="ee $SERVICE"
|
||||
else
|
||||
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
|
||||
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
|
||||
ee_build_args="ee"
|
||||
fi
|
||||
version=$(image_version $SERVICE)
|
||||
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
else
|
||||
build_managed $SERVICE $version
|
||||
fi
|
||||
cd $working_dir
|
||||
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
|
||||
yq eval ".AppVersion = \"$version\"" -i $chart_path
|
||||
git add $chart_path
|
||||
git commit -m "Increment $SERVICE chart version"
|
||||
done
|
||||
|
||||
- name: Change commit timestamp
|
||||
run: |
|
||||
# Convert the timestamp to a date format git can understand
|
||||
NEW_DATE=$(perl -le 'print scalar gmtime($ARGV[0])." +0000"' $NEW_TIMESTAMP)
|
||||
echo "Setting commit date to: $NEW_DATE"
|
||||
|
||||
# Amend the commit with the new date
|
||||
GIT_COMMITTER_DATE="$NEW_DATE" git commit --amend --no-edit --date="$NEW_DATE"
|
||||
|
||||
# Verify the change
|
||||
git log -1 --pretty=format:"Commit now dated: %cD"
|
||||
|
||||
# git tag and push
|
||||
git tag $INPUT_TAG -f
|
||||
git push origin $INPUT_TAG -f
|
||||
|
||||
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO_ARM: ${{ secrets.RELEASE_ARM_REGISTRY }}
|
||||
# DOCKER_REPO_OSS: ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
# MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
|
||||
# MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
|
||||
# MSAAS_REPO_FOLDER: /tmp/msaas
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
246
.github/workflows/patch-build.yaml
vendored
|
|
@ -2,7 +2,6 @@
|
|||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
description: 'This workflow will build for patches for latest tag, and will Always use commit from main branch.'
|
||||
inputs:
|
||||
services:
|
||||
description: 'Comma separated names of services to build(in small letters).'
|
||||
|
|
@ -20,12 +19,20 @@ jobs:
|
|||
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
uses: actions/checkout@v4
|
||||
with:
|
||||
fetch-depth: 1
|
||||
fetch-depth: 0
|
||||
token: ${{ secrets.GITHUB_TOKEN }}
|
||||
- name: Rebase with main branch, to make sure the code has latest main changes
|
||||
if: github.ref != 'refs/heads/main'
|
||||
run: |
|
||||
git pull --rebase origin main
|
||||
git remote -v
|
||||
git config --global user.email "action@github.com"
|
||||
git config --global user.name "GitHub Action"
|
||||
git config --global rebase.autoStash true
|
||||
git fetch origin main:main
|
||||
git rebase main
|
||||
git log -3
|
||||
|
||||
- name: Downloading yq
|
||||
run: |
|
||||
|
|
@ -48,6 +55,8 @@ jobs:
|
|||
aws ecr-public get-login-password --region us-east-1 | docker login --username AWS --password-stdin ${{ secrets.RELEASE_OSS_REGISTRY }}
|
||||
|
||||
- uses: depot/setup-action@v1
|
||||
env:
|
||||
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
|
||||
- name: Get HEAD Commit ID
|
||||
run: echo "HEAD_COMMIT_ID=$(git rev-parse HEAD)" >> $GITHUB_ENV
|
||||
- name: Define Branch Name
|
||||
|
|
@ -65,78 +74,168 @@ jobs:
|
|||
MSAAS_REPO_CLONE_TOKEN: ${{ secrets.MSAAS_REPO_CLONE_TOKEN }}
|
||||
MSAAS_REPO_URL: ${{ secrets.MSAAS_REPO_URL }}
|
||||
MSAAS_REPO_FOLDER: /tmp/msaas
|
||||
SERVICES_INPUT: ${{ github.event.inputs.services }}
|
||||
run: |
|
||||
set -exo pipefail
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git checkout -b $BRANCH_NAME
|
||||
working_dir=$(pwd)
|
||||
function image_version(){
|
||||
local service=$1
|
||||
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
current_version=$(yq eval '.AppVersion' $chart_path)
|
||||
new_version=$(echo $current_version | awk -F. '{$NF += 1 ; print $1"."$2"."$3}')
|
||||
echo $new_version
|
||||
# yq eval ".AppVersion = \"$new_version\"" -i $chart_path
|
||||
#!/bin/bash
|
||||
set -euo pipefail
|
||||
|
||||
# Configuration
|
||||
readonly WORKING_DIR=$(pwd)
|
||||
readonly BUILD_SCRIPT_NAME="build.sh"
|
||||
readonly BACKEND_SERVICES_FILE="/tmp/backend.txt"
|
||||
|
||||
# Initialize git configuration
|
||||
setup_git() {
|
||||
git config --local user.email "action@github.com"
|
||||
git config --local user.name "GitHub Action"
|
||||
git checkout -b "$BRANCH_NAME"
|
||||
}
|
||||
function clone_msaas() {
|
||||
[ -d $MSAAS_REPO_FOLDER ] || {
|
||||
git clone -b dev --recursive https://x-access-token:$MSAAS_REPO_CLONE_TOKEN@$MSAAS_REPO_URL $MSAAS_REPO_FOLDER
|
||||
cd $MSAAS_REPO_FOLDER
|
||||
cd openreplay && git fetch origin && git checkout main # This have to be changed to specific tag
|
||||
git log -1
|
||||
cd $MSAAS_REPO_FOLDER
|
||||
bash git-init.sh
|
||||
git checkout
|
||||
}
|
||||
|
||||
# Get and increment image version
|
||||
image_version() {
|
||||
local service=$1
|
||||
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
local current_version new_version
|
||||
|
||||
current_version=$(yq eval '.AppVersion' "$chart_path")
|
||||
new_version=$(echo "$current_version" | awk -F. '{$NF += 1; print $1"."$2"."$3}')
|
||||
echo "$new_version"
|
||||
}
|
||||
function build_managed() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
echo building managed
|
||||
clone_msaas
|
||||
if [[ $service == 'chalice' ]]; then
|
||||
cd $MSAAS_REPO_FOLDER/openreplay/api
|
||||
else
|
||||
cd $MSAAS_REPO_FOLDER/openreplay/$service
|
||||
fi
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh >> /tmp/arm.txt
|
||||
|
||||
# Clone MSAAS repository if not exists
|
||||
clone_msaas() {
|
||||
if [[ ! -d "$MSAAS_REPO_FOLDER" ]]; then
|
||||
git clone -b dev --recursive "https://x-access-token:${MSAAS_REPO_CLONE_TOKEN}@${MSAAS_REPO_URL}" "$MSAAS_REPO_FOLDER"
|
||||
cd "$MSAAS_REPO_FOLDER"
|
||||
cd openreplay && git fetch origin && git checkout main
|
||||
git log -1
|
||||
cd "$MSAAS_REPO_FOLDER"
|
||||
bash git-init.sh
|
||||
git checkout
|
||||
fi
|
||||
}
|
||||
# Checking for backend images
|
||||
ls backend/cmd >> /tmp/backend.txt
|
||||
echo Services: "${{ github.event.inputs.services }}"
|
||||
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
|
||||
BUILD_SCRIPT_NAME="build.sh"
|
||||
# Build FOSS
|
||||
for SERVICE in "${SERVICES[@]}"; do
|
||||
# Check if service is backend
|
||||
if grep -q $SERVICE /tmp/backend.txt; then
|
||||
cd backend
|
||||
foss_build_args="nil $SERVICE"
|
||||
ee_build_args="ee $SERVICE"
|
||||
else
|
||||
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
|
||||
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
|
||||
ee_build_args="ee"
|
||||
fi
|
||||
version=$(image_version $SERVICE)
|
||||
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
echo IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
IMAGE_TAG=$version-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
|
||||
if [[ "$SERVICE" != "chalice" && "$SERVICE" != "frontend" ]]; then
|
||||
IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
echo IMAGE_TAG=$version DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
|
||||
else
|
||||
build_managed $SERVICE $version
|
||||
fi
|
||||
cd $working_dir
|
||||
chart_path="$working_dir/scripts/helmcharts/openreplay/charts/$SERVICE/Chart.yaml"
|
||||
yq eval ".AppVersion = \"$version\"" -i $chart_path
|
||||
git add $chart_path
|
||||
git commit -m "Increment $SERVICE chart version"
|
||||
git push --set-upstream origin $BRANCH_NAME
|
||||
done
|
||||
|
||||
# Build managed services
|
||||
build_managed() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
|
||||
echo "Building managed service: $service"
|
||||
clone_msaas
|
||||
|
||||
if [[ $service == 'chalice' ]]; then
|
||||
cd "$MSAAS_REPO_FOLDER/openreplay/api"
|
||||
else
|
||||
cd "$MSAAS_REPO_FOLDER/openreplay/$service"
|
||||
fi
|
||||
|
||||
local build_cmd="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=arm64 DOCKER_REPO=$DOCKER_REPO_ARM PUSH_IMAGE=0 bash build.sh"
|
||||
|
||||
echo "Executing: $build_cmd"
|
||||
if ! eval "$build_cmd" 2>&1; then
|
||||
echo "Build failed for $service"
|
||||
exit 1
|
||||
fi
|
||||
}
|
||||
|
||||
# Build service with given arguments
|
||||
build_service() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
local build_args=$3
|
||||
local build_script=${4:-$BUILD_SCRIPT_NAME}
|
||||
|
||||
local command="IMAGE_TAG=$version DOCKER_RUNTIME=depot DOCKER_BUILD_ARGS=--push ARCH=amd64 DOCKER_REPO=$DOCKER_REPO_OSS PUSH_IMAGE=0 bash $build_script $build_args"
|
||||
echo "Executing: $command"
|
||||
eval "$command"
|
||||
}
|
||||
|
||||
# Update chart version and commit changes
|
||||
update_chart_version() {
|
||||
local service=$1
|
||||
local version=$2
|
||||
local chart_path="$WORKING_DIR/scripts/helmcharts/openreplay/charts/$service/Chart.yaml"
|
||||
|
||||
# Ensure we're in the original working directory/repository
|
||||
cd "$WORKING_DIR"
|
||||
yq eval ".AppVersion = \"$version\"" -i "$chart_path"
|
||||
git add "$chart_path"
|
||||
git commit -m "Increment $service chart version to $version"
|
||||
git push --set-upstream origin "$BRANCH_NAME"
|
||||
cd -
|
||||
}
|
||||
|
||||
# Main execution
|
||||
main() {
|
||||
setup_git
|
||||
|
||||
# Get backend services list
|
||||
ls backend/cmd >"$BACKEND_SERVICES_FILE"
|
||||
|
||||
# Parse services input (fix for GitHub Actions syntax)
|
||||
echo "Services: ${SERVICES_INPUT:-$1}"
|
||||
IFS=',' read -ra services <<<"${SERVICES_INPUT:-$1}"
|
||||
|
||||
# Process each service
|
||||
for service in "${services[@]}"; do
|
||||
echo "Processing service: $service"
|
||||
cd "$WORKING_DIR"
|
||||
|
||||
local foss_build_args="" ee_build_args="" build_script="$BUILD_SCRIPT_NAME"
|
||||
|
||||
# Determine build configuration based on service type
|
||||
if grep -q "$service" "$BACKEND_SERVICES_FILE"; then
|
||||
# Backend service
|
||||
cd backend
|
||||
foss_build_args="nil $service"
|
||||
ee_build_args="ee $service"
|
||||
else
|
||||
# Non-backend service
|
||||
case "$service" in
|
||||
chalice | alerts | crons)
|
||||
cd "$WORKING_DIR/api"
|
||||
;;
|
||||
*)
|
||||
cd "$service"
|
||||
;;
|
||||
esac
|
||||
|
||||
# Special build scripts for alerts/crons
|
||||
if [[ $service == 'alerts' || $service == 'crons' ]]; then
|
||||
build_script="build_${service}.sh"
|
||||
fi
|
||||
|
||||
ee_build_args="ee"
|
||||
fi
|
||||
|
||||
# Get version and build
|
||||
local version
|
||||
version=$(image_version "$service")
|
||||
|
||||
# Build FOSS and EE versions
|
||||
build_service "$service" "$version" "$foss_build_args"
|
||||
build_service "$service" "${version}-ee" "$ee_build_args"
|
||||
|
||||
# Build managed version for specific services
|
||||
if [[ "$service" != "chalice" && "$service" != "frontend" ]]; then
|
||||
echo "Nothing to build in managed for service $service"
|
||||
else
|
||||
build_managed "$service" "$version"
|
||||
fi
|
||||
|
||||
# Update chart and commit
|
||||
update_chart_version "$service" "$version"
|
||||
done
|
||||
cd "$WORKING_DIR"
|
||||
|
||||
# Cleanup
|
||||
rm -f "$BACKEND_SERVICES_FILE"
|
||||
}
|
||||
|
||||
echo "Working directory: $WORKING_DIR"
|
||||
# Run main function with all arguments
|
||||
main "$SERVICES_INPUT"
|
||||
|
||||
|
||||
- name: Create Pull Request
|
||||
uses: repo-sync/pull-request@v2
|
||||
|
|
@ -147,8 +246,7 @@ jobs:
|
|||
pr_title: "Updated patch build from main ${{ env.HEAD_COMMIT_ID }}"
|
||||
pr_body: |
|
||||
This PR updates the Helm chart version after building the patch from $HEAD_COMMIT_ID.
|
||||
Once this PR is merged, To update the latest tag, run the following workflow.
|
||||
https://github.com/openreplay/openreplay/actions/workflows/update-tag.yaml
|
||||
Once this PR is merged, tag update job will run automatically.
|
||||
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
|
|
|
|||
148
.github/workflows/peers.yaml
vendored
|
|
@ -1,148 +0,0 @@
|
|||
# This action will push the peers changes to aws
|
||||
on:
|
||||
workflow_dispatch:
|
||||
inputs:
|
||||
skip_security_checks:
|
||||
description: "Skip Security checks if there is a unfixable vuln or error. Value: true/false"
|
||||
required: false
|
||||
default: "false"
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
paths:
|
||||
- "peers/**"
|
||||
- "!peers/.gitignore"
|
||||
- "!peers/*-dev.sh"
|
||||
|
||||
name: Build and Deploy Peers
|
||||
|
||||
jobs:
|
||||
deploy:
|
||||
name: Deploy
|
||||
runs-on: ubuntu-latest
|
||||
|
||||
steps:
|
||||
- name: Checkout
|
||||
uses: actions/checkout@v2
|
||||
with:
|
||||
# We need to diff with old commit
|
||||
# to see which workers got changed.
|
||||
fetch-depth: 2
|
||||
|
||||
- uses: ./.github/composite-actions/update-keys
|
||||
with:
|
||||
assist_jwt_secret: ${{ secrets.ASSIST_JWT_SECRET }}
|
||||
assist_key: ${{ secrets.ASSIST_KEY }}
|
||||
domain_name: ${{ secrets.OSS_DOMAIN_NAME }}
|
||||
jwt_refresh_secret: ${{ secrets.JWT_REFRESH_SECRET }}
|
||||
jwt_secret: ${{ secrets.OSS_JWT_SECRET }}
|
||||
jwt_spot_refresh_secret: ${{ secrets.JWT_SPOT_REFRESH_SECRET }}
|
||||
jwt_spot_secret: ${{ secrets.JWT_SPOT_SECRET }}
|
||||
license_key: ${{ secrets.OSS_LICENSE_KEY }}
|
||||
minio_access_key: ${{ secrets.OSS_MINIO_ACCESS_KEY }}
|
||||
minio_secret_key: ${{ secrets.OSS_MINIO_SECRET_KEY }}
|
||||
pg_password: ${{ secrets.OSS_PG_PASSWORD }}
|
||||
registry_url: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
name: Update Keys
|
||||
|
||||
- name: Docker login
|
||||
run: |
|
||||
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"
|
||||
|
||||
- uses: azure/k8s-set-context@v1
|
||||
with:
|
||||
method: kubeconfig
|
||||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
- name: Building and Pushing peers image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
|
||||
cd peers
|
||||
PUSH_IMAGE=0 bash -x ./build.sh
|
||||
[[ "x$skip_security_checks" == "xtrue" ]] || {
|
||||
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.56.2/trivy_0.56.2_Linux-64bit.tar.gz | tar -xzf - -C ./
|
||||
images=("peers")
|
||||
for image in ${images[*]};do
|
||||
./trivy image --db-repository ghcr.io/aquasecurity/trivy-db:2 --db-repository public.ecr.aws/aquasecurity/trivy-db:2 --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
err_code=$?
|
||||
[[ $err_code -ne 0 ]] && {
|
||||
exit $err_code
|
||||
}
|
||||
} && {
|
||||
echo "Skipping Security Checks"
|
||||
}
|
||||
images=("peers")
|
||||
for image in ${images[*]};do
|
||||
docker push $DOCKER_REPO/$image:$IMAGE_TAG
|
||||
done
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
tag: ${image_array[1]}
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helmcharts/
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/peers/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
mkdir -p /tmp/charts
|
||||
mv openreplay/charts/{ingress-nginx,peers,quickwit,connector} /tmp/charts/
|
||||
rm -rf openreplay/charts/*
|
||||
mv /tmp/charts/* openreplay/charts/
|
||||
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
- name: Alert slack
|
||||
if: ${{ failure() }}
|
||||
uses: rtCamp/action-slack-notify@v2
|
||||
env:
|
||||
SLACK_CHANNEL: foss
|
||||
SLACK_TITLE: "Failed ${{ github.workflow }}"
|
||||
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
|
||||
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
|
||||
SLACK_USERNAME: "OR Bot"
|
||||
SLACK_MESSAGE: "Build failed :bomb:"
|
||||
|
||||
# - name: Debug Job
|
||||
# # if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# ENVIRONMENT: staging
|
||||
# with:
|
||||
# limit-access-to-actor: true
|
||||
103
.github/workflows/release-deployment.yaml
vendored
Normal file
@@ -0,0 +1,103 @@
name: Release Deployment

on:
workflow_dispatch:
inputs:
services:
description: 'Comma-separated list of services to deploy. eg: frontend,api,sink'
required: true
branch:
description: 'Branch to deploy (defaults to dev)'
required: false
default: 'dev'

env:
IMAGE_REGISTRY_URL: ${{ secrets.OSS_REGISTRY_URL }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}

jobs:
deploy:
runs-on: ubuntu-latest
steps:
- name: Checkout code
uses: actions/checkout@v3
with:
ref: ${{ github.event.inputs.branch }}
- name: Docker login
run: |
docker login $IMAGE_REGISTRY_URL -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

- name: Set image tag with branch info
run: |
SHORT_SHA=$(git rev-parse --short HEAD)
echo "IMAGE_TAG=${{ github.event.inputs.branch }}-${SHORT_SHA}" >> $GITHUB_ENV
echo "Using image tag: $IMAGE_TAG"

- uses: depot/setup-action@v1

- name: Build and push Docker images
run: |
# Parse the comma-separated services list into an array
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
working_dir=$(pwd)

# Define backend services (consider moving this to workflow inputs or repo config)
ls backend/cmd >> /tmp/backend.txt
BUILD_SCRIPT_NAME="build.sh"

for SERVICE in "${SERVICES[@]}"; do
# Check if service is backend
if grep -q $SERVICE /tmp/backend.txt; then
cd $working_dir/backend
foss_build_args="nil $SERVICE"
ee_build_args="ee $SERVICE"
else
cd $working_dir
[[ $SERVICE == 'chalice' || $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && cd $working_dir/api || cd $SERVICE
[[ $SERVICE == 'alerts' || $SERVICE == 'crons' ]] && BUILD_SCRIPT_NAME="build_${SERVICE}.sh"
ee_build_args="ee"
fi
{
echo IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
IMAGE_TAG=$IMAGE_TAG DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $foss_build_args
}&
{
echo IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
IMAGE_TAG=${IMAGE_TAG}-ee DOCKER_RUNTIME="depot" DOCKER_BUILD_ARGS="--push" ARCH=amd64 DOCKER_REPO=$IMAGE_REGISTRY_URL PUSH_IMAGE=0 bash ${BUILD_SCRIPT_NAME} $ee_build_args
}&
done
wait

- uses: azure/k8s-set-context@v1
name: Using ee release cluster
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_RELEASE_KUBECONFIG }}

- name: Deploy to ee release Kubernetes
run: |
echo "Deploying services to EE cluster: ${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
for SERVICE in "${SERVICES[@]}"; do
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
echo "Deploying $SERVICE to EE cluster with image tag: ${IMAGE_TAG}"
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}-ee
done

- uses: azure/k8s-set-context@v1
name: Using foss release cluster
with:
method: kubeconfig
kubeconfig: ${{ secrets.FOSS_RELEASE_KUBECONFIG }}

- name: Deploy to FOSS release Kubernetes
run: |
echo "Deploying services to FOSS cluster: ${{ github.event.inputs.services }}"
IFS=',' read -ra SERVICES <<< "${{ github.event.inputs.services }}"
for SERVICE in "${SERVICES[@]}"; do
SERVICE=$(echo $SERVICE | xargs) # Trim whitespace
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
echo "Deploying $SERVICE to FOSS cluster with image tag: ${IMAGE_TAG}"
kubectl set image deployment/$SERVICE-openreplay -n app $SERVICE=${IMAGE_REGISTRY_URL}/$SERVICE:${IMAGE_TAG}
done
47
.github/workflows/update-tag.yaml
vendored
@@ -1,35 +1,42 @@
on:
workflow_dispatch:
description: "This workflow will build for patches for latest tag, and will Always use commit from main branch."
inputs:
services:
description: "This action will update the latest tag with current main branch HEAD. Should I proceed ? true/false"
required: true
default: "false"

name: Force Push tag with main branch HEAD
pull_request:
types: [closed]
branches:
- main
name: Release tag update --force

jobs:
deploy:
name: Build Patch from main
runs-on: ubuntu-latest
env:
DEPOT_TOKEN: ${{ secrets.DEPOT_TOKEN }}
DEPOT_PROJECT_ID: ${{ secrets.DEPOT_PROJECT_ID }}
if: ${{ (github.event_name == 'pull_request' && github.event.pull_request.merged == true) || github.event.inputs.services == 'true' }}
steps:
- name: Checkout
uses: actions/checkout@v2

- name: Get latest release tag using GitHub API
id: get-latest-tag
run: |
LATEST_TAG=$(curl -s -H "Authorization: token ${{ secrets.GITHUB_TOKEN }}" \
"https://api.github.com/repos/${{ github.repository }}/releases/latest" \
| jq -r .tag_name)

# Fallback to git command if API doesn't return a tag
if [ "$LATEST_TAG" == "null" ] || [ -z "$LATEST_TAG" ]; then
echo "Not found latest tag"
exit 100
fi

echo "LATEST_TAG=$LATEST_TAG" >> $GITHUB_ENV
echo "Latest tag: $LATEST_TAG"

- name: Set Remote with GITHUB_TOKEN
run: |
git config --unset http.https://github.com/.extraheader
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}.git
git remote set-url origin https://x-access-token:${{ secrets.ACTIONS_COMMMIT_TOKEN }}@github.com/${{ github.repository }}

- name: Push main branch to tag
run: |
git fetch --tags
git checkout main
git push origin HEAD:refs/tags/$(git tag --list 'v[0-9]*' --sort=-v:refname | head -n 1) --force
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# with:
# limit-access-to-actor: true
echo "Updating tag ${{ env.LATEST_TAG }} to point to latest commit on main"
git push origin HEAD:refs/tags/${{ env.LATEST_TAG }} --force
2
LICENSE
@@ -1,4 +1,4 @@
Copyright (c) 2021-2024 Asayer, Inc dba OpenReplay
Copyright (c) 2021-2025 Asayer, Inc dba OpenReplay

OpenReplay monorepo uses multiple licenses. Portions of this software are licensed as follows:
- All content that resides under the "ee/" directory of this repository, is licensed under the license defined in "ee/LICENSE".
@@ -1,10 +1,17 @@
FROM python:3.12-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
ARG GIT_SHA
LABEL GIT_SHA=$GIT_SHA
FROM python:3.12-alpine AS builder
LABEL maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"

RUN apk add --no-cache build-base tini
RUN apk add --no-cache build-base
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv && \
export UV_SYSTEM_PYTHON=true && \
uv pip install --no-cache-dir --upgrade pip setuptools wheel && \
uv pip install --no-cache-dir --upgrade -r requirements.txt

FROM python:3.12-alpine
ARG GIT_SHA
ARG envarg
# Add Tini
# Startup daemon
@@ -14,19 +21,11 @@ ENV SOURCE_MAP_VERSION=0.7.4 \
PRIVATE_ENDPOINTS=false \
ENTERPRISE_BUILD=${envarg} \
GIT_SHA=$GIT_SHA

COPY --from=builder /usr/local/lib/python3.12/site-packages /usr/local/lib/python3.12/site-packages
COPY --from=builder /usr/local/bin /usr/local/bin
WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade uv
RUN uv pip install --no-cache-dir --upgrade pip setuptools wheel --system
RUN uv pip install --no-cache-dir --upgrade -r requirements.txt --system

COPY . .
RUN mv env.default .env

RUN adduser -u 1001 openreplay -D
USER 1001
RUN apk add --no-cache tini && mv env.default .env

ENTRYPOINT ["/sbin/tini", "--"]
CMD ./entrypoint.sh

CMD ["./entrypoint.sh"]
@ -4,7 +4,8 @@ from pydantic_core._pydantic_core import ValidationError
|
|||
|
||||
import schemas
|
||||
from chalicelib.core.alerts import alerts, alerts_listener
|
||||
from chalicelib.core.alerts.modules import sessions, alert_helpers
|
||||
from chalicelib.core.alerts.modules import alert_helpers
|
||||
from chalicelib.core.sessions import sessions_pg as sessions
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
|
|
@ -131,6 +132,7 @@ def Build(a):
|
|||
|
||||
|
||||
def process():
|
||||
logger.info("> processing alerts on PG")
|
||||
notifications = []
|
||||
all_alerts = alerts_listener.get_all_alerts()
|
||||
with pg_client.PostgresClient() as cur:
|
||||
|
|
|
|||
|
|
@ -3,10 +3,11 @@ import logging
|
|||
from pydantic_core._pydantic_core import ValidationError
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.alerts import alerts, alerts_listener
|
||||
from chalicelib.core.alerts.modules import sessions, alert_helpers
|
||||
from chalicelib.utils import pg_client, ch_client, exp_ch_helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.core.alerts import alerts, alerts_listener
|
||||
from chalicelib.core.alerts.modules import alert_helpers
|
||||
from chalicelib.core.sessions import sessions_ch as sessions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -155,6 +156,7 @@ def Build(a):
|
|||
|
||||
|
||||
def process():
|
||||
logger.info("> processing alerts on CH")
|
||||
notifications = []
|
||||
all_alerts = alerts_listener.get_all_alerts()
|
||||
with pg_client.PostgresClient() as cur, ch_client.ClickHouseClient() as ch_cur:
|
||||
|
|
|
|||
|
|
@ -1,9 +1,3 @@
|
|||
from decouple import config
|
||||
|
||||
TENANT_ID = "-1"
|
||||
if config("EXP_ALERTS", cast=bool, default=False):
|
||||
from chalicelib.core.sessions import sessions_ch as sessions
|
||||
else:
|
||||
from chalicelib.core.sessions import sessions
|
||||
|
||||
from . import helpers as alert_helpers
|
||||
|
|
|
|||
|
|
@ -37,8 +37,7 @@ def jwt_authorizer(scheme: str, token: str, leeway=0) -> dict | None:
|
|||
logger.debug("! JWT Expired signature")
|
||||
return None
|
||||
except BaseException as e:
|
||||
logger.warning("! JWT Base Exception")
|
||||
logger.debug(e)
|
||||
logger.warning("! JWT Base Exception", exc_info=e)
|
||||
return None
|
||||
return payload
|
||||
|
||||
|
|
@ -56,8 +55,7 @@ def jwt_refresh_authorizer(scheme: str, token: str):
|
|||
logger.debug("! JWT-refresh Expired signature")
|
||||
return None
|
||||
except BaseException as e:
|
||||
logger.warning("! JWT-refresh Base Exception")
|
||||
logger.debug(e)
|
||||
logger.error("! JWT-refresh Base Exception", exc_info=e)
|
||||
return None
|
||||
return payload
|
||||
|
||||
|
|
|
|||
|
|
@ -85,7 +85,8 @@ def __generic_query(typename, value_length=None):
|
|||
ORDER BY value"""
|
||||
|
||||
if value_length is None or value_length > 2:
|
||||
return f"""(SELECT DISTINCT value, type
|
||||
return f"""SELECT DISTINCT ON(value,type) value, type
|
||||
((SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
|
|
@ -101,7 +102,7 @@ def __generic_query(typename, value_length=None):
|
|||
AND type='{typename.upper()}'
|
||||
AND value ILIKE %(value)s
|
||||
ORDER BY value
|
||||
LIMIT 5);"""
|
||||
LIMIT 5)) AS raw;"""
|
||||
return f"""SELECT DISTINCT value, type
|
||||
FROM {TABLE}
|
||||
WHERE
|
||||
|
|
@ -124,7 +125,7 @@ def __generic_autocomplete(event: Event):
|
|||
return f
|
||||
|
||||
|
||||
def __generic_autocomplete_metas(typename):
|
||||
def generic_autocomplete_metas(typename):
|
||||
def f(project_id, text):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
params = {"project_id": project_id, "value": helper.string_to_sql_like(text),
|
||||
|
|
@ -326,7 +327,7 @@ def __search_metadata(project_id, value, key=None, source=None):
|
|||
AND {colname} ILIKE %(svalue)s LIMIT 5)""")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(f"""\
|
||||
SELECT key, value, 'METADATA' AS TYPE
|
||||
SELECT DISTINCT ON(key, value) key, value, 'METADATA' AS TYPE
|
||||
FROM({" UNION ALL ".join(sub_from)}) AS all_metas
|
||||
LIMIT 5;""", {"project_id": project_id, "value": helper.string_to_sql_like(value),
|
||||
"svalue": helper.string_to_sql_like("^" + value)}))
|
||||
|
|
|
|||
|
|
@ -13,15 +13,18 @@ def get_state(tenant_id):
|
|||
|
||||
if len(pids) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||
cur.mogrify(
|
||||
"""SELECT EXISTS(( SELECT 1
|
||||
FROM public.sessions AS s
|
||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||
{"ids": tuple(pids)})
|
||||
{"ids": tuple(pids)},
|
||||
)
|
||||
)
|
||||
recorded = cur.fetchone()["exists"]
|
||||
meta = False
|
||||
if recorded:
|
||||
query = cur.mogrify(f"""SELECT EXISTS((SELECT 1
|
||||
query = cur.mogrify(
|
||||
f"""SELECT EXISTS((SELECT 1
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
|
|
@ -36,26 +39,35 @@ def get_state(tenant_id):
|
|||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||
OR p.metadata_10 IS NOT NULL )
|
||||
)) AS exists;""",
|
||||
{"tenant_id": tenant_id})
|
||||
{"tenant_id": tenant_id},
|
||||
)
|
||||
cur.execute(query)
|
||||
|
||||
meta = cur.fetchone()["exists"]
|
||||
|
||||
return [
|
||||
{"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
"URL": "https://docs.openreplay.com/getting-started/quick-start"},
|
||||
{"task": "Identify Users",
|
||||
"done": meta,
|
||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"},
|
||||
{"task": "Invite Team Members",
|
||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||
"URL": "https://app.openreplay.com/client/manage-users"},
|
||||
{"task": "Integrations",
|
||||
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0 \
|
||||
or len(sentry.get_all(tenant_id=tenant_id)) > 0 \
|
||||
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||
"URL": "https://docs.openreplay.com/integrations"}
|
||||
{
|
||||
"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
"URL": "https://docs.openreplay.com/getting-started/quick-start",
|
||||
},
|
||||
{
|
||||
"task": "Identify Users",
|
||||
"done": meta,
|
||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
|
||||
},
|
||||
{
|
||||
"task": "Invite Team Members",
|
||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||
"URL": "https://app.openreplay.com/client/manage-users",
|
||||
},
|
||||
{
|
||||
"task": "Integrations",
|
||||
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
|
||||
or len(sentry.get_all(tenant_id=tenant_id)) > 0
|
||||
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||
"URL": "https://docs.openreplay.com/integrations",
|
||||
},
|
||||
]
|
||||
|
||||
|
||||
|
|
@ -66,21 +78,26 @@ def get_state_installing(tenant_id):
|
|||
|
||||
if len(pids) > 0:
|
||||
cur.execute(
|
||||
cur.mogrify("""SELECT EXISTS(( SELECT 1
|
||||
cur.mogrify(
|
||||
"""SELECT EXISTS(( SELECT 1
|
||||
FROM public.sessions AS s
|
||||
WHERE s.project_id IN %(ids)s)) AS exists;""",
|
||||
{"ids": tuple(pids)})
|
||||
{"ids": tuple(pids)},
|
||||
)
|
||||
)
|
||||
recorded = cur.fetchone()["exists"]
|
||||
|
||||
return {"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
"URL": "https://docs.openreplay.com/getting-started/quick-start"}
|
||||
return {
|
||||
"task": "Install OpenReplay",
|
||||
"done": recorded,
|
||||
"URL": "https://docs.openreplay.com/getting-started/quick-start",
|
||||
}
|
||||
|
||||
|
||||
def get_state_identify_users(tenant_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""SELECT EXISTS((SELECT 1
|
||||
query = cur.mogrify(
|
||||
f"""SELECT EXISTS((SELECT 1
|
||||
FROM public.projects AS p
|
||||
LEFT JOIN LATERAL ( SELECT 1
|
||||
FROM public.sessions
|
||||
|
|
@ -95,25 +112,32 @@ def get_state_identify_users(tenant_id):
|
|||
OR p.metadata_8 IS NOT NULL OR p.metadata_9 IS NOT NULL
|
||||
OR p.metadata_10 IS NOT NULL )
|
||||
)) AS exists;""",
|
||||
{"tenant_id": tenant_id})
|
||||
{"tenant_id": tenant_id},
|
||||
)
|
||||
cur.execute(query)
|
||||
|
||||
meta = cur.fetchone()["exists"]
|
||||
|
||||
return {"task": "Identify Users",
|
||||
"done": meta,
|
||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata"}
|
||||
return {
|
||||
"task": "Identify Users",
|
||||
"done": meta,
|
||||
"URL": "https://docs.openreplay.com/data-privacy-security/metadata",
|
||||
}
|
||||
|
||||
|
||||
def get_state_manage_users(tenant_id):
|
||||
return {"task": "Invite Team Members",
|
||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||
"URL": "https://app.openreplay.com/client/manage-users"}
|
||||
return {
|
||||
"task": "Invite Team Members",
|
||||
"done": len(users.get_members(tenant_id=tenant_id)) > 1,
|
||||
"URL": "https://app.openreplay.com/client/manage-users",
|
||||
}
|
||||
|
||||
|
||||
def get_state_integrations(tenant_id):
|
||||
return {"task": "Integrations",
|
||||
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0 \
|
||||
or len(sentry.get_all(tenant_id=tenant_id)) > 0 \
|
||||
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||
"URL": "https://docs.openreplay.com/integrations"}
|
||||
return {
|
||||
"task": "Integrations",
|
||||
"done": len(datadog.get_all(tenant_id=tenant_id)) > 0
|
||||
or len(sentry.get_all(tenant_id=tenant_id)) > 0
|
||||
or len(stackdriver.get_all(tenant_id=tenant_id)) > 0,
|
||||
"URL": "https://docs.openreplay.com/integrations",
|
||||
}
|
||||
|
|
|
|||
|
|
@ -4,10 +4,10 @@ from decouple import config
|
|||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
from . import errors as errors_legacy
|
||||
from . import errors_pg as errors_legacy
|
||||
|
||||
if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
|
||||
logger.info(">>> Using experimental error search")
|
||||
from . import errors_ch as errors
|
||||
else:
|
||||
from . import errors
|
||||
from . import errors_pg as errors
|
||||
|
|
|
|||
|
|
@ -1,10 +1,11 @@
|
|||
import schemas
|
||||
from chalicelib.core import metadata
|
||||
from chalicelib.core.errors import errors_legacy
|
||||
from chalicelib.core.errors.modules import errors_helper
|
||||
from chalicelib.core.errors.modules import sessions
|
||||
from chalicelib.utils import ch_client, exp_ch_helper
|
||||
from chalicelib.utils import helper, metrics_helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from . import errors as errors_legacy
|
||||
|
||||
|
||||
def _multiple_values(values, value_key="value"):
|
||||
|
|
@ -61,25 +62,6 @@ def get_batch(error_ids):
|
|||
return errors_legacy.get_batch(error_ids=error_ids)
|
||||
|
||||
|
||||
def __get_basic_constraints(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
||||
endTime_arg_name="endDate", type_condition=True, project_key="project_id", table_name=None):
|
||||
ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
|
||||
if table_name is not None:
|
||||
table_name = table_name + "."
|
||||
else:
|
||||
table_name = ""
|
||||
if type_condition:
|
||||
ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
|
||||
if time_constraint:
|
||||
ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
|
||||
f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
|
||||
if platform == schemas.PlatformType.MOBILE:
|
||||
ch_sub_query.append("user_device_type = 'mobile'")
|
||||
elif platform == schemas.PlatformType.DESKTOP:
|
||||
ch_sub_query.append("user_device_type = 'desktop'")
|
||||
return ch_sub_query
|
||||
|
||||
|
||||
def __get_basic_constraints_events(platform=None, time_constraint=True, startTime_arg_name="startDate",
|
||||
endTime_arg_name="endDate", type_condition=True, project_key="project_id",
|
||||
table_name=None):
|
||||
|
|
@ -116,7 +98,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
|
|||
for f in data.filters:
|
||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||
platform = f.value[0]
|
||||
ch_sessions_sub_query = __get_basic_constraints(platform, type_condition=False)
|
||||
ch_sessions_sub_query = errors_helper.__get_basic_constraints_ch(platform, type_condition=False)
|
||||
# ignore platform for errors table
|
||||
ch_sub_query = __get_basic_constraints_events(None, type_condition=True)
|
||||
ch_sub_query.append("JSONExtractString(toString(`$properties`), 'source') = 'js_exception'")
|
||||
|
|
@ -148,7 +130,8 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
|
|||
if len(data.events) > errors_condition_count:
|
||||
subquery_part_args, subquery_part = sessions.search_query_parts_ch(data=data, error_status=data.status,
|
||||
errors_only=True,
|
||||
project_id=project.project_id, user_id=user_id,
|
||||
project_id=project.project_id,
|
||||
user_id=user_id,
|
||||
issue=None,
|
||||
favorite_only=False)
|
||||
subquery_part = f"INNER JOIN {subquery_part} USING(session_id)"
|
||||
|
|
@ -355,14 +338,14 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
|
|||
SELECT details.error_id as error_id,
|
||||
name, message, users, total,
|
||||
sessions, last_occurrence, first_occurrence, chart
|
||||
FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
|
||||
FROM (SELECT error_id,
|
||||
JSONExtractString(toString(`$properties`), 'name') AS name,
|
||||
JSONExtractString(toString(`$properties`), 'message') AS message,
|
||||
COUNT(DISTINCT user_id) AS users,
|
||||
COUNT(DISTINCT events.session_id) AS sessions,
|
||||
MAX(created_at) AS max_datetime,
|
||||
MIN(created_at) AS min_datetime,
|
||||
COUNT(DISTINCT JSONExtractString(toString(`$properties`), 'error_id'))
|
||||
COUNT(DISTINCT error_id)
|
||||
OVER() AS total
|
||||
FROM {MAIN_EVENTS_TABLE} AS events
|
||||
INNER JOIN (SELECT session_id, coalesce(user_id,toString(user_uuid)) AS user_id
|
||||
|
|
@ -374,7 +357,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
|
|||
GROUP BY error_id, name, message
|
||||
ORDER BY {sort} {order}
|
||||
LIMIT %(errors_limit)s OFFSET %(errors_offset)s) AS details
|
||||
INNER JOIN (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
|
||||
INNER JOIN (SELECT error_id,
|
||||
toUnixTimestamp(MAX(created_at))*1000 AS last_occurrence,
|
||||
toUnixTimestamp(MIN(created_at))*1000 AS first_occurrence
|
||||
FROM {MAIN_EVENTS_TABLE}
|
||||
|
|
@ -383,7 +366,7 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
|
|||
GROUP BY error_id) AS time_details
|
||||
ON details.error_id=time_details.error_id
|
||||
INNER JOIN (SELECT error_id, groupArray([timestamp, count]) AS chart
|
||||
FROM (SELECT JSONExtractString(toString(`$properties`), 'error_id') AS error_id,
|
||||
FROM (SELECT error_id,
|
||||
gs.generate_series AS timestamp,
|
||||
COUNT(DISTINCT session_id) AS count
|
||||
FROM generate_series(%(startDate)s, %(endDate)s, %(step_size)s) AS gs
|
||||
|
|
|
|||
|
|
@ -1,5 +1,5 @@
|
|||
from chalicelib.core.errors import errors_legacy as errors
|
||||
from chalicelib.utils import errors_helper
|
||||
from chalicelib.core.errors.modules import errors_helper
|
||||
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils.metrics_helper import get_step_size
|
||||
|
|
@ -40,26 +40,29 @@ def __process_tags(row):
|
|||
|
||||
|
||||
def get_details(project_id, error_id, user_id, **data):
|
||||
pg_sub_query24 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
|
||||
pg_sub_query24 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
|
||||
step_size_name="step_size24")
|
||||
pg_sub_query24.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_session = errors.__get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30",
|
||||
project_key="sessions.project_id")
|
||||
pg_sub_query30_session = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30",
|
||||
project_key="sessions.project_id")
|
||||
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_session.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err = errors.__get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30", project_key="errors.project_id")
|
||||
pg_sub_query30_err = errors_helper.__get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30",
|
||||
project_key="errors.project_id")
|
||||
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_err.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err.append("source ='js_exception'")
|
||||
pg_sub_query30 = errors.__get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
|
||||
pg_sub_query30 = errors_helper.__get_basic_constraints(time_constraint=False, chart=True,
|
||||
step_size_name="step_size30")
|
||||
pg_sub_query30.append("error_id = %(error_id)s")
|
||||
pg_basic_query = errors.__get_basic_constraints(time_constraint=False)
|
||||
pg_basic_query = errors_helper.__get_basic_constraints(time_constraint=False)
|
||||
pg_basic_query.append("error_id = %(error_id)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
data["startDate24"] = TimeUTC.now(-1)
|
||||
|
|
@ -95,8 +98,7 @@ def get_details(project_id, error_id, user_id, **data):
|
|||
device_partition,
|
||||
country_partition,
|
||||
chart24,
|
||||
chart30,
|
||||
custom_tags
|
||||
chart30
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
|
|
@ -111,15 +113,8 @@ def get_details(project_id, error_id, user_id, **data):
|
|||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE error_id = %(error_id)s) AS time_details ON (TRUE)
|
||||
INNER JOIN (SELECT session_id AS last_session_id,
|
||||
coalesce(custom_tags, '[]')::jsonb AS custom_tags
|
||||
INNER JOIN (SELECT session_id AS last_session_id
|
||||
FROM events.errors
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
|
||||
FROM errors_tags
|
||||
WHERE errors_tags.error_id = %(error_id)s
|
||||
AND errors_tags.session_id = errors.session_id
|
||||
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
|
||||
WHERE error_id = %(error_id)s
|
||||
ORDER BY errors.timestamp DESC
|
||||
LIMIT 1) AS last_session_details ON (TRUE)
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
|
|||
import json
|
||||
from typing import Optional, List
|
||||
from typing import List
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.errors.modules import errors_helper
|
||||
from chalicelib.core.sessions import sessions_search
|
||||
from chalicelib.core.sourcemaps import sourcemaps
|
||||
from chalicelib.utils import pg_client, helper
|
||||
|
|
@ -51,27 +52,6 @@ def get_batch(error_ids):
|
|||
return helper.list_to_camel_case(errors)
|
||||
|
||||
|
||||
def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
|
||||
startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
|
||||
chart: bool = False, step_size_name: str = "step_size",
|
||||
project_key: Optional[str] = "project_id"):
|
||||
if project_key is None:
|
||||
ch_sub_query = []
|
||||
else:
|
||||
ch_sub_query = [f"{project_key} =%(project_id)s"]
|
||||
if time_constraint:
|
||||
ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
|
||||
f"timestamp < %({endTime_arg_name})s"]
|
||||
if chart:
|
||||
ch_sub_query += [f"timestamp >= generated_timestamp",
|
||||
f"timestamp < generated_timestamp + %({step_size_name})s"]
|
||||
if platform == schemas.PlatformType.MOBILE:
|
||||
ch_sub_query.append("user_device_type = 'mobile'")
|
||||
elif platform == schemas.PlatformType.DESKTOP:
|
||||
ch_sub_query.append("user_device_type = 'desktop'")
|
||||
return ch_sub_query
|
||||
|
||||
|
||||
def __get_sort_key(key):
|
||||
return {
|
||||
schemas.ErrorSort.OCCURRENCE: "max_datetime",
|
||||
|
|
@ -90,12 +70,13 @@ def search(data: schemas.SearchErrorsSchema, project: schemas.ProjectContext, us
|
|||
for f in data.filters:
|
||||
if f.type == schemas.FilterType.PLATFORM and len(f.value) > 0:
|
||||
platform = f.value[0]
|
||||
pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
|
||||
pg_sub_query = errors_helper.__get_basic_constraints(platform, project_key="sessions.project_id")
|
||||
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
|
||||
"pe.project_id=%(project_id)s"]
|
||||
# To ignore Script error
|
||||
pg_sub_query.append("pe.message!='Script error.'")
|
||||
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
|
||||
pg_sub_query_chart = errors_helper.__get_basic_constraints(platform, time_constraint=False, chart=True,
|
||||
project_key=None)
|
||||
if platform:
|
||||
pg_sub_query_chart += ["start_ts>=%(startDate)s", "start_ts<%(endDate)s", "project_id=%(project_id)s"]
|
||||
pg_sub_query_chart.append("errors.error_id =details.error_id")
|
||||
|
|
@ -3,8 +3,9 @@ import logging
from decouple import config

logger = logging.getLogger(__name__)
from . import helper as errors_helper

if config("EXP_ERRORS_SEARCH", cast=bool, default=False):
from chalicelib.core.sessions import sessions_ch as sessions
import chalicelib.core.sessions.sessions_ch as sessions
else:
from chalicelib.core.sessions import sessions
import chalicelib.core.sessions.sessions_pg as sessions
58  api/chalicelib/core/errors/modules/helper.py  Normal file
@ -0,0 +1,58 @@
from typing import Optional

import schemas
from chalicelib.core.sourcemaps import sourcemaps


def __get_basic_constraints(platform: Optional[schemas.PlatformType] = None, time_constraint: bool = True,
                            startTime_arg_name: str = "startDate", endTime_arg_name: str = "endDate",
                            chart: bool = False, step_size_name: str = "step_size",
                            project_key: Optional[str] = "project_id"):
    if project_key is None:
        ch_sub_query = []
    else:
        ch_sub_query = [f"{project_key} =%(project_id)s"]
    if time_constraint:
        ch_sub_query += [f"timestamp >= %({startTime_arg_name})s",
                         f"timestamp < %({endTime_arg_name})s"]
    if chart:
        ch_sub_query += [f"timestamp >= generated_timestamp",
                         f"timestamp < generated_timestamp + %({step_size_name})s"]
    if platform == schemas.PlatformType.MOBILE:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.DESKTOP:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def __get_basic_constraints_ch(platform=None, time_constraint=True, startTime_arg_name="startDate",
                               endTime_arg_name="endDate", type_condition=True, project_key="project_id",
                               table_name=None):
    ch_sub_query = [f"{project_key} =toUInt16(%(project_id)s)"]
    if table_name is not None:
        table_name = table_name + "."
    else:
        table_name = ""
    if type_condition:
        ch_sub_query.append(f"{table_name}`$event_name`='ERROR'")
    if time_constraint:
        ch_sub_query += [f"{table_name}datetime >= toDateTime(%({startTime_arg_name})s/1000)",
                         f"{table_name}datetime < toDateTime(%({endTime_arg_name})s/1000)"]
    if platform == schemas.PlatformType.MOBILE:
        ch_sub_query.append("user_device_type = 'mobile'")
    elif platform == schemas.PlatformType.DESKTOP:
        ch_sub_query.append("user_device_type = 'desktop'")
    return ch_sub_query


def format_first_stack_frame(error):
    error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
    for s in error["stack"]:
        for c in s.get("context", []):
            for sci, sc in enumerate(c):
                if isinstance(sc, str) and len(sc) > 1000:
                    c[sci] = sc[:1000]
        # convert bytes to string:
        if isinstance(s["filename"], bytes):
            s["filename"] = s["filename"].decode("utf-8")
    return error
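The new modules/helper.py builds WHERE clauses as plain lists of SQL fragments that callers join with AND. A simplified, self-contained sketch of that pattern (it does not import the real helper; parameter placeholders follow the %(name)s style used throughout this codebase):

# Simplified illustration of the constraints-list pattern used by __get_basic_constraints.
def basic_constraints(platform=None, time_constraint=True):
    constraints = ["project_id = %(project_id)s"]
    if time_constraint:
        constraints += ["timestamp >= %(startDate)s", "timestamp < %(endDate)s"]
    if platform == "mobile":
        constraints.append("user_device_type = 'mobile'")
    return constraints


where_clause = " AND ".join(basic_constraints(platform="mobile"))
query = f"SELECT session_id FROM sessions WHERE {where_clause};"
# -> SELECT session_id FROM sessions WHERE project_id = %(project_id)s AND timestamp >= ... ;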
@ -1,8 +1,9 @@
|
|||
from functools import cache
|
||||
from typing import Optional
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.autocomplete import autocomplete
|
||||
from chalicelib.core import issues
|
||||
from chalicelib.core.autocomplete import autocomplete
|
||||
from chalicelib.core.sessions import sessions_metas
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
|
@ -137,52 +138,57 @@ class EventType:
|
|||
column=None) # column=None because errors are searched by name or message
|
||||
|
||||
|
||||
SUPPORTED_TYPES = {
|
||||
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
|
||||
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
|
||||
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
|
||||
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
|
||||
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.LOCATION.ui_type)),
|
||||
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
|
||||
query=autocomplete.__generic_query(typename=EventType.CUSTOM.ui_type)),
|
||||
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.REQUEST.ui_type)),
|
||||
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.GRAPHQL.ui_type)),
|
||||
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
|
||||
@cache
|
||||
def supported_types():
|
||||
return {
|
||||
EventType.CLICK.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK),
|
||||
query=autocomplete.__generic_query(typename=EventType.CLICK.ui_type)),
|
||||
EventType.INPUT.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT),
|
||||
query=autocomplete.__generic_query(typename=EventType.INPUT.ui_type)),
|
||||
EventType.LOCATION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.LOCATION),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.LOCATION.ui_type)),
|
||||
EventType.CUSTOM.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CUSTOM.ui_type)),
|
||||
EventType.REQUEST.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.STATEACTION.ui_type)),
|
||||
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
|
||||
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
|
||||
query=None),
|
||||
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
|
||||
query=None),
|
||||
# MOBILE
|
||||
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CLICK_MOBILE.ui_type)),
|
||||
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.SWIPE_MOBILE.ui_type)),
|
||||
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.INPUT_MOBILE.ui_type)),
|
||||
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
|
||||
typename=EventType.REQUEST.ui_type)),
|
||||
EventType.GRAPHQL.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.GRAPHQL),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.VIEW_MOBILE.ui_type)),
|
||||
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CUSTOM_MOBILE.ui_type)),
|
||||
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.REQUEST_MOBILE.ui_type)),
|
||||
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
|
||||
typename=EventType.GRAPHQL.ui_type)),
|
||||
EventType.STATEACTION.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.STATEACTION),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.STATEACTION.ui_type)),
|
||||
EventType.TAG.ui_type: SupportedFilter(get=_search_tags, query=None),
|
||||
EventType.ERROR.ui_type: SupportedFilter(get=autocomplete.__search_errors,
|
||||
query=None),
|
||||
EventType.METADATA.ui_type: SupportedFilter(get=autocomplete.__search_metadata,
|
||||
query=None),
|
||||
}
|
||||
# MOBILE
|
||||
EventType.CLICK_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.CLICK_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CLICK_MOBILE.ui_type)),
|
||||
EventType.SWIPE_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.SWIPE_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.SWIPE_MOBILE.ui_type)),
|
||||
EventType.INPUT_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.INPUT_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.INPUT_MOBILE.ui_type)),
|
||||
EventType.VIEW_MOBILE.ui_type: SupportedFilter(get=autocomplete.__generic_autocomplete(EventType.VIEW_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.VIEW_MOBILE.ui_type)),
|
||||
EventType.CUSTOM_MOBILE.ui_type: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete(EventType.CUSTOM_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.CUSTOM_MOBILE.ui_type)),
|
||||
EventType.REQUEST_MOBILE.ui_type: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete(EventType.REQUEST_MOBILE),
|
||||
query=autocomplete.__generic_query(
|
||||
typename=EventType.REQUEST_MOBILE.ui_type)),
|
||||
EventType.CRASH_MOBILE.ui_type: SupportedFilter(get=autocomplete.__search_errors_mobile,
|
||||
query=None),
|
||||
}
|
||||
|
||||
|
||||
def get_errors_by_session_id(session_id, project_id):
|
||||
|
|
@ -202,17 +208,17 @@ def search(text, event_type, project_id, source, key):
|
|||
if not event_type:
|
||||
return {"data": autocomplete.__get_autocomplete_table(text, project_id)}
|
||||
|
||||
if event_type in SUPPORTED_TYPES.keys():
|
||||
rows = SUPPORTED_TYPES[event_type].get(project_id=project_id, value=text, key=key, source=source)
|
||||
elif event_type + "_MOBILE" in SUPPORTED_TYPES.keys():
|
||||
rows = SUPPORTED_TYPES[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
|
||||
elif event_type in sessions_metas.SUPPORTED_TYPES.keys():
|
||||
if event_type in supported_types().keys():
|
||||
rows = supported_types()[event_type].get(project_id=project_id, value=text, key=key, source=source)
|
||||
elif event_type + "_MOBILE" in supported_types().keys():
|
||||
rows = supported_types()[event_type + "_MOBILE"].get(project_id=project_id, value=text, key=key, source=source)
|
||||
elif event_type in sessions_metas.supported_types().keys():
|
||||
return sessions_metas.search(text, event_type, project_id)
|
||||
elif event_type.endswith("_IOS") \
|
||||
and event_type[:-len("_IOS")] in sessions_metas.SUPPORTED_TYPES.keys():
|
||||
and event_type[:-len("_IOS")] in sessions_metas.supported_types().keys():
|
||||
return sessions_metas.search(text, event_type, project_id)
|
||||
elif event_type.endswith("_MOBILE") \
|
||||
and event_type[:-len("_MOBILE")] in sessions_metas.SUPPORTED_TYPES.keys():
|
||||
and event_type[:-len("_MOBILE")] in sessions_metas.supported_types().keys():
|
||||
return sessions_metas.search(text, event_type, project_id)
|
||||
else:
|
||||
return {"errors": ["unsupported event"]}
|
||||
|
|
|
|||
|
|
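The refactor above turns the module-level SUPPORTED_TYPES dict into a supported_types() function decorated with functools.cache, so the registry is built lazily on first use and memoized afterwards. A tiny illustration of that pattern, with a dummy registry in place of the real SupportedFilter entries:

from functools import cache


@cache
def supported_types() -> dict:
    # Built once, on the first call; later calls return the same dict object.
    print("building registry")                            # runs a single time
    return {
        "CLICK": lambda **kw: ["click suggestions"],      # stand-ins for SupportedFilter
        "INPUT": lambda **kw: ["input suggestions"],
    }


rows = supported_types()["CLICK"](project_id=1, value="btn")
assert supported_types() is supported_types()             # cached: same object every call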
@ -27,7 +27,6 @@ HEALTH_ENDPOINTS = {
|
|||
"http": app_connection_string("http-openreplay", 8888, "metrics"),
|
||||
"ingress-nginx": app_connection_string("ingress-nginx-openreplay", 80, "healthz"),
|
||||
"integrations": app_connection_string("integrations-openreplay", 8888, "metrics"),
|
||||
"peers": app_connection_string("peers-openreplay", 8888, "health"),
|
||||
"sink": app_connection_string("sink-openreplay", 8888, "metrics"),
|
||||
"sourcemapreader": app_connection_string(
|
||||
"sourcemapreader-openreplay", 8888, "health"
|
||||
|
|
@ -39,9 +38,7 @@ HEALTH_ENDPOINTS = {
|
|||
def __check_database_pg(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {
|
||||
"errors": ["Postgres health-check failed"]
|
||||
}
|
||||
"details": {"errors": ["Postgres health-check failed"]},
|
||||
}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
try:
|
||||
|
|
@ -63,29 +60,26 @@ def __check_database_pg(*_):
|
|||
"details": {
|
||||
# "version": server_version["server_version"],
|
||||
# "schema": schema_version["version"]
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def __always_healthy(*_):
|
||||
return {
|
||||
"health": True,
|
||||
"details": {}
|
||||
}
|
||||
return {"health": True, "details": {}}
|
||||
|
||||
|
||||
def __check_be_service(service_name):
|
||||
def fn(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {
|
||||
"errors": ["server health-check failed"]
|
||||
}
|
||||
"details": {"errors": ["server health-check failed"]},
|
||||
}
|
||||
try:
|
||||
results = requests.get(HEALTH_ENDPOINTS.get(service_name), timeout=2)
|
||||
if results.status_code != 200:
|
||||
logger.error(f"!! issue with the {service_name}-health code:{results.status_code}")
|
||||
logger.error(
|
||||
f"!! issue with the {service_name}-health code:{results.status_code}"
|
||||
)
|
||||
logger.error(results.text)
|
||||
# fail_response["details"]["errors"].append(results.text)
|
||||
return fail_response
|
||||
|
|
@ -103,10 +97,7 @@ def __check_be_service(service_name):
|
|||
logger.error("couldn't get response")
|
||||
# fail_response["details"]["errors"].append(str(e))
|
||||
return fail_response
|
||||
return {
|
||||
"health": True,
|
||||
"details": {}
|
||||
}
|
||||
return {"health": True, "details": {}}
|
||||
|
||||
return fn
|
||||
|
||||
|
|
@ -114,7 +105,7 @@ def __check_be_service(service_name):
|
|||
def __check_redis(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {"errors": ["server health-check failed"]}
|
||||
"details": {"errors": ["server health-check failed"]},
|
||||
}
|
||||
if config("REDIS_STRING", default=None) is None:
|
||||
# fail_response["details"]["errors"].append("REDIS_STRING not defined in env-vars")
|
||||
|
|
@ -133,16 +124,14 @@ def __check_redis(*_):
|
|||
"health": True,
|
||||
"details": {
|
||||
# "version": r.execute_command('INFO')['redis_version']
|
||||
}
|
||||
},
|
||||
}
|
||||
|
||||
|
||||
def __check_SSL(*_):
|
||||
fail_response = {
|
||||
"health": False,
|
||||
"details": {
|
||||
"errors": ["SSL Certificate health-check failed"]
|
||||
}
|
||||
"details": {"errors": ["SSL Certificate health-check failed"]},
|
||||
}
|
||||
try:
|
||||
requests.get(config("SITE_URL"), verify=True, allow_redirects=True)
|
||||
|
|
@ -150,36 +139,28 @@ def __check_SSL(*_):
|
|||
logger.error("!! health failed: SSL Certificate")
|
||||
logger.exception(e)
|
||||
return fail_response
|
||||
return {
|
||||
"health": True,
|
||||
"details": {}
|
||||
}
|
||||
return {"health": True, "details": {}}
|
||||
|
||||
|
||||
def __get_sessions_stats(*_):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
constraints = ["projects.deleted_at IS NULL"]
|
||||
query = cur.mogrify(f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
|
||||
query = cur.mogrify(
|
||||
f"""SELECT COALESCE(SUM(sessions_count),0) AS s_c,
|
||||
COALESCE(SUM(events_count),0) AS e_c
|
||||
FROM public.projects_stats
|
||||
INNER JOIN public.projects USING(project_id)
|
||||
WHERE {" AND ".join(constraints)};""")
|
||||
WHERE {" AND ".join(constraints)};"""
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return {
|
||||
"numberOfSessionsCaptured": row["s_c"],
|
||||
"numberOfEventCaptured": row["e_c"]
|
||||
}
|
||||
return {"numberOfSessionsCaptured": row["s_c"], "numberOfEventCaptured": row["e_c"]}
|
||||
|
||||
|
||||
def get_health(tenant_id=None):
|
||||
health_map = {
|
||||
"databases": {
|
||||
"postgres": __check_database_pg
|
||||
},
|
||||
"ingestionPipeline": {
|
||||
"redis": __check_redis
|
||||
},
|
||||
"databases": {"postgres": __check_database_pg},
|
||||
"ingestionPipeline": {"redis": __check_redis},
|
||||
"backendServices": {
|
||||
"alerts": __check_be_service("alerts"),
|
||||
"assets": __check_be_service("assets"),
|
||||
|
|
@ -192,13 +173,12 @@ def get_health(tenant_id=None):
|
|||
"http": __check_be_service("http"),
|
||||
"ingress-nginx": __always_healthy,
|
||||
"integrations": __check_be_service("integrations"),
|
||||
"peers": __check_be_service("peers"),
|
||||
"sink": __check_be_service("sink"),
|
||||
"sourcemapreader": __check_be_service("sourcemapreader"),
|
||||
"storage": __check_be_service("storage")
|
||||
"storage": __check_be_service("storage"),
|
||||
},
|
||||
"details": __get_sessions_stats,
|
||||
"ssl": __check_SSL
|
||||
"ssl": __check_SSL,
|
||||
}
|
||||
return __process_health(health_map=health_map)
|
||||
|
||||
|
|
@ -210,10 +190,16 @@ def __process_health(health_map):
|
|||
response.pop(parent_key)
|
||||
elif isinstance(health_map[parent_key], dict):
|
||||
for element_key in health_map[parent_key]:
|
||||
if config(f"SKIP_H_{parent_key.upper()}_{element_key.upper()}", cast=bool, default=False):
|
||||
if config(
|
||||
f"SKIP_H_{parent_key.upper()}_{element_key.upper()}",
|
||||
cast=bool,
|
||||
default=False,
|
||||
):
|
||||
response[parent_key].pop(element_key)
|
||||
else:
|
||||
response[parent_key][element_key] = health_map[parent_key][element_key]()
|
||||
response[parent_key][element_key] = health_map[parent_key][
|
||||
element_key
|
||||
]()
|
||||
else:
|
||||
response[parent_key] = health_map[parent_key]()
|
||||
return response
|
||||
|
|
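__process_health walks the nested health_map and drops any check whose SKIP_H_<PARENT>_<ELEMENT> flag is set before calling it. A compact, runnable approximation of that traversal (reading os.environ directly instead of decouple, and omitting the top-level SKIP_H_<PARENT> case):

import os


def process_health(health_map: dict) -> dict:
    response = {}
    for parent, value in health_map.items():
        if callable(value):
            response[parent] = value()
            continue
        response[parent] = {}
        for name, check in value.items():
            if os.environ.get(f"SKIP_H_{parent.upper()}_{name.upper()}", "false").lower() == "true":
                continue                      # check explicitly disabled via env var
            response[parent][name] = check()  # each check returns {"health": ..., "details": ...}
    return response


health = process_health({"databases": {"postgres": lambda: {"health": True, "details": {}}}})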
@ -221,7 +207,8 @@ def __process_health(health_map):
|
|||
|
||||
def cron():
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify("""SELECT projects.project_id,
|
||||
query = cur.mogrify(
|
||||
"""SELECT projects.project_id,
|
||||
projects.created_at,
|
||||
projects.sessions_last_check_at,
|
||||
projects.first_recorded_session_at,
|
||||
|
|
@ -229,7 +216,8 @@ def cron():
|
|||
FROM public.projects
|
||||
LEFT JOIN public.projects_stats USING (project_id)
|
||||
WHERE projects.deleted_at IS NULL
|
||||
ORDER BY project_id;""")
|
||||
ORDER BY project_id;"""
|
||||
)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
for r in rows:
|
||||
|
|
@ -250,20 +238,24 @@ def cron():
|
|||
count_start_from = r["last_update_at"]
|
||||
|
||||
count_start_from = TimeUTC.datetime_to_timestamp(count_start_from)
|
||||
params = {"project_id": r["project_id"],
|
||||
"start_ts": count_start_from,
|
||||
"end_ts": TimeUTC.now(),
|
||||
"sessions_count": 0,
|
||||
"events_count": 0}
|
||||
params = {
|
||||
"project_id": r["project_id"],
|
||||
"start_ts": count_start_from,
|
||||
"end_ts": TimeUTC.now(),
|
||||
"sessions_count": 0,
|
||||
"events_count": 0,
|
||||
}
|
||||
|
||||
query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
|
||||
query = cur.mogrify(
|
||||
"""SELECT COUNT(1) AS sessions_count,
|
||||
COALESCE(SUM(events_count),0) AS events_count
|
||||
FROM public.sessions
|
||||
WHERE project_id=%(project_id)s
|
||||
AND start_ts>=%(start_ts)s
|
||||
AND start_ts<=%(end_ts)s
|
||||
AND duration IS NOT NULL;""",
|
||||
params)
|
||||
params,
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
if row is not None:
|
||||
|
|
@ -271,56 +263,68 @@ def cron():
|
|||
params["events_count"] = row["events_count"]
|
||||
|
||||
if insert:
|
||||
query = cur.mogrify("""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
|
||||
query = cur.mogrify(
|
||||
"""INSERT INTO public.projects_stats(project_id, sessions_count, events_count, last_update_at)
|
||||
VALUES (%(project_id)s, %(sessions_count)s, %(events_count)s, (now() AT TIME ZONE 'utc'::text));""",
|
||||
params)
|
||||
params,
|
||||
)
|
||||
else:
|
||||
query = cur.mogrify("""UPDATE public.projects_stats
|
||||
query = cur.mogrify(
|
||||
"""UPDATE public.projects_stats
|
||||
SET sessions_count=sessions_count+%(sessions_count)s,
|
||||
events_count=events_count+%(events_count)s,
|
||||
last_update_at=(now() AT TIME ZONE 'utc'::text)
|
||||
WHERE project_id=%(project_id)s;""",
|
||||
params)
|
||||
params,
|
||||
)
|
||||
cur.execute(query)
|
||||
|
||||
|
||||
# this cron is used to correct the sessions&events count every week
|
||||
def weekly_cron():
|
||||
with pg_client.PostgresClient(long_query=True) as cur:
|
||||
query = cur.mogrify("""SELECT project_id,
|
||||
query = cur.mogrify(
|
||||
"""SELECT project_id,
|
||||
projects_stats.last_update_at
|
||||
FROM public.projects
|
||||
LEFT JOIN public.projects_stats USING (project_id)
|
||||
WHERE projects.deleted_at IS NULL
|
||||
ORDER BY project_id;""")
|
||||
ORDER BY project_id;"""
|
||||
)
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
for r in rows:
|
||||
if r["last_update_at"] is None:
|
||||
continue
|
||||
|
||||
params = {"project_id": r["project_id"],
|
||||
"end_ts": TimeUTC.now(),
|
||||
"sessions_count": 0,
|
||||
"events_count": 0}
|
||||
params = {
|
||||
"project_id": r["project_id"],
|
||||
"end_ts": TimeUTC.now(),
|
||||
"sessions_count": 0,
|
||||
"events_count": 0,
|
||||
}
|
||||
|
||||
query = cur.mogrify("""SELECT COUNT(1) AS sessions_count,
|
||||
query = cur.mogrify(
|
||||
"""SELECT COUNT(1) AS sessions_count,
|
||||
COALESCE(SUM(events_count),0) AS events_count
|
||||
FROM public.sessions
|
||||
WHERE project_id=%(project_id)s
|
||||
AND start_ts<=%(end_ts)s
|
||||
AND duration IS NOT NULL;""",
|
||||
params)
|
||||
params,
|
||||
)
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
if row is not None:
|
||||
params["sessions_count"] = row["sessions_count"]
|
||||
params["events_count"] = row["events_count"]
|
||||
|
||||
query = cur.mogrify("""UPDATE public.projects_stats
|
||||
query = cur.mogrify(
|
||||
"""UPDATE public.projects_stats
|
||||
SET sessions_count=%(sessions_count)s,
|
||||
events_count=%(events_count)s,
|
||||
last_update_at=(now() AT TIME ZONE 'utc'::text)
|
||||
WHERE project_id=%(project_id)s;""",
|
||||
params)
|
||||
params,
|
||||
)
|
||||
cur.execute(query)
|
||||
|
|
|
|||
|
|
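The cron above counts only sessions started since the last recorded update and then either inserts a projects_stats row or adds the deltas to the existing one, while weekly_cron recounts absolute totals to correct drift. A rough, database-free sketch of that accounting logic (function names are illustrative):

def apply_counts(stats: dict | None, new_sessions: int, new_events: int) -> dict:
    # Mirrors the cron: INSERT when no stats row exists yet, otherwise add the deltas.
    if stats is None:
        return {"sessions_count": new_sessions, "events_count": new_events}
    return {
        "sessions_count": stats["sessions_count"] + new_sessions,
        "events_count": stats["events_count"] + new_events,
    }


def weekly_recount(total_sessions: int, total_events: int) -> dict:
    # Mirrors weekly_cron: overwrite with absolute totals instead of adding deltas.
    return {"sessions_count": total_sessions, "events_count": total_events}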
@ -50,8 +50,8 @@ class JIRAIntegration(base.BaseIntegration):
|
|||
cur.execute(
|
||||
cur.mogrify(
|
||||
"""SELECT username, token, url
|
||||
FROM public.jira_cloud
|
||||
WHERE user_id=%(user_id)s;""",
|
||||
FROM public.jira_cloud
|
||||
WHERE user_id = %(user_id)s;""",
|
||||
{"user_id": self._user_id})
|
||||
)
|
||||
data = helper.dict_to_camel_case(cur.fetchone())
|
||||
|
|
@ -95,10 +95,9 @@ class JIRAIntegration(base.BaseIntegration):
|
|||
def add(self, username, token, url, obfuscate=False):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
INSERT INTO public.jira_cloud(username, token, user_id,url)
|
||||
VALUES (%(username)s, %(token)s, %(user_id)s,%(url)s)
|
||||
RETURNING username, token, url;""",
|
||||
cur.mogrify(""" \
|
||||
INSERT INTO public.jira_cloud(username, token, user_id, url)
|
||||
VALUES (%(username)s, %(token)s, %(user_id)s, %(url)s) RETURNING username, token, url;""",
|
||||
{"user_id": self._user_id, "username": username,
|
||||
"token": token, "url": url})
|
||||
)
|
||||
|
|
@ -112,9 +111,10 @@ class JIRAIntegration(base.BaseIntegration):
|
|||
def delete(self):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
DELETE FROM public.jira_cloud
|
||||
WHERE user_id=%(user_id)s;""",
|
||||
cur.mogrify(""" \
|
||||
DELETE
|
||||
FROM public.jira_cloud
|
||||
WHERE user_id = %(user_id)s;""",
|
||||
{"user_id": self._user_id})
|
||||
)
|
||||
return {"state": "success"}
|
||||
|
|
@ -125,7 +125,7 @@ class JIRAIntegration(base.BaseIntegration):
|
|||
changes={
|
||||
"username": data.username,
|
||||
"token": data.token if len(data.token) > 0 and data.token.find("***") == -1 \
|
||||
else self.integration.token,
|
||||
else self.integration["token"],
|
||||
"url": str(data.url)
|
||||
},
|
||||
obfuscate=True
|
||||
|
|
|
|||
|
|
@ -98,17 +98,23 @@ def __edit(project_id, col_index, colname, new_name):
|
|||
if col_index not in list(old_metas.keys()):
|
||||
return {"errors": ["custom field not found"]}
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
if old_metas[col_index]["key"] != new_name:
|
||||
if old_metas[col_index]["key"] != new_name:
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""UPDATE public.projects
|
||||
SET {colname} = %(value)s
|
||||
WHERE project_id = %(project_id)s
|
||||
AND deleted_at ISNULL
|
||||
RETURNING {colname};""",
|
||||
RETURNING {colname},
|
||||
(SELECT {colname} FROM projects WHERE project_id = %(project_id)s) AS old_{colname};""",
|
||||
{"project_id": project_id, "value": new_name})
|
||||
cur.execute(query=query)
|
||||
new_name = cur.fetchone()[colname]
|
||||
row = cur.fetchone()
|
||||
new_name = row[colname]
|
||||
old_name = row['old_' + colname]
|
||||
old_metas[col_index]["key"] = new_name
|
||||
projects.rename_metadata_condition(project_id=project_id,
|
||||
old_metadata_key=old_name,
|
||||
new_metadata_key=new_name)
|
||||
return {"data": old_metas[col_index]}
|
||||
|
||||
|
||||
|
|
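The reworked query above returns both the new column value and, via a scalar subquery, the value the row had before the update, which is what feeds rename_metadata_condition. A generic illustration of that RETURNING trick with placeholder table and column names; PostgreSQL evaluates the subquery against the statement's starting snapshot, so it still sees the old value:

# Hypothetical example: change a metadata column and capture its previous value in one statement.
sql = """UPDATE public.projects
            SET metadata_1 = %(value)s
          WHERE project_id = %(project_id)s
            AND deleted_at ISNULL
      RETURNING metadata_1,
                (SELECT metadata_1 FROM public.projects WHERE project_id = %(project_id)s) AS old_metadata_1;"""
# cur.fetchone() -> {"metadata_1": <new value>, "old_metadata_1": <previous value>}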
@ -121,8 +127,8 @@ def edit(tenant_id, project_id, index: int, new_name: str):
|
|||
def delete(tenant_id, project_id, index: int):
|
||||
index = int(index)
|
||||
old_segments = get(project_id)
|
||||
old_segments = [k["index"] for k in old_segments]
|
||||
if index not in old_segments:
|
||||
old_indexes = [k["index"] for k in old_segments]
|
||||
if index not in old_indexes:
|
||||
return {"errors": ["custom field not found"]}
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
|
|
@ -132,7 +138,8 @@ def delete(tenant_id, project_id, index: int):
|
|||
WHERE project_id = %(project_id)s AND deleted_at ISNULL;""",
|
||||
{"project_id": project_id})
|
||||
cur.execute(query=query)
|
||||
|
||||
projects.delete_metadata_condition(project_id=project_id,
|
||||
metadata_key=old_segments[old_indexes.index(index)]["key"])
|
||||
return {"data": get(project_id)}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -6,8 +6,5 @@ logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
logger.info(">>> Using experimental metrics")
from chalicelib.core.metrics import heatmaps_ch as heatmaps
from chalicelib.core.metrics import product_analytics_ch as product_analytics
else:
from chalicelib.core.metrics import heatmaps
from chalicelib.core.metrics import product_analytics
pass
@ -352,6 +352,100 @@ def update_card(metric_id, user_id, project_id, data: schemas.CardSchema):
    return get_card(metric_id=metric_id, project_id=project_id, user_id=user_id)


def search_metrics(project_id, user_id, data: schemas.MetricSearchSchema, include_series=False):
    constraints = ["metrics.project_id = %(project_id)s", "metrics.deleted_at ISNULL"]
    params = {
        "project_id": project_id,
        "user_id": user_id,
        "offset": (data.page - 1) * data.limit,
        "limit": data.limit,
    }
    if data.mine_only:
        constraints.append("user_id = %(user_id)s")
    else:
        constraints.append("(user_id = %(user_id)s OR metrics.is_public)")
    if data.shared_only:
        constraints.append("is_public")

    if data.filter is not None:
        if data.filter.type:
            constraints.append("metrics.metric_type = %(filter_type)s")
            params["filter_type"] = data.filter.type
        if data.filter.query and len(data.filter.query) > 0:
            constraints.append("(metrics.name ILIKE %(filter_query)s OR owner.owner_name ILIKE %(filter_query)s)")
            params["filter_query"] = helper.values_for_operator(
                value=data.filter.query, op=schemas.SearchEventOperator.CONTAINS
            )

    with pg_client.PostgresClient() as cur:
        sub_join = ""
        if include_series:
            sub_join = """LEFT JOIN LATERAL (
                            SELECT COALESCE(jsonb_agg(metric_series.* ORDER BY index),'[]'::jsonb) AS series
                            FROM metric_series
                            WHERE metric_series.metric_id = metrics.metric_id
                              AND metric_series.deleted_at ISNULL
                            ) AS metric_series ON (TRUE)"""

        sort_column = data.sort.field if data.sort.field is not None and len(data.sort.field) > 0 \
            else "created_at"
        # change ascend to asc and descend to desc
        sort_order = data.sort.order.value if hasattr(data.sort.order, "value") else data.sort.order
        if sort_order == "ascend":
            sort_order = "asc"
        elif sort_order == "descend":
            sort_order = "desc"

        query = cur.mogrify(
            f"""SELECT count(1) OVER () AS total,metric_id, project_id, user_id, name, is_public, created_at, edited_at,
                       metric_type, metric_of, metric_format, metric_value, view_type, is_pinned,
                       dashboards, owner_email, owner_name, default_config AS config, thumbnail
                FROM metrics
                     {sub_join}
                     LEFT JOIN LATERAL (
                        SELECT COALESCE(jsonb_agg(connected_dashboards.* ORDER BY is_public, name),'[]'::jsonb) AS dashboards
                        FROM (
                              SELECT DISTINCT dashboard_id, name, is_public
                              FROM dashboards
                                   INNER JOIN dashboard_widgets USING (dashboard_id)
                              WHERE deleted_at ISNULL
                                AND dashboard_widgets.metric_id = metrics.metric_id
                                AND project_id = %(project_id)s
                                AND ((dashboards.user_id = %(user_id)s OR is_public))
                             ) AS connected_dashboards
                        ) AS connected_dashboards ON (TRUE)
                     LEFT JOIN LATERAL (
                        SELECT email AS owner_email, name AS owner_name
                        FROM users
                        WHERE deleted_at ISNULL
                          AND users.user_id = metrics.user_id
                        ) AS owner ON (TRUE)
                WHERE {" AND ".join(constraints)}
                ORDER BY {sort_column} {sort_order}
                LIMIT %(limit)s OFFSET %(offset)s;""",
            params
        )
        cur.execute(query)
        rows = cur.fetchall()
        if len(rows) > 0:
            total = rows[0]["total"]
            if include_series:
                for r in rows:
                    r.pop("total")
                    for s in r.get("series", []):
                        s["filter"] = helper.old_search_payload_to_flat(s["filter"])
            else:
                for r in rows:
                    r.pop("total")
                    r["created_at"] = TimeUTC.datetime_to_timestamp(r["created_at"])
                    r["edited_at"] = TimeUTC.datetime_to_timestamp(r["edited_at"])
            rows = helper.list_to_camel_case(rows)
        else:
            total = 0

    return {"total": total, "list": rows}


def search_all(project_id, user_id, data: schemas.SearchCardsSchema, include_series=False):
    constraints = ["metrics.project_id = %(project_id)s",
                   "metrics.deleted_at ISNULL"]
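search_metrics interpolates the sort column and direction directly into ORDER BY, after mapping the UI's 'ascend'/'descend' tokens to SQL 'asc'/'desc'. A standalone version of that normalization, with an extra whitelist guard that the original delegates to schema validation:

def normalize_sort(field: str | None, order: str) -> tuple[str, str]:
    column = field if field else "created_at"
    order = {"ascend": "asc", "descend": "desc"}.get(order, order)
    if order not in ("asc", "desc"):      # refuse anything that is not a valid SQL direction
        order = "desc"
    return column, order


assert normalize_sort(None, "ascend") == ("created_at", "asc")
assert normalize_sort("name", "descend") == ("name", "desc")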
@ -448,7 +542,7 @@ def __get_global_attributes(row):
|
|||
if row is None or row.get("cardInfo") is None:
|
||||
return row
|
||||
card_info = row.get("cardInfo", {})
|
||||
row["compareTo"] = card_info.get("compareTo", [])
|
||||
row["compareTo"] = card_info["compareTo"] if card_info.get("compareTo") is not None else []
|
||||
return row
|
||||
|
||||
|
||||
|
|
|
|||
11  api/chalicelib/core/metrics/heatmaps/__init__.py  Normal file
@ -0,0 +1,11 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)

if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental heatmaps")
    from .heatmaps_ch import *
else:
    from .heatmaps import *
@ -5,8 +5,8 @@ from decouple import config
|
|||
logger = logging.getLogger(__name__)
|
||||
|
||||
if config("EXP_METRICS", cast=bool, default=False):
|
||||
from chalicelib.core.sessions import sessions_ch as sessions
|
||||
import chalicelib.core.sessions.sessions_ch as sessions
|
||||
else:
|
||||
from chalicelib.core.sessions import sessions
|
||||
import chalicelib.core.sessions.sessions_pg as sessions
|
||||
|
||||
from chalicelib.core.sessions import sessions_mobs
|
||||
|
|
|
|||
|
|
@ -175,7 +175,7 @@ def get_simple_funnel(filter_d: schemas.CardSeriesFilterSchema, project: schemas
|
|||
value_key=e_k
|
||||
) if not specific_condition else specific_condition)
|
||||
|
||||
full_args = {"eventTypes": tuple(event_types), **full_args, **values}
|
||||
full_args = {"eventTypes": event_types, **full_args, **values}
|
||||
n_stages = len(n_stages_query)
|
||||
if n_stages == 0:
|
||||
return []
|
||||
|
|
|
|||
10  api/chalicelib/core/metrics/product_analytics/__init__.py  Normal file
@ -0,0 +1,10 @@
import logging

from decouple import config

logger = logging.getLogger(__name__)
if config("EXP_METRICS", cast=bool, default=False):
    logger.info(">>> Using experimental product-analytics")
    from .product_analytics_ch import *
else:
    from .product_analytics import *
@ -3,7 +3,7 @@ from time import time
|
|||
|
||||
import schemas
|
||||
from chalicelib.core import metadata
|
||||
from chalicelib.core.metrics.product_analytics import __transform_journey
|
||||
from .product_analytics import __transform_journey
|
||||
from chalicelib.utils import ch_client, exp_ch_helper
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
|
|
@ -85,10 +85,9 @@ def __complete_missing_steps(start_time, end_time, density, neutral, rows, time_
|
|||
# compute avg_time_from_previous at the same level as sessions_count (this was removed in v1.22)
|
||||
# if start-point is selected, the selected event is ranked n°1
|
||||
def path_analysis(project_id: int, data: schemas.CardPathAnalysis):
|
||||
# # This code is used for testing only
|
||||
# return __get_test_data()
|
||||
|
||||
# ------ end of testing code ---
|
||||
if not data.hide_excess:
|
||||
data.hide_excess = True
|
||||
data.rows = 50
|
||||
sub_events = []
|
||||
start_points_conditions = []
|
||||
step_0_conditions = []
|
||||
|
|
@ -413,7 +413,6 @@ def update_project_conditions(project_id, conditions):
|
|||
create_project_conditions(project_id, to_be_created)
|
||||
|
||||
if to_be_updated:
|
||||
logger.debug(to_be_updated)
|
||||
update_project_condition(project_id, to_be_updated)
|
||||
|
||||
return get_conditions(project_id)
|
||||
|
|
@ -428,3 +427,45 @@ def get_projects_ids(tenant_id):
        cur.execute(query=query)
        rows = cur.fetchall()
        return [r["project_id"] for r in rows]


def delete_metadata_condition(project_id, metadata_key):
    sql = """\
        UPDATE public.projects_conditions
        SET filters=(SELECT COALESCE(jsonb_agg(elem), '[]'::jsonb)
                     FROM jsonb_array_elements(filters) AS elem
                     WHERE NOT (elem ->> 'type' = 'metadata'
                         AND elem ->> 'source' = %(metadata_key)s))
        WHERE project_id = %(project_id)s
          AND jsonb_typeof(filters) = 'array'
          AND EXISTS (SELECT 1
                      FROM jsonb_array_elements(filters) AS elem
                      WHERE elem ->> 'type' = 'metadata'
                        AND elem ->> 'source' = %(metadata_key)s);"""

    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(sql, {"project_id": project_id, "metadata_key": metadata_key})
        cur.execute(query)


def rename_metadata_condition(project_id, old_metadata_key, new_metadata_key):
    sql = """\
        UPDATE public.projects_conditions
        SET filters = (SELECT jsonb_agg(CASE
                                          WHEN elem ->> 'type' = 'metadata' AND elem ->> 'source' = %(old_metadata_key)s
                                              THEN elem || ('{"source": "'||%(new_metadata_key)s||'"}')::jsonb
                                          ELSE elem END)
                       FROM jsonb_array_elements(filters) AS elem)
        WHERE project_id = %(project_id)s
          AND jsonb_typeof(filters) = 'array'
          AND EXISTS (SELECT 1
                      FROM jsonb_array_elements(filters) AS elem
                      WHERE elem ->> 'type' = 'metadata'
                        AND elem ->> 'source' = %(old_metadata_key)s);"""

    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(sql, {"project_id": project_id, "old_metadata_key": old_metadata_key,
                                  "new_metadata_key": new_metadata_key})
        cur.execute(query)


# TODO: make project conditions use metadata-column-name instead of metadata-key
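rename_metadata_condition rewrites, inside each stored filters array, every element of type 'metadata' whose source equals the old key. The same transformation expressed in plain Python, to show the intended before/after shape of the jsonb data (a sketch, not code from this diff):

def rename_metadata_filters(filters: list[dict], old_key: str, new_key: str) -> list[dict]:
    # Equivalent of the jsonb CASE expression: only matching metadata filters get a new source.
    return [
        {**f, "source": new_key}
        if f.get("type") == "metadata" and f.get("source") == old_key
        else f
        for f in filters
    ]


before = [{"type": "metadata", "source": "plan", "value": ["pro"]},
          {"type": "user_country", "value": ["FR"]}]
after = rename_metadata_filters(before, "plan", "tier")
# -> the first element now has "source": "tier"; the second is untouched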
@ -3,9 +3,11 @@ import logging
from decouple import config

logger = logging.getLogger(__name__)
from . import sessions as sessions_legacy
from . import sessions_pg
from . import sessions_pg as sessions_legacy
from . import sessions_ch

if config("EXP_METRICS", cast=bool, default=False):
from . import sessions_ch as sessions
else:
from . import sessions
from . import sessions_pg as sessions
@ -1,10 +1,12 @@
|
|||
from decouple import config
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.core.issue_tracking import integrations_manager, base_issue
|
||||
import json
|
||||
|
||||
from decouple import config
|
||||
|
||||
from chalicelib.core.issue_tracking import integrations_manager, base_issue
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
|
||||
def __get_saved_data(project_id, session_id, issue_id, tool):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ from typing import List, Union
|
|||
|
||||
import schemas
|
||||
from chalicelib.core import events, metadata
|
||||
from chalicelib.core.sessions import performance_event, sessions_legacy
|
||||
from . import performance_event, sessions_legacy
|
||||
from chalicelib.utils import pg_client, helper, metrics_helper, ch_client, exp_ch_helper
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
|
||||
|
|
@ -153,7 +153,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
"isEvent": True,
|
||||
"value": [],
|
||||
"operator": e.operator,
|
||||
"filters": []
|
||||
"filters": e.filters
|
||||
})
|
||||
for v in e.value:
|
||||
if v not in extra_conditions[e.operator].value:
|
||||
|
|
@ -178,7 +178,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
"isEvent": True,
|
||||
"value": [],
|
||||
"operator": e.operator,
|
||||
"filters": []
|
||||
"filters": e.filters
|
||||
})
|
||||
for v in e.value:
|
||||
if v not in extra_conditions[e.operator].value:
|
||||
|
|
@ -870,12 +870,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
events_conditions[-1]["condition"] = []
|
||||
if not is_any and event.value not in [None, "*", ""]:
|
||||
event_where.append(
|
||||
sh.multi_conditions(f"(main1.message {op} %({e_k})s OR main1.name {op} %({e_k})s)",
|
||||
sh.multi_conditions(f"(toString(main1.`$properties`.message) {op} %({e_k})s OR toString(main1.`$properties`.name) {op} %({e_k})s)",
|
||||
event.value, value_key=e_k))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
events_extra_join += f" AND {event_where[-1]}"
|
||||
if len(event.source) > 0 and event.source[0] not in [None, "*", ""]:
|
||||
event_where.append(sh.multi_conditions(f"main1.source = %({s_k})s", event.source, value_key=s_k))
|
||||
event_where.append(sh.multi_conditions(f"toString(main1.`$properties`.source) = %({s_k})s", event.source, value_key=s_k))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
events_extra_join += f" AND {event_where[-1]}"
|
||||
|
||||
|
|
@ -1108,8 +1108,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
is_any = sh.isAny_opreator(f.operator)
|
||||
if is_any or len(f.value) == 0:
|
||||
continue
|
||||
is_negative_operator = sh.is_negation_operator(f.operator)
|
||||
f.value = helper.values_for_operator(value=f.value, op=f.operator)
|
||||
op = sh.get_sql_operator(f.operator)
|
||||
r_op = ""
|
||||
if is_negative_operator:
|
||||
r_op = sh.reverse_sql_operator(op)
|
||||
e_k_f = e_k + f"_fetch{j}"
|
||||
full_args = {**full_args, **sh.multi_values(f.value, value_key=e_k_f)}
|
||||
if f.type == schemas.FetchFilterType.FETCH_URL:
|
||||
|
|
@ -1118,6 +1122,12 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
if is_negative_operator:
|
||||
events_conditions_not.append(
|
||||
{
|
||||
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
|
||||
events_conditions_not[-1]["condition"] = sh.multi_conditions(
|
||||
f"sub.`$properties`.url_path {r_op} %({e_k_f})s", f.value, value_key=e_k_f)
|
||||
elif f.type == schemas.FetchFilterType.FETCH_STATUS_CODE:
|
||||
event_where.append(json_condition(
|
||||
"main", "$properties", 'status', op, f.value, e_k_f, True, True
|
||||
|
|
@ -1130,6 +1140,13 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
if is_negative_operator:
|
||||
events_conditions_not.append(
|
||||
{
|
||||
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
|
||||
events_conditions_not[-1]["condition"] = sh.multi_conditions(
|
||||
f"sub.`$properties`.method {r_op} %({e_k_f})s", f.value,
|
||||
value_key=e_k_f)
|
||||
elif f.type == schemas.FetchFilterType.FETCH_DURATION:
|
||||
event_where.append(
|
||||
sh.multi_conditions(f"main.`$duration_s` {f.operator} %({e_k_f})s/1000", f.value,
|
||||
|
|
@ -1142,12 +1159,26 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
if is_negative_operator:
|
||||
events_conditions_not.append(
|
||||
{
|
||||
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
|
||||
events_conditions_not[-1]["condition"] = sh.multi_conditions(
|
||||
f"sub.`$properties`.request_body {r_op} %({e_k_f})s", f.value,
|
||||
value_key=e_k_f)
|
||||
elif f.type == schemas.FetchFilterType.FETCH_RESPONSE_BODY:
|
||||
event_where.append(json_condition(
|
||||
"main", "$properties", 'response_body', op, f.value, e_k_f
|
||||
))
|
||||
events_conditions[-1]["condition"].append(event_where[-1])
|
||||
apply = True
|
||||
if is_negative_operator:
|
||||
events_conditions_not.append(
|
||||
{
|
||||
"type": f"sub.`$event_name`='{exp_ch_helper.get_event_type(event_type, platform=platform)}'"})
|
||||
events_conditions_not[-1]["condition"] = sh.multi_conditions(
|
||||
f"sub.`$properties`.response_body {r_op} %({e_k_f})s", f.value,
|
||||
value_key=e_k_f)
|
||||
else:
|
||||
logging.warning(f"undefined FETCH filter: {f.type}")
|
||||
if not apply:
|
||||
|
|
@ -1395,17 +1426,30 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
if extra_conditions and len(extra_conditions) > 0:
|
||||
_extra_or_condition = []
|
||||
for i, c in enumerate(extra_conditions):
|
||||
if sh.isAny_opreator(c.operator):
|
||||
if sh.isAny_opreator(c.operator) and c.type != schemas.EventType.REQUEST_DETAILS.value:
|
||||
continue
|
||||
e_k = f"ec_value{i}"
|
||||
op = sh.get_sql_operator(c.operator)
|
||||
c.value = helper.values_for_operator(value=c.value, op=c.operator)
|
||||
full_args = {**full_args,
|
||||
**sh.multi_values(c.value, value_key=e_k)}
|
||||
if c.type == events.EventType.LOCATION.ui_type:
|
||||
if c.type in (schemas.EventType.LOCATION.value, schemas.EventType.REQUEST.value):
|
||||
_extra_or_condition.append(
|
||||
sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
|
||||
c.value, value_key=e_k))
|
||||
elif c.type == schemas.EventType.REQUEST_DETAILS.value:
|
||||
for j, c_f in enumerate(c.filters):
|
||||
if sh.isAny_opreator(c_f.operator) or len(c_f.value) == 0:
|
||||
continue
|
||||
e_k += f"_{j}"
|
||||
op = sh.get_sql_operator(c_f.operator)
|
||||
c_f.value = helper.values_for_operator(value=c_f.value, op=c_f.operator)
|
||||
full_args = {**full_args,
|
||||
**sh.multi_values(c_f.value, value_key=e_k)}
|
||||
if c_f.type == schemas.FetchFilterType.FETCH_URL.value:
|
||||
_extra_or_condition.append(
|
||||
sh.multi_conditions(f"extra_event.url_path {op} %({e_k})s",
|
||||
c_f.value, value_key=e_k))
|
||||
else:
|
||||
logging.warning(f"unsupported extra_event type:${c.type}")
|
||||
if len(_extra_or_condition) > 0:
|
||||
|
|
@ -1416,9 +1460,10 @@ def search_query_parts_ch(data: schemas.SessionsSearchPayloadSchema, error_statu
|
|||
query_part = f"""{f"({events_query_part}) AS f" if len(events_query_part) > 0 else ""}"""
|
||||
else:
|
||||
if len(events_query_part) > 0:
|
||||
extra_join += f"""INNER JOIN (SELECT *
|
||||
extra_join += f"""INNER JOIN (SELECT DISTINCT ON (session_id) *
|
||||
FROM {MAIN_SESSIONS_TABLE} AS s {extra_event}
|
||||
WHERE {" AND ".join(extra_constraints)}) AS s ON(s.session_id=f.session_id)"""
|
||||
WHERE {" AND ".join(extra_constraints)}
|
||||
ORDER BY _timestamp DESC) AS s ON(s.session_id=f.session_id)"""
|
||||
else:
|
||||
deduplication_keys = ["session_id"] + extra_deduplication
|
||||
extra_join = f"""(SELECT *
|
||||
|
|
|
|||
|
|
@@ -0,0 +1 @@
from .sessions_devtool import *
|
@@ -4,7 +4,7 @@ import schemas
 from chalicelib.utils.storage import StorageClient
 
 
-def __get_devtools_keys(project_id, session_id):
+def get_devtools_keys(project_id, session_id):
     params = {
         "sessionId": session_id,
         "projectId": project_id
@@ -16,7 +16,7 @@ def __get_devtools_keys(project_id, session_id):
 
 def get_urls(session_id, project_id, context: schemas.CurrentContext, check_existence: bool = True):
     results = []
-    for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
+    for k in get_devtools_keys(project_id=project_id, session_id=session_id):
         if check_existence and not StorageClient.exists(bucket=config("sessions_bucket"), key=k):
             continue
         results.append(StorageClient.get_presigned_url_for_sharing(
@@ -29,5 +29,5 @@ def get_urls(session_id, project_id, context: schemas.CurrentContext, check_exis
 
 def delete_mobs(project_id, session_ids):
     for session_id in session_ids:
-        for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
+        for k in get_devtools_keys(project_id=project_id, session_id=session_id):
             StorageClient.tag_for_deletion(bucket=config("sessions_bucket"), key=k)
|
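The rename from __get_devtools_keys to get_devtools_keys matters for the new package __init__.py (`from .sessions_devtool import *`): a star-import skips names that start with an underscore unless they are listed in __all__. A minimal, self-contained illustration with stand-in module and function names:

```python
# Minimal illustration of star-import visibility; not project code.
import types

stub = types.ModuleType("sessions_devtool_stub")
exec(
    "def _hidden():\n"
    "    return 'skipped by star-import'\n"
    "def get_devtools_keys():\n"
    "    return ['devtools-key-1']\n",
    stub.__dict__,
)

# What `from sessions_devtool_stub import *` would pick up:
star_names = [name for name in vars(stub) if not name.startswith("_")]
assert "get_devtools_keys" in star_names
assert "_hidden" not in star_names
print(star_names)  # ['get_devtools_keys']
```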
||||
|
|
@@ -0,0 +1 @@
from .sessions_favorite import *
|
|
@ -1,76 +1,81 @@
|
|||
from functools import cache
|
||||
|
||||
import schemas
|
||||
from chalicelib.core.autocomplete import autocomplete
|
||||
from chalicelib.utils.event_filter_definition import SupportedFilter
|
||||
|
||||
SUPPORTED_TYPES = {
|
||||
schemas.FilterType.USER_OS: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
|
||||
schemas.FilterType.USER_BROWSER: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
|
||||
schemas.FilterType.USER_DEVICE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
|
||||
schemas.FilterType.USER_COUNTRY: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
|
||||
schemas.FilterType.USER_CITY: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
|
||||
schemas.FilterType.USER_STATE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
|
||||
schemas.FilterType.USER_ID: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
|
||||
schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
|
||||
schemas.FilterType.REV_ID: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
|
||||
schemas.FilterType.REFERRER: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
|
||||
schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
|
||||
schemas.FilterType.UTM_MEDIUM: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
|
||||
schemas.FilterType.UTM_SOURCE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
|
||||
# Mobile
|
||||
schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
|
||||
schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(
|
||||
typename=schemas.FilterType.USER_DEVICE_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
|
||||
schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
|
||||
schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
|
||||
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
|
||||
schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
|
||||
query=autocomplete.__generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
|
||||
|
||||
}
|
||||
@cache
|
||||
def supported_types():
|
||||
return {
|
||||
schemas.FilterType.USER_OS: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS)),
|
||||
schemas.FilterType.USER_BROWSER: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_BROWSER)),
|
||||
schemas.FilterType.USER_DEVICE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE)),
|
||||
schemas.FilterType.USER_COUNTRY: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY)),
|
||||
schemas.FilterType.USER_CITY: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_CITY)),
|
||||
schemas.FilterType.USER_STATE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_STATE)),
|
||||
schemas.FilterType.USER_ID: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID)),
|
||||
schemas.FilterType.USER_ANONYMOUS_ID: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID)),
|
||||
schemas.FilterType.REV_ID: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID)),
|
||||
schemas.FilterType.REFERRER: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REFERRER)),
|
||||
schemas.FilterType.UTM_CAMPAIGN: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_CAMPAIGN)),
|
||||
schemas.FilterType.UTM_MEDIUM: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_MEDIUM)),
|
||||
schemas.FilterType.UTM_SOURCE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.UTM_SOURCE)),
|
||||
# Mobile
|
||||
schemas.FilterType.USER_OS_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_OS_MOBILE)),
|
||||
schemas.FilterType.USER_DEVICE_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(
|
||||
typename=schemas.FilterType.USER_DEVICE_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_DEVICE_MOBILE)),
|
||||
schemas.FilterType.USER_COUNTRY_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_COUNTRY_MOBILE)),
|
||||
schemas.FilterType.USER_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ID_MOBILE)),
|
||||
schemas.FilterType.USER_ANONYMOUS_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.USER_ANONYMOUS_ID_MOBILE)),
|
||||
schemas.FilterType.REV_ID_MOBILE: SupportedFilter(
|
||||
get=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE),
|
||||
query=autocomplete.generic_autocomplete_metas(typename=schemas.FilterType.REV_ID_MOBILE)),
|
||||
|
||||
}
|
||||
|
||||
|
||||
def search(text: str, meta_type: schemas.FilterType, project_id: int):
|
||||
rows = []
|
||||
if meta_type not in list(SUPPORTED_TYPES.keys()):
|
||||
if meta_type not in list(supported_types().keys()):
|
||||
return {"errors": ["unsupported type"]}
|
||||
rows += SUPPORTED_TYPES[meta_type].get(project_id=project_id, text=text)
|
||||
rows += supported_types()[meta_type].get(project_id=project_id, text=text)
|
||||
# for IOS events autocomplete
|
||||
# if meta_type + "_IOS" in list(SUPPORTED_TYPES.keys()):
|
||||
# rows += SUPPORTED_TYPES[meta_type + "_IOS"].get(project_id=project_id, text=text)
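The refactor above swaps a module-level SUPPORTED_TYPES dict, built at import time, for a functools.cache-memoised supported_types() factory that builds the map lazily on first use. A minimal sketch of the pattern, with a stand-in for autocomplete.generic_autocomplete_metas:

```python
from functools import cache


def generic_autocomplete_metas(typename: str):
    # Stand-in for autocomplete.generic_autocomplete_metas(typename=...).
    def handler(project_id: int, text: str) -> list[dict]:
        return [{"type": typename, "value": text, "project_id": project_id}]
    return handler


@cache
def supported_types() -> dict:
    # Built once, on the first call, and reused afterwards
    # (the shared dict is treated as read-only).
    return {
        "USER_OS": generic_autocomplete_metas("USER_OS"),
        "USER_BROWSER": generic_autocomplete_metas("USER_BROWSER"),
    }


rows = supported_types()["USER_OS"](project_id=1, text="Ubu")
print(rows)
```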
|
||||
|
|
|
|||
|
|
@ -74,7 +74,7 @@ def get_all_notes_by_project_id(tenant_id, project_id, user_id, data: schemas.Se
|
|||
|
||||
# filter by ownership or shared status
|
||||
if data.shared_only:
|
||||
conditions.append("sessions_notes.is_public")
|
||||
conditions.append("sessions_notes.is_public IS TRUE")
|
||||
elif data.mine_only:
|
||||
conditions.append("sessions_notes.user_id = %(user_id)s")
|
||||
else:
|
||||
|
|
|
|||
|
|
@ -3,7 +3,7 @@ from typing import List, Union
|
|||
|
||||
import schemas
|
||||
from chalicelib.core import events, metadata
|
||||
from chalicelib.core.sessions import performance_event
|
||||
from . import performance_event
|
||||
from chalicelib.utils import pg_client, helper, metrics_helper
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
|
||||
|
|
@ -148,7 +148,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
"isEvent": True,
|
||||
"value": [],
|
||||
"operator": e.operator,
|
||||
"filters": []
|
||||
"filters": e.filters
|
||||
})
|
||||
for v in e.value:
|
||||
if v not in extra_conditions[e.operator].value:
|
||||
|
|
@ -165,7 +165,7 @@ def search2_table(data: schemas.SessionsSearchPayloadSchema, project_id: int, de
|
|||
"isEvent": True,
|
||||
"value": [],
|
||||
"operator": e.operator,
|
||||
"filters": []
|
||||
"filters": e.filters
|
||||
})
|
||||
for v in e.value:
|
||||
if v not in extra_conditions[e.operator].value:
|
||||
|
|
@ -989,7 +989,7 @@ def search_query_parts(data: schemas.SessionsSearchPayloadSchema, error_status,
|
|||
sh.multi_conditions(f"ev.{events.EventType.LOCATION.column} {op} %({e_k})s",
|
||||
c.value, value_key=e_k))
|
||||
else:
|
||||
logger.warning(f"unsupported extra_event type:${c.type}")
|
||||
logger.warning(f"unsupported extra_event type: {c.type}")
|
||||
if len(_extra_or_condition) > 0:
|
||||
extra_constraints.append("(" + " OR ".join(_extra_or_condition) + ")")
|
||||
query_part = f"""\
|
||||
|
|
@ -1,8 +1,8 @@
|
|||
import schemas
|
||||
from chalicelib.core import events, metadata, events_mobile, \
|
||||
issues, assist, canvas, user_testing
|
||||
from chalicelib.core.sessions import sessions_mobs, sessions_devtool
|
||||
from chalicelib.utils import errors_helper
|
||||
from . import sessions_mobs, sessions_devtool
|
||||
from chalicelib.core.errors.modules import errors_helper
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.core.modules import MOB_KEY, get_file_key
|
||||
|
||||
|
|
|
|||
|
|
@ -1,11 +1,9 @@
|
|||
import logging
|
||||
from typing import List, Union
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import events, metadata, projects
|
||||
from chalicelib.core.sessions import sessions_favorite, performance_event, sessions_legacy
|
||||
from chalicelib.utils import pg_client, helper, metrics_helper
|
||||
from chalicelib.utils import sql_helper as sh
|
||||
from chalicelib.core import metadata, projects
|
||||
from . import sessions_favorite, sessions_legacy
|
||||
from chalicelib.utils import pg_client, helper
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
@ -45,7 +43,13 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
|
|||
count_only=False, issue=None, ids_only=False, platform="web"):
|
||||
if data.bookmarked:
|
||||
data.startTimestamp, data.endTimestamp = sessions_favorite.get_start_end_timestamp(project.project_id, user_id)
|
||||
|
||||
if data.startTimestamp is None:
|
||||
logger.debug(f"No vault sessions found for project:{project.project_id}")
|
||||
return {
|
||||
'total': 0,
|
||||
'sessions': [],
|
||||
'src': 1
|
||||
}
|
||||
full_args, query_part = sessions_legacy.search_query_parts(data=data, error_status=error_status,
|
||||
errors_only=errors_only,
|
||||
favorite_only=data.bookmarked, issue=issue,
|
||||
|
|
@ -118,7 +122,10 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
|
|||
sort = 'session_id'
|
||||
if data.sort is not None and data.sort != "session_id":
|
||||
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
|
||||
sort = helper.key_to_snake_case(data.sort)
|
||||
if data.sort == 'datetime':
|
||||
sort = 'start_ts'
|
||||
else:
|
||||
sort = helper.key_to_snake_case(data.sort)
|
||||
|
||||
meta_keys = metadata.get(project_id=project.project_id)
|
||||
main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
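A hedged sketch of the sort handling above: the API's sort field is mapped to a database column, with 'datetime' special-cased to start_ts. The camelCase-to-snake_case regex is a generic stand-in for helper.key_to_snake_case, whose exact implementation is not shown here.

```python
import re


def key_to_snake_case(key: str) -> str:
    # Generic camelCase -> snake_case; stand-in for helper.key_to_snake_case.
    return re.sub(r"(?<!^)(?=[A-Z])", "_", key).lower()


def resolve_sort(sort: str | None) -> str:
    if sort is None or sort == "session_id":
        return "session_id"
    if sort == "datetime":
        return "start_ts"
    return key_to_snake_case(sort)


print(resolve_sort("eventsCount"))  # events_count
print(resolve_sort("datetime"))     # start_ts
```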
|
||||
|
|
@ -168,7 +175,8 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project: schemas.
|
|||
# reverse=data.order.upper() == "DESC")
|
||||
return {
|
||||
'total': total,
|
||||
'sessions': helper.list_to_camel_case(sessions)
|
||||
'sessions': helper.list_to_camel_case(sessions),
|
||||
'src': 1
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
1  api/chalicelib/core/sessions/sessions_viewed/__init__.py  Normal file
|
|
@@ -0,0 +1 @@
from .sessions_viewed import *
|
||||
|
|
@ -1,6 +1,7 @@
|
|||
import logging
|
||||
|
||||
from chalicelib.core import sessions, assist
|
||||
from chalicelib.core import assist
|
||||
from . import sessions
|
||||
|
||||
logger = logging.getLogger(__name__)
|
||||
|
||||
|
|
|
|||
|
|
@ -18,7 +18,7 @@ def refresh_spot_jwt_iat_jti(user_id):
|
|||
{"user_id": user_id})
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return row.get("spot_jwt_iat"), row.get("spot_jwt_refresh_jti"), row.get("spot_jwt_refresh_iat")
|
||||
return users.RefreshSpotJWTs(**row)
|
||||
|
||||
|
||||
def logout(user_id: int):
|
||||
|
|
@ -26,13 +26,13 @@ def logout(user_id: int):
|
|||
|
||||
|
||||
def refresh(user_id: int, tenant_id: int = -1) -> dict:
|
||||
spot_jwt_iat, spot_jwt_r_jti, spot_jwt_r_iat = refresh_spot_jwt_iat_jti(user_id=user_id)
|
||||
j = refresh_spot_jwt_iat_jti(user_id=user_id)
|
||||
return {
|
||||
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=spot_jwt_iat,
|
||||
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=j.spot_jwt_iat,
|
||||
aud=AUDIENCE, for_spot=True),
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=spot_jwt_r_iat,
|
||||
aud=AUDIENCE, jwt_jti=spot_jwt_r_jti, for_spot=True),
|
||||
"refreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) - (spot_jwt_iat - spot_jwt_r_iat)
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=j.spot_jwt_refresh_iat,
|
||||
aud=AUDIENCE, jwt_jti=j.spot_jwt_refresh_jti, for_spot=True),
|
||||
"refreshTokenMaxAge": config("JWT_SPOT_REFRESH_EXPIRATION", cast=int) - (j.spot_jwt_iat - j.spot_jwt_refresh_iat)
|
||||
}
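A worked example of the refreshTokenMaxAge arithmetic above: the refresh cookie's remaining lifetime shrinks by however much time passed between the refresh token's iat and the newly issued access token's iat. The numbers below are illustrative.

```python
JWT_SPOT_REFRESH_EXPIRATION = 7 * 24 * 3600        # e.g. 7 days; stand-in for config()

spot_jwt_refresh_iat = 1_700_000_000               # when the refresh token was issued
spot_jwt_iat = spot_jwt_refresh_iat + 3_600        # new access token, one hour later

refresh_token_max_age = JWT_SPOT_REFRESH_EXPIRATION - (spot_jwt_iat - spot_jwt_refresh_iat)
print(refresh_token_max_age)                       # 601200 s = 7 days minus 1 hour
```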
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -1,13 +1,14 @@
|
|||
import json
|
||||
import secrets
|
||||
from typing import Optional
|
||||
|
||||
from decouple import config
|
||||
from fastapi import BackgroundTasks
|
||||
from pydantic import BaseModel
|
||||
from pydantic import BaseModel, model_validator
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import authorizers, metadata
|
||||
from chalicelib.core import tenants, spot, scope
|
||||
from chalicelib.core import authorizers
|
||||
from chalicelib.core import tenants, spot
|
||||
from chalicelib.utils import email_helper
|
||||
from chalicelib.utils import helper
|
||||
from chalicelib.utils import pg_client
|
||||
|
|
@ -83,7 +84,6 @@ def restore_member(user_id, email, invitation_token, admin, name, owner=False):
|
|||
"name": name, "invitation_token": invitation_token})
|
||||
cur.execute(query)
|
||||
result = cur.fetchone()
|
||||
cur.execute(query)
|
||||
result["created_at"] = TimeUTC.datetime_to_timestamp(result["created_at"])
|
||||
return helper.dict_to_camel_case(result)
|
||||
|
||||
|
|
@ -284,7 +284,7 @@ def edit_member(user_id_to_update, tenant_id, changes: schemas.EditMemberSchema,
|
|||
if editor_id != user_id_to_update:
|
||||
admin = get_user_role(tenant_id=tenant_id, user_id=editor_id)
|
||||
if not admin["superAdmin"] and not admin["admin"]:
|
||||
return {"errors": ["unauthorized"]}
|
||||
return {"errors": ["unauthorized, you must have admin privileges"]}
|
||||
if admin["admin"] and user["superAdmin"]:
|
||||
return {"errors": ["only the owner can edit his own details"]}
|
||||
else:
|
||||
|
|
@ -552,14 +552,35 @@ def refresh_auth_exists(user_id, jwt_jti=None):
|
|||
return r is not None
|
||||
|
||||
|
||||
class ChangeJwt(BaseModel):
|
||||
class FullLoginJWTs(BaseModel):
|
||||
jwt_iat: int
|
||||
jwt_refresh_jti: int
|
||||
jwt_refresh_jti: str
|
||||
jwt_refresh_iat: int
|
||||
spot_jwt_iat: int
|
||||
spot_jwt_refresh_jti: int
|
||||
spot_jwt_refresh_jti: str
|
||||
spot_jwt_refresh_iat: int
|
||||
|
||||
@model_validator(mode="before")
|
||||
@classmethod
|
||||
def _transform_data(cls, values):
|
||||
if values.get("jwt_refresh_jti") is not None:
|
||||
values["jwt_refresh_jti"] = str(values["jwt_refresh_jti"])
|
||||
if values.get("spot_jwt_refresh_jti") is not None:
|
||||
values["spot_jwt_refresh_jti"] = str(values["spot_jwt_refresh_jti"])
|
||||
return values
|
||||
|
||||
|
||||
class RefreshLoginJWTs(FullLoginJWTs):
|
||||
spot_jwt_iat: Optional[int] = None
|
||||
spot_jwt_refresh_jti: Optional[str] = None
|
||||
spot_jwt_refresh_iat: Optional[int] = None
|
||||
|
||||
|
||||
class RefreshSpotJWTs(FullLoginJWTs):
|
||||
jwt_iat: Optional[int] = None
|
||||
jwt_refresh_jti: Optional[str] = None
|
||||
jwt_refresh_iat: Optional[int] = None
|
||||
|
||||
|
||||
def change_jwt_iat_jti(user_id):
|
||||
with pg_client.PostgresClient() as cur:
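A self-contained sketch of the FullLoginJWTs pattern above (pydantic v2): model_validator(mode="before") coerces JTI columns that the database may return as integers into strings, and the Refresh* subclasses relax the fields that are absent for their flow. The sample row is invented.

```python
from typing import Optional
from pydantic import BaseModel, model_validator


class FullLoginJWTs(BaseModel):
    jwt_iat: int
    jwt_refresh_jti: str
    jwt_refresh_iat: int
    spot_jwt_iat: int
    spot_jwt_refresh_jti: str
    spot_jwt_refresh_iat: int

    @model_validator(mode="before")
    @classmethod
    def _coerce_jti_to_str(cls, values):
        # The DB may hand back the jti columns as integers; keep them as strings.
        for key in ("jwt_refresh_jti", "spot_jwt_refresh_jti"):
            if isinstance(values, dict) and values.get(key) is not None:
                values[key] = str(values[key])
        return values


class RefreshSpotJWTs(FullLoginJWTs):
    # A Spot-only refresh row carries no main-login JWT columns.
    jwt_iat: Optional[int] = None
    jwt_refresh_jti: Optional[str] = None
    jwt_refresh_iat: Optional[int] = None


row = {"spot_jwt_iat": 1_700_000_000, "spot_jwt_refresh_jti": 42,
       "spot_jwt_refresh_iat": 1_699_999_000}
print(RefreshSpotJWTs(**row).spot_jwt_refresh_jti)  # '42'
```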
|
||||
|
|
@ -580,7 +601,7 @@ def change_jwt_iat_jti(user_id):
|
|||
{"user_id": user_id})
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return ChangeJwt(**row)
|
||||
return FullLoginJWTs(**row)
|
||||
|
||||
|
||||
def refresh_jwt_iat_jti(user_id):
|
||||
|
|
@ -595,7 +616,7 @@ def refresh_jwt_iat_jti(user_id):
|
|||
{"user_id": user_id})
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return row.get("jwt_iat"), row.get("jwt_refresh_jti"), row.get("jwt_refresh_iat")
|
||||
return RefreshLoginJWTs(**row)
|
||||
|
||||
|
||||
def authenticate(email, password, for_change_password=False) -> dict | bool | None:
|
||||
|
|
@ -627,9 +648,12 @@ def authenticate(email, password, for_change_password=False) -> dict | bool | No
|
|||
response = {
|
||||
"jwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'], iat=j_r.jwt_iat,
|
||||
aud=AUDIENCE),
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=r['userId'], tenant_id=r['tenantId'],
|
||||
iat=j_r.jwt_refresh_iat, aud=AUDIENCE,
|
||||
jwt_jti=j_r.jwt_refresh_jti),
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=r['userId'],
|
||||
tenant_id=r['tenantId'],
|
||||
iat=j_r.jwt_refresh_iat,
|
||||
aud=AUDIENCE,
|
||||
jwt_jti=j_r.jwt_refresh_jti,
|
||||
for_spot=False),
|
||||
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int),
|
||||
"email": email,
|
||||
"spotJwt": authorizers.generate_jwt(user_id=r['userId'], tenant_id=r['tenantId'],
|
||||
|
|
@ -660,13 +684,13 @@ def logout(user_id: int):
|
|||
|
||||
|
||||
def refresh(user_id: int, tenant_id: int = -1) -> dict:
|
||||
jwt_iat, jwt_r_jti, jwt_r_iat = refresh_jwt_iat_jti(user_id=user_id)
|
||||
j = refresh_jwt_iat_jti(user_id=user_id)
|
||||
return {
|
||||
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=jwt_iat,
|
||||
"jwt": authorizers.generate_jwt(user_id=user_id, tenant_id=tenant_id, iat=j.jwt_iat,
|
||||
aud=AUDIENCE),
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=jwt_r_iat,
|
||||
aud=AUDIENCE, jwt_jti=jwt_r_jti),
|
||||
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int) - (jwt_iat - jwt_r_iat)
|
||||
"refreshToken": authorizers.generate_jwt_refresh(user_id=user_id, tenant_id=tenant_id, iat=j.jwt_refresh_iat,
|
||||
aud=AUDIENCE, jwt_jti=j.jwt_refresh_jti),
|
||||
"refreshTokenMaxAge": config("JWT_REFRESH_EXPIRATION", cast=int) - (j.jwt_iat - j.jwt_refresh_iat),
|
||||
}
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -41,8 +41,7 @@ class ClickHouseClient:
|
|||
keys = tuple(x for x, y in results[1])
|
||||
return [dict(zip(keys, i)) for i in results[0]]
|
||||
except Exception as err:
|
||||
logger.error("--------- CH EXCEPTION -----------")
|
||||
logger.error(err)
|
||||
logger.error("--------- CH EXCEPTION -----------", exc_info=err)
|
||||
logger.error("--------- CH QUERY EXCEPTION -----------")
|
||||
logger.error(self.format(query=query, parameters=parameters)
|
||||
.replace('\n', '\\n')
|
||||
|
|
|
|||
|
|
@ -34,7 +34,10 @@ if config("CH_COMPRESSION", cast=bool, default=True):
|
|||
def transform_result(self, original_function):
|
||||
@wraps(original_function)
|
||||
def wrapper(*args, **kwargs):
|
||||
logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
|
||||
if kwargs.get("parameters"):
|
||||
logger.debug(str.encode(self.format(query=kwargs.get("query", ""), parameters=kwargs.get("parameters"))))
|
||||
elif len(args) > 0:
|
||||
logger.debug(str.encode(args[0]))
|
||||
result = original_function(*args, **kwargs)
|
||||
if isinstance(result, clickhouse_connect.driver.query.QueryResult):
|
||||
column_names = result.column_names
|
||||
|
|
@ -108,14 +111,14 @@ def make_pool():
|
|||
try:
|
||||
CH_pool.close_all()
|
||||
except Exception as error:
|
||||
logger.error("Error while closing all connexions to CH", error)
|
||||
logger.error("Error while closing all connexions to CH", exc_info=error)
|
||||
try:
|
||||
CH_pool = ClickHouseConnectionPool(min_size=config("CH_MINCONN", cast=int, default=4),
|
||||
max_size=config("CH_MAXCONN", cast=int, default=8))
|
||||
if CH_pool is not None:
|
||||
logger.info("Connection pool created successfully for CH")
|
||||
except ConnectionError as error:
|
||||
logger.error("Error while connecting to CH", error)
|
||||
logger.error("Error while connecting to CH", exc_info=error)
|
||||
if RETRY < RETRY_MAX:
|
||||
RETRY += 1
|
||||
logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
|
||||
|
|
@ -146,13 +149,11 @@ class ClickHouseClient:
|
|||
def __enter__(self):
|
||||
return self.__client
|
||||
|
||||
def format(self, query, *, parameters=None):
|
||||
if parameters is None:
|
||||
return query
|
||||
return query % {
|
||||
key: f"'{value}'" if isinstance(value, str) else value
|
||||
for key, value in parameters.items()
|
||||
}
|
||||
def format(self, query, parameters=None):
|
||||
if parameters:
|
||||
ctx = QueryContext(query=query, parameters=parameters)
|
||||
return ctx.final_query
|
||||
return query
|
||||
|
||||
def __exit__(self, *args):
|
||||
if config('CH_POOL', cast=bool, default=True):
|
||||
|
|
@ -174,4 +175,4 @@ async def terminate():
|
|||
CH_pool.close_all()
|
||||
logger.info("Closed all connexions to CH")
|
||||
except Exception as error:
|
||||
logger.error("Error while closing all connexions to CH", error)
|
||||
logger.error("Error while closing all connexions to CH", exc_info=error)
|
||||
|
|
|
|||
|
|
@ -1,14 +0,0 @@
|
|||
from chalicelib.core.sourcemaps import sourcemaps
|
||||
|
||||
|
||||
def format_first_stack_frame(error):
|
||||
error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
|
||||
for s in error["stack"]:
|
||||
for c in s.get("context", []):
|
||||
for sci, sc in enumerate(c):
|
||||
if isinstance(sc, str) and len(sc) > 1000:
|
||||
c[sci] = sc[:1000]
|
||||
# convert bytes to string:
|
||||
if isinstance(s["filename"], bytes):
|
||||
s["filename"] = s["filename"].decode("utf-8")
|
||||
return error
|
||||
|
|
@ -2,6 +2,8 @@ from typing import List
|
|||
|
||||
|
||||
def get_step_size(startTimestamp, endTimestamp, density, decimal=False, factor=1000):
|
||||
if endTimestamp == 0:
|
||||
raise Exception("endTimestamp cannot be 0 in order to get step size")
|
||||
step_size = (endTimestamp // factor - startTimestamp // factor)
|
||||
if density <= 1:
|
||||
return step_size
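A tiny worked example of the part of get_step_size shown above: timestamps arrive in milliseconds and factor=1000 converts the window to seconds before it is split by density (the remainder of the function is outside this hunk).

```python
start_ts = 1_735_599_600_000   # window start, ms
end_ts = 1_736_290_799_999     # window end, ms
factor = 1000                  # ms -> s

if end_ts == 0:
    raise Exception("endTimestamp cannot be 0 in order to get step size")

step_size = end_ts // factor - start_ts // factor
print(step_size)  # 691199 seconds covered by the window, before density is applied
```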
|
||||
|
|
|
|||
|
|
@ -19,6 +19,16 @@ PG_CONFIG = dict(_PG_CONFIG)
|
|||
if config("PG_TIMEOUT", cast=int, default=0) > 0:
|
||||
PG_CONFIG["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int) * 1000}"
|
||||
|
||||
if config('PG_POOL', cast=bool, default=True):
|
||||
PG_CONFIG = {
|
||||
**PG_CONFIG,
|
||||
# Keepalive settings
|
||||
"keepalives": 1, # Enable keepalives
|
||||
"keepalives_idle": 300, # Seconds before sending keepalive
|
||||
"keepalives_interval": 10, # Seconds between keepalives
|
||||
"keepalives_count": 3 # Number of keepalives before giving up
|
||||
}
|
||||
|
||||
|
||||
class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
|
||||
def __init__(self, minconn, maxconn, *args, **kwargs):
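The keepalive keys added to PG_CONFIG above are standard libpq connection parameters, so psycopg2.connect forwards them as-is. A self-contained sketch with placeholder DSN values:

```python
import psycopg2  # assumed installed, as in the diff

PG_CONFIG = {
    "host": "localhost", "port": 5432, "dbname": "app",   # placeholders
    "user": "app", "password": "secret",
    "keepalives": 1,            # enable TCP keepalives
    "keepalives_idle": 300,     # seconds of idle before the first probe
    "keepalives_interval": 10,  # seconds between probes
    "keepalives_count": 3,      # failed probes before the connection is dropped
}


def connect():
    # Every key is passed through to libpq as a connection parameter.
    return psycopg2.connect(**PG_CONFIG)
```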
|
||||
|
|
@ -55,6 +65,7 @@ RETRY = 0
|
|||
|
||||
def make_pool():
|
||||
if not config('PG_POOL', cast=bool, default=True):
|
||||
logger.info("PG_POOL is disabled, not creating a new one")
|
||||
return
|
||||
global postgreSQL_pool
|
||||
global RETRY
|
||||
|
|
@ -62,7 +73,7 @@ def make_pool():
|
|||
try:
|
||||
postgreSQL_pool.closeall()
|
||||
except (Exception, psycopg2.DatabaseError) as error:
|
||||
logger.error("Error while closing all connexions to PostgreSQL", error)
|
||||
logger.error("Error while closing all connexions to PostgreSQL", exc_info=error)
|
||||
try:
|
||||
postgreSQL_pool = ORThreadedConnectionPool(config("PG_MINCONN", cast=int, default=4),
|
||||
config("PG_MAXCONN", cast=int, default=8),
|
||||
|
|
@ -70,10 +81,10 @@ def make_pool():
|
|||
if postgreSQL_pool is not None:
|
||||
logger.info("Connection pool created successfully")
|
||||
except (Exception, psycopg2.DatabaseError) as error:
|
||||
logger.error("Error while connecting to PostgreSQL", error)
|
||||
logger.error("Error while connecting to PostgreSQL", exc_info=error)
|
||||
if RETRY < RETRY_MAX:
|
||||
RETRY += 1
|
||||
logger.info(f"waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
|
||||
logger.info(f"Waiting for {RETRY_INTERVAL}s before retry n°{RETRY}")
|
||||
time.sleep(RETRY_INTERVAL)
|
||||
make_pool()
|
||||
else:
|
||||
|
|
@ -97,13 +108,17 @@ class PostgresClient:
|
|||
elif long_query:
|
||||
long_config = dict(_PG_CONFIG)
|
||||
long_config["application_name"] += "-LONG"
|
||||
long_config["options"] = f"-c statement_timeout=" \
|
||||
f"{config('pg_long_timeout', cast=int, default=5 * 60) * 1000}"
|
||||
if config('PG_TIMEOUT_LONG', cast=int, default=1) > 0:
|
||||
long_config["options"] = f"-c statement_timeout=" \
|
||||
f"{config('PG_TIMEOUT_LONG', cast=int, default=5 * 60) * 1000}"
|
||||
else:
|
||||
logger.info("Disabled timeout for long query")
|
||||
self.connection = psycopg2.connect(**long_config)
|
||||
elif not use_pool or not config('PG_POOL', cast=bool, default=True):
|
||||
single_config = dict(_PG_CONFIG)
|
||||
single_config["application_name"] += "-NOPOOL"
|
||||
single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"
|
||||
if config('PG_TIMEOUT', cast=int, default=1) > 0:
|
||||
single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"
|
||||
self.connection = psycopg2.connect(**single_config)
|
||||
else:
|
||||
self.connection = postgreSQL_pool.getconn()
|
||||
|
|
@ -123,7 +138,7 @@ class PostgresClient:
|
|||
if not self.use_pool or self.long_query or self.unlimited_query:
|
||||
self.connection.close()
|
||||
except Exception as error:
|
||||
logger.error("Error while committing/closing PG-connection", error)
|
||||
logger.error("Error while committing/closing PG-connection", exc_info=error)
|
||||
if str(error) == "connection already closed" \
|
||||
and self.use_pool \
|
||||
and not self.long_query \
|
||||
|
|
@ -150,7 +165,7 @@ class PostgresClient:
|
|||
try:
|
||||
self.connection.rollback()
|
||||
except psycopg2.InterfaceError as e:
|
||||
logger.error("!!! Error while rollbacking connection", e)
|
||||
logger.error("!!! Error while rollbacking connection", exc_info=e)
|
||||
logger.error("!!! Trying to recreate the cursor")
|
||||
self.recreate_cursor()
|
||||
raise error
|
||||
|
|
@ -161,19 +176,18 @@ class PostgresClient:
|
|||
try:
|
||||
self.connection.rollback()
|
||||
except Exception as error:
|
||||
logger.error("Error while rollbacking connection for recreation", error)
|
||||
logger.error("Error while rollbacking connection for recreation", exc_info=error)
|
||||
try:
|
||||
self.cursor.close()
|
||||
except Exception as error:
|
||||
logger.error("Error while closing cursor for recreation", error)
|
||||
logger.error("Error while closing cursor for recreation", exc_info=error)
|
||||
self.cursor = None
|
||||
return self.__enter__()
|
||||
|
||||
|
||||
async def init():
|
||||
logger.info(f">use PG_POOL:{config('PG_POOL', default=True)}")
|
||||
if config('PG_POOL', cast=bool, default=True):
|
||||
make_pool()
|
||||
make_pool()
|
||||
|
||||
|
||||
async def terminate():
|
||||
|
|
@ -183,4 +197,4 @@ async def terminate():
|
|||
postgreSQL_pool.closeall()
|
||||
logger.info("Closed all connexions to PostgreSQL")
|
||||
except (Exception, psycopg2.DatabaseError) as error:
|
||||
logger.error("Error while closing all connexions to PostgreSQL", error)
|
||||
logger.error("Error while closing all connexions to PostgreSQL", exc_info=error)
|
||||
|
|
|
|||
|
|
@ -4,37 +4,41 @@ import schemas
|
|||
|
||||
|
||||
def get_sql_operator(op: Union[schemas.SearchEventOperator, schemas.ClickEventExtraOperator, schemas.MathOperator]):
|
||||
if isinstance(op, Enum):
|
||||
op = op.value
|
||||
return {
|
||||
schemas.SearchEventOperator.IS: "=",
|
||||
schemas.SearchEventOperator.ON: "=",
|
||||
schemas.SearchEventOperator.ON_ANY: "IN",
|
||||
schemas.SearchEventOperator.IS_NOT: "!=",
|
||||
schemas.SearchEventOperator.NOT_ON: "!=",
|
||||
schemas.SearchEventOperator.CONTAINS: "ILIKE",
|
||||
schemas.SearchEventOperator.NOT_CONTAINS: "NOT ILIKE",
|
||||
schemas.SearchEventOperator.STARTS_WITH: "ILIKE",
|
||||
schemas.SearchEventOperator.ENDS_WITH: "ILIKE",
|
||||
schemas.SearchEventOperator.IS.value: "=",
|
||||
schemas.SearchEventOperator.ON.value: "=",
|
||||
schemas.SearchEventOperator.ON_ANY.value: "IN",
|
||||
schemas.SearchEventOperator.IS_NOT.value: "!=",
|
||||
schemas.SearchEventOperator.NOT_ON.value: "!=",
|
||||
schemas.SearchEventOperator.CONTAINS.value: "ILIKE",
|
||||
schemas.SearchEventOperator.NOT_CONTAINS.value: "NOT ILIKE",
|
||||
schemas.SearchEventOperator.STARTS_WITH.value: "ILIKE",
|
||||
schemas.SearchEventOperator.ENDS_WITH.value: "ILIKE",
|
||||
# Selector operators:
|
||||
schemas.ClickEventExtraOperator.IS: "=",
|
||||
schemas.ClickEventExtraOperator.IS_NOT: "!=",
|
||||
schemas.ClickEventExtraOperator.CONTAINS: "ILIKE",
|
||||
schemas.ClickEventExtraOperator.NOT_CONTAINS: "NOT ILIKE",
|
||||
schemas.ClickEventExtraOperator.STARTS_WITH: "ILIKE",
|
||||
schemas.ClickEventExtraOperator.ENDS_WITH: "ILIKE",
|
||||
schemas.ClickEventExtraOperator.IS.value: "=",
|
||||
schemas.ClickEventExtraOperator.IS_NOT.value: "!=",
|
||||
schemas.ClickEventExtraOperator.CONTAINS.value: "ILIKE",
|
||||
schemas.ClickEventExtraOperator.NOT_CONTAINS.value: "NOT ILIKE",
|
||||
schemas.ClickEventExtraOperator.STARTS_WITH.value: "ILIKE",
|
||||
schemas.ClickEventExtraOperator.ENDS_WITH.value: "ILIKE",
|
||||
|
||||
schemas.MathOperator.GREATER: ">",
|
||||
schemas.MathOperator.GREATER_EQ: ">=",
|
||||
schemas.MathOperator.LESS: "<",
|
||||
schemas.MathOperator.LESS_EQ: "<=",
|
||||
schemas.MathOperator.GREATER.value: ">",
|
||||
schemas.MathOperator.GREATER_EQ.value: ">=",
|
||||
schemas.MathOperator.LESS.value: "<",
|
||||
schemas.MathOperator.LESS_EQ.value: "<=",
|
||||
}.get(op, "=")
|
||||
|
||||
|
||||
def is_negation_operator(op: schemas.SearchEventOperator):
|
||||
return op in [schemas.SearchEventOperator.IS_NOT,
|
||||
schemas.SearchEventOperator.NOT_ON,
|
||||
schemas.SearchEventOperator.NOT_CONTAINS,
|
||||
schemas.ClickEventExtraOperator.IS_NOT,
|
||||
schemas.ClickEventExtraOperator.NOT_CONTAINS]
|
||||
if isinstance(op, Enum):
|
||||
op = op.value
|
||||
return op in [schemas.SearchEventOperator.IS_NOT.value,
|
||||
schemas.SearchEventOperator.NOT_ON.value,
|
||||
schemas.SearchEventOperator.NOT_CONTAINS.value,
|
||||
schemas.ClickEventExtraOperator.IS_NOT.value,
|
||||
schemas.ClickEventExtraOperator.NOT_CONTAINS.value]
|
||||
|
||||
|
||||
def reverse_sql_operator(op):
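A minimal sketch of why the operator map above is keyed by .value and the helpers normalise their argument with isinstance(op, Enum): the lookup then works whether the caller passes an enum member or the raw string (e.g. a value deserialised from JSON), independent of whether the schema enums mix in str. The enum below is a stand-in for schemas.SearchEventOperator.

```python
from enum import Enum


class SearchEventOperator(Enum):  # stand-in; behaves the same for a str-mixin enum
    IS = "is"
    IS_NOT = "isNot"
    CONTAINS = "contains"


OPERATORS = {
    SearchEventOperator.IS.value: "=",
    SearchEventOperator.IS_NOT.value: "!=",
    SearchEventOperator.CONTAINS.value: "ILIKE",
}


def get_sql_operator(op) -> str:
    if isinstance(op, Enum):
        op = op.value          # normalise members to their string value
    return OPERATORS.get(op, "=")


print(get_sql_operator(SearchEventOperator.IS_NOT))  # !=
print(get_sql_operator("contains"))                  # ILIKE
```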
|
||||
|
|
|
|||
|
|
@ -1,591 +0,0 @@
|
|||
-- -- Original Q3
|
||||
-- WITH ranked_events AS (SELECT *
|
||||
-- FROM ranked_events_1736344377403),
|
||||
-- n1 AS (SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM ranked_events
|
||||
-- WHERE event_number_in_session = 1
|
||||
-- AND isNotNull(next_value)
|
||||
-- GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8),
|
||||
-- n2 AS (SELECT *
|
||||
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
|
||||
-- re.event_type AS event_type,
|
||||
-- re.e_value AS e_value,
|
||||
-- re.next_type AS next_type,
|
||||
-- re.next_value AS next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM n1
|
||||
-- INNER JOIN ranked_events AS re
|
||||
-- ON (n1.next_value = re.e_value AND n1.next_type = re.event_type)
|
||||
-- WHERE re.event_number_in_session = 2
|
||||
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
|
||||
-- re.next_value) AS sub_level
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8),
|
||||
-- n3 AS (SELECT *
|
||||
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
|
||||
-- re.event_type AS event_type,
|
||||
-- re.e_value AS e_value,
|
||||
-- re.next_type AS next_type,
|
||||
-- re.next_value AS next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM n2
|
||||
-- INNER JOIN ranked_events AS re
|
||||
-- ON (n2.next_value = re.e_value AND n2.next_type = re.event_type)
|
||||
-- WHERE re.event_number_in_session = 3
|
||||
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
|
||||
-- re.next_value) AS sub_level
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8),
|
||||
-- n4 AS (SELECT *
|
||||
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
|
||||
-- re.event_type AS event_type,
|
||||
-- re.e_value AS e_value,
|
||||
-- re.next_type AS next_type,
|
||||
-- re.next_value AS next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM n3
|
||||
-- INNER JOIN ranked_events AS re
|
||||
-- ON (n3.next_value = re.e_value AND n3.next_type = re.event_type)
|
||||
-- WHERE re.event_number_in_session = 4
|
||||
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
|
||||
-- re.next_value) AS sub_level
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8),
|
||||
-- n5 AS (SELECT *
|
||||
-- FROM (SELECT re.event_number_in_session AS event_number_in_session,
|
||||
-- re.event_type AS event_type,
|
||||
-- re.e_value AS e_value,
|
||||
-- re.next_type AS next_type,
|
||||
-- re.next_value AS next_value,
|
||||
-- COUNT(1) AS sessions_count
|
||||
-- FROM n4
|
||||
-- INNER JOIN ranked_events AS re
|
||||
-- ON (n4.next_value = re.e_value AND n4.next_type = re.event_type)
|
||||
-- WHERE re.event_number_in_session = 5
|
||||
-- GROUP BY re.event_number_in_session, re.event_type, re.e_value, re.next_type,
|
||||
-- re.next_value) AS sub_level
|
||||
-- ORDER BY sessions_count DESC
|
||||
-- LIMIT 8)
|
||||
-- SELECT *
|
||||
-- FROM (SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n1
|
||||
-- UNION ALL
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n2
|
||||
-- UNION ALL
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n3
|
||||
-- UNION ALL
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n4
|
||||
-- UNION ALL
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- next_type,
|
||||
-- next_value,
|
||||
-- sessions_count
|
||||
-- FROM n5) AS chart_steps
|
||||
-- ORDER BY event_number_in_session;
|
||||
|
||||
-- Q1
|
||||
-- CREATE TEMPORARY TABLE pre_ranked_events_1736344377403 AS
|
||||
CREATE TABLE pre_ranked_events_1736344377403 ENGINE = Memory AS
|
||||
(WITH initial_event AS (SELECT events.session_id, MIN(datetime) AS start_event_timestamp
|
||||
FROM experimental.events AS events
|
||||
WHERE ((event_type = 'LOCATION' AND (url_path = '/en/deployment/')))
|
||||
AND events.project_id = toUInt16(65)
|
||||
AND events.datetime >= toDateTime(1735599600000 / 1000)
|
||||
AND events.datetime < toDateTime(1736290799999 / 1000)
|
||||
GROUP BY 1),
|
||||
pre_ranked_events AS (SELECT *
|
||||
FROM (SELECT session_id,
|
||||
event_type,
|
||||
datetime,
|
||||
url_path AS e_value,
|
||||
row_number() OVER (PARTITION BY session_id
|
||||
ORDER BY datetime ,
|
||||
message_id ) AS event_number_in_session
|
||||
FROM experimental.events AS events
|
||||
INNER JOIN initial_event ON (events.session_id = initial_event.session_id)
|
||||
WHERE events.project_id = toUInt16(65)
|
||||
AND events.datetime >= toDateTime(1735599600000 / 1000)
|
||||
AND events.datetime < toDateTime(1736290799999 / 1000)
|
||||
AND (events.event_type = 'LOCATION')
|
||||
AND events.datetime >= initial_event.start_event_timestamp
|
||||
) AS full_ranked_events
|
||||
WHERE event_number_in_session <= 5)
|
||||
SELECT *
|
||||
FROM pre_ranked_events);
|
||||
;
|
||||
|
||||
SELECT *
|
||||
FROM pre_ranked_events_1736344377403
|
||||
WHERE event_number_in_session < 3;
|
||||
|
||||
|
||||
|
||||
-- ---------Q2-----------
|
||||
-- CREATE TEMPORARY TABLE ranked_events_1736344377403 AS
|
||||
DROP TABLE ranked_events_1736344377403;
|
||||
CREATE TABLE ranked_events_1736344377403 ENGINE = Memory AS
|
||||
(WITH pre_ranked_events AS (SELECT *
|
||||
FROM pre_ranked_events_1736344377403),
|
||||
start_points AS (SELECT DISTINCT session_id
|
||||
FROM pre_ranked_events
|
||||
WHERE ((event_type = 'LOCATION' AND (e_value = '/en/deployment/')))
|
||||
AND pre_ranked_events.event_number_in_session = 1),
|
||||
ranked_events AS (SELECT pre_ranked_events.*,
|
||||
leadInFrame(e_value)
|
||||
OVER (PARTITION BY session_id ORDER BY datetime
|
||||
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_value,
|
||||
leadInFrame(toNullable(event_type))
|
||||
OVER (PARTITION BY session_id ORDER BY datetime
|
||||
ROWS BETWEEN UNBOUNDED PRECEDING AND UNBOUNDED FOLLOWING) AS next_type
|
||||
FROM start_points
|
||||
INNER JOIN pre_ranked_events USING (session_id))
|
||||
SELECT *
|
||||
FROM ranked_events);
|
||||
|
||||
|
||||
-- ranked events
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events_1736344377403
|
||||
WHERE event_number_in_session = 2
|
||||
-- AND e_value='/en/deployment/deploy-docker/'
|
||||
-- AND next_value NOT IN ('/en/deployment/','/en/plugins/','/en/using-or/')
|
||||
-- AND e_value NOT IN ('/en/deployment/deploy-docker/','/en/getting-started/','/en/deployment/deploy-ubuntu/')
|
||||
AND isNotNull(next_value)
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY event_number_in_session, sessions_count DESC;
|
||||
|
||||
|
||||
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events_1736344377403
|
||||
WHERE event_number_in_session = 1
|
||||
GROUP BY event_number_in_session, event_type, e_value
|
||||
ORDER BY event_number_in_session, sessions_count DESC;
|
||||
|
||||
SELECT COUNT(1) AS sessions_count
|
||||
FROM ranked_events_1736344377403
|
||||
WHERE event_number_in_session = 2
|
||||
AND isNull(next_value)
|
||||
;
|
||||
|
||||
-- ---------Q3 MORE -----------
|
||||
WITH ranked_events AS (SELECT *
|
||||
FROM ranked_events_1736344377403),
|
||||
n1 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 1
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
n2 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 2
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
n3 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 3
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
drop_n AS (-- STEP 1
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n1
|
||||
WHERE isNull(n1.next_type)
|
||||
UNION ALL
|
||||
-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n2
|
||||
WHERE isNull(n2.next_type)),
|
||||
-- TODO: make this as top_steps, where every step will go to next as top/others
|
||||
top_n1 AS (-- STEP 1
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM n1
|
||||
WHERE isNotNull(next_type)
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 3),
|
||||
top_n2 AS (-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM n2
|
||||
WHERE (event_type, e_value) IN (SELECT event_type,
|
||||
e_value
|
||||
FROM n2
|
||||
WHERE isNotNull(next_type)
|
||||
GROUP BY event_type, e_value
|
||||
ORDER BY SUM(sessions_count) DESC
|
||||
LIMIT 3)
|
||||
ORDER BY sessions_count DESC),
|
||||
top_n AS (SELECT *
|
||||
FROM top_n1
|
||||
UNION ALL
|
||||
SELECT *
|
||||
FROM top_n2),
|
||||
u_top_n AS (SELECT DISTINCT event_number_in_session,
|
||||
event_type,
|
||||
e_value
|
||||
FROM top_n),
|
||||
others_n AS (
|
||||
-- STEP 1
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM n1
|
||||
WHERE isNotNull(next_type)
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 1000000 OFFSET 3
|
||||
UNION ALL
|
||||
-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
sessions_count
|
||||
FROM n2
|
||||
WHERE isNotNull(next_type)
|
||||
-- GROUP BY event_number_in_session, event_type, e_value
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 1000000 OFFSET 3)
|
||||
SELECT *
|
||||
FROM (
|
||||
-- Top
|
||||
SELECT *
|
||||
FROM top_n
|
||||
-- UNION ALL
|
||||
-- -- Others
|
||||
-- SELECT event_number_in_session,
|
||||
-- event_type,
|
||||
-- e_value,
|
||||
-- 'OTHER' AS next_type,
|
||||
-- NULL AS next_value,
|
||||
-- SUM(sessions_count)
|
||||
-- FROM others_n
|
||||
-- GROUP BY event_number_in_session, event_type, e_value
|
||||
-- UNION ALL
|
||||
-- -- Top go to Drop
|
||||
-- SELECT drop_n.event_number_in_session,
|
||||
-- drop_n.event_type,
|
||||
-- drop_n.e_value,
|
||||
-- drop_n.next_type,
|
||||
-- drop_n.next_value,
|
||||
-- drop_n.sessions_count
|
||||
-- FROM drop_n
|
||||
-- INNER JOIN u_top_n ON (drop_n.event_number_in_session = u_top_n.event_number_in_session
|
||||
-- AND drop_n.event_type = u_top_n.event_type
|
||||
-- AND drop_n.e_value = u_top_n.e_value)
|
||||
-- ORDER BY drop_n.event_number_in_session
|
||||
-- -- -- UNION ALL
|
||||
-- -- -- Top go to Others
|
||||
-- SELECT top_n.event_number_in_session,
|
||||
-- top_n.event_type,
|
||||
-- top_n.e_value,
|
||||
-- 'OTHER' AS next_type,
|
||||
-- NULL AS next_value,
|
||||
-- SUM(top_n.sessions_count) AS sessions_count
|
||||
-- FROM top_n
|
||||
-- LEFT JOIN others_n ON (others_n.event_number_in_session = (top_n.event_number_in_session + 1)
|
||||
-- AND top_n.next_type = others_n.event_type
|
||||
-- AND top_n.next_value = others_n.e_value)
|
||||
-- WHERE others_n.event_number_in_session IS NULL
|
||||
-- AND top_n.next_type IS NOT NULL
|
||||
-- GROUP BY event_number_in_session, event_type, e_value
|
||||
-- UNION ALL
|
||||
-- -- Others got to Top
|
||||
-- SELECT others_n.event_number_in_session,
|
||||
-- 'OTHER' AS event_type,
|
||||
-- NULL AS e_value,
|
||||
-- others_n.s_next_type AS next_type,
|
||||
-- others_n.s_next_value AS next_value,
|
||||
-- SUM(sessions_count) AS sessions_count
|
||||
-- FROM others_n
|
||||
-- INNER JOIN top_n ON (others_n.event_number_in_session = top_n.event_number_in_session + 1 AND
|
||||
-- others_n.s_next_type = top_n.event_type AND
|
||||
-- others_n.s_next_value = top_n.event_type)
|
||||
-- GROUP BY others_n.event_number_in_session, next_type, next_value
|
||||
-- UNION ALL
|
||||
-- -- TODO: find if this works or not
|
||||
-- -- Others got to Others
|
||||
-- SELECT others_n.event_number_in_session,
|
||||
-- 'OTHER' AS event_type,
|
||||
-- NULL AS e_value,
|
||||
-- 'OTHERS' AS next_type,
|
||||
-- NULL AS next_value,
|
||||
-- SUM(sessions_count) AS sessions_count
|
||||
-- FROM others_n
|
||||
-- LEFT JOIN u_top_n ON ((others_n.event_number_in_session + 1) = u_top_n.event_number_in_session
|
||||
-- AND others_n.s_next_type = u_top_n.event_type
|
||||
-- AND others_n.s_next_value = u_top_n.e_value)
|
||||
-- WHERE u_top_n.event_number_in_session IS NULL
|
||||
-- GROUP BY others_n.event_number_in_session
|
||||
)
|
||||
ORDER BY event_number_in_session;
|
||||
|
||||
|
||||
-- ---------Q3 TOP ON VALUE ONLY -----------
|
||||
WITH ranked_events AS (SELECT *
|
||||
FROM ranked_events_1736344377403),
|
||||
n1 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 1
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
n2 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 2
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
n3 AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
next_type,
|
||||
next_value,
|
||||
COUNT(1) AS sessions_count
|
||||
FROM ranked_events
|
||||
WHERE event_number_in_session = 3
|
||||
GROUP BY event_number_in_session, event_type, e_value, next_type, next_value
|
||||
ORDER BY sessions_count DESC),
|
||||
|
||||
drop_n AS (-- STEP 1
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n1
|
||||
WHERE isNull(n1.next_type)
|
||||
UNION ALL
|
||||
-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
'DROP' AS next_type,
|
||||
NULL AS next_value,
|
||||
sessions_count
|
||||
FROM n2
|
||||
WHERE isNull(n2.next_type)),
|
||||
top_n AS (SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
FROM n1
|
||||
GROUP BY event_number_in_session, event_type, e_value
|
||||
LIMIT 1
|
||||
UNION ALL
|
||||
-- STEP 2
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
FROM n2
|
||||
GROUP BY event_number_in_session, event_type, e_value
|
||||
ORDER BY sessions_count DESC
|
||||
LIMIT 3
|
||||
UNION ALL
|
||||
-- STEP 3
|
||||
SELECT event_number_in_session,
|
||||
event_type,
|
||||
e_value,
|
||||
SUM(sessions_count) AS sessions_count
|
||||
FROM n3
|
     GROUP BY event_number_in_session, event_type, e_value
     ORDER BY sessions_count DESC
     LIMIT 3),
     top_n_with_next AS (SELECT n1.*
                         FROM n1
                         UNION ALL
                         SELECT n2.*
                         FROM n2
                         INNER JOIN top_n ON (n2.event_number_in_session = top_n.event_number_in_session
                             AND n2.event_type = top_n.event_type
                             AND n2.e_value = top_n.e_value)),
     others_n AS (
         -- STEP 2
         SELECT n2.*
         FROM n2
         WHERE (n2.event_number_in_session, n2.event_type, n2.e_value) NOT IN
               (SELECT event_number_in_session, event_type, e_value
                FROM top_n
                WHERE top_n.event_number_in_session = 2)
         UNION ALL
         -- STEP 3
         SELECT n3.*
         FROM n3
         WHERE (n3.event_number_in_session, n3.event_type, n3.e_value) NOT IN
               (SELECT event_number_in_session, event_type, e_value
                FROM top_n
                WHERE top_n.event_number_in_session = 3))
SELECT *
FROM (
    -- SELECT sum(top_n_with_next.sessions_count)
    -- FROM top_n_with_next
    -- WHERE event_number_in_session = 1
    -- -- AND isNotNull(next_value)
    -- AND (next_type, next_value) IN
    --     (SELECT others_n.event_type, others_n.e_value FROM others_n WHERE others_n.event_number_in_session = 2)
    -- -- SELECT * FROM others_n
    -- -- SELECT * FROM n2
    -- SELECT *
    -- FROM top_n
    -- );

    -- Top to Top: valid
    SELECT top_n_with_next.*
    FROM top_n_with_next
    INNER JOIN top_n
        ON (top_n_with_next.event_number_in_session + 1 = top_n.event_number_in_session
            AND top_n_with_next.next_type = top_n.event_type
            AND top_n_with_next.next_value = top_n.e_value)

    UNION ALL

    -- Top to Others: valid
    SELECT top_n_with_next.event_number_in_session,
           top_n_with_next.event_type,
           top_n_with_next.e_value,
           'OTHER'                             AS next_type,
           NULL                                AS next_value,
           SUM(top_n_with_next.sessions_count) AS sessions_count
    FROM top_n_with_next
    WHERE (top_n_with_next.event_number_in_session + 1, top_n_with_next.next_type, top_n_with_next.next_value) IN
          (SELECT others_n.event_number_in_session, others_n.event_type, others_n.e_value FROM others_n)
    GROUP BY top_n_with_next.event_number_in_session, top_n_with_next.event_type, top_n_with_next.e_value

    UNION ALL

    -- Top to Drop: valid
    SELECT drop_n.event_number_in_session,
           drop_n.event_type,
           drop_n.e_value,
           drop_n.next_type,
           drop_n.next_value,
           drop_n.sessions_count
    FROM drop_n
    INNER JOIN top_n ON (drop_n.event_number_in_session = top_n.event_number_in_session
        AND drop_n.event_type = top_n.event_type
        AND drop_n.e_value = top_n.e_value)
    ORDER BY drop_n.event_number_in_session

    UNION ALL

    -- Others to Drop: valid
    SELECT others_n.event_number_in_session,
           'OTHER' AS event_type,
           NULL    AS e_value,
           'DROP'  AS next_type,
           NULL    AS next_value,
           SUM(others_n.sessions_count) AS sessions_count
    FROM others_n
    WHERE isNull(others_n.next_type)
      AND others_n.event_number_in_session < 3
    GROUP BY others_n.event_number_in_session, next_type, next_value

    UNION ALL

    -- Others to Top: valid
    SELECT others_n.event_number_in_session,
           'OTHER' AS event_type,
           NULL    AS e_value,
           others_n.next_type,
           others_n.next_value,
           SUM(others_n.sessions_count) AS sessions_count
    FROM others_n
    WHERE isNotNull(others_n.next_type)
      AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) IN
          (SELECT top_n.event_number_in_session, top_n.event_type, top_n.e_value FROM top_n)
    GROUP BY others_n.event_number_in_session, others_n.next_type, others_n.next_value

    UNION ALL

    -- Others to Others
    SELECT others_n.event_number_in_session,
           'OTHER'  AS event_type,
           NULL     AS e_value,
           'OTHERS' AS next_type,
           NULL     AS next_value,
           SUM(sessions_count) AS sessions_count
    FROM others_n
    WHERE isNotNull(others_n.next_type)
      AND others_n.event_number_in_session < 3
      AND (others_n.event_number_in_session + 1, others_n.next_type, others_n.next_value) NOT IN
          (SELECT event_number_in_session, event_type, e_value FROM top_n)
    GROUP BY others_n.event_number_in_session)
ORDER BY event_number_in_session, sessions_count DESC;
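
For readability, here is the shape of the computation the UNION ALL branches above implement, as a small self-contained Python sketch. The toy event names and the TOP_N of 2 are illustrative assumptions, not values taken from the query:

from collections import Counter

# Toy per-session paths: each list is the ordered events of one session.
paths = [
    ["home", "search", "product"],
    ["home", "search"],
    ["home", "cart"],
    ["promo", "home"],
]

TOP_N = 2  # keep the 2 most frequent events per step, fold the rest into OTHER

# Step 1: most frequent events at every position in the path (the top_n CTE).
top_per_step = {}
for step in range(max(len(p) for p in paths)):
    counts = Counter(p[step] for p in paths if len(p) > step)
    top_per_step[step] = {name for name, _ in counts.most_common(TOP_N)}

# Step 2: count links between buckets; a session with no next event goes to DROP.
links = Counter()
for p in paths:
    for step, event in enumerate(p):
        src = event if event in top_per_step[step] else "OTHER"
        if step + 1 < len(p):
            nxt = p[step + 1]
            dst = nxt if nxt in top_per_step[step + 1] else "OTHER"
        else:
            dst = "DROP"
        links[(step, src, dst)] += 1

for (step, src, dst), n in sorted(links.items()):
    print(f"step {step}: {src} -> {dst}: {n} sessions")

Each (step, source, target) count corresponds to one row of the final result, with top events kept by name and everything else folded into OTHER or DROP, which is what the top_n, others_n, and drop_n branches express in SQL with sessions_count as the counter.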
@ -7,27 +7,30 @@ from fastapi import HTTPException, status
from starlette.responses import RedirectResponse, FileResponse, JSONResponse, Response

import schemas
from chalicelib.core import scope
from chalicelib.core import assist, signup, feature_flags
from chalicelib.core.metrics import heatmaps
from chalicelib.core.errors import errors, errors_details
from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
    sessions_assignments, unprocessed_sessions, sessions_search
from chalicelib.core import scope
from chalicelib.core import tenants, users, projects, license
from chalicelib.core import webhook
from chalicelib.core.collaborations.collaboration_slack import Slack
from chalicelib.core.errors import errors, errors_details
from chalicelib.core.metrics import heatmaps
from chalicelib.core.sessions import sessions, sessions_notes, sessions_replay, sessions_favorite, sessions_viewed, \
    sessions_assignments, unprocessed_sessions, sessions_search
from chalicelib.utils import captcha, smtp
from chalicelib.utils import contextual_validators
from chalicelib.utils import helper
from chalicelib.utils.TimeUTC import TimeUTC
from or_dependencies import OR_context, OR_role
from routers.base import get_routers
from routers.subs import spot
from chalicelib.utils import contextual_validators

logger = logging.getLogger(__name__)
public_app, app, app_apikey = get_routers()

COOKIE_PATH = "/api/refresh"
if config("LOCAL_DEV", cast=bool, default=False):
    COOKIE_PATH = "/refresh"
else:
    COOKIE_PATH = "/api/refresh"

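The LOCAL_DEV switch above only changes the path attribute the refresh cookie is scoped to (behind the reverse proxy the API lives under /api, in local development it does not). A minimal sketch of how such a cookie could be issued with this path; the route name, cookie name, and lifetime below are illustrative assumptions, not taken from this changeset:

from fastapi import Response

@public_app.post("/refresh_cookie_example")  # hypothetical route, for illustration only
def issue_refresh_cookie(response: Response):
    # Scope the cookie to the refresh endpoint chosen above so it is not sent with every request.
    response.set_cookie(
        key="refreshToken",      # assumed cookie name
        value="<opaque-token>",
        path=COOKIE_PATH,
        httponly=True,
        secure=True,
        max_age=7 * 24 * 3600,   # assumed lifetime: one week
    )
    return {"ok": True}
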
@public_app.get('/signup', tags=['signup'])
@ -9,172 +9,244 @@ from routers.base import get_routers
|
|||
public_app, app, app_apikey = get_routers()
|
||||
|
||||
|
||||
@app.post('/{projectId}/dashboards', tags=["dashboard"])
|
||||
@app.post("/{projectId}/dashboards", tags=["dashboard"])
|
||||
def create_dashboards(projectId: int, data: schemas.CreateDashboardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return dashboards.create_dashboard(project_id=projectId, user_id=context.user_id, data=data)
|
||||
return dashboards.create_dashboard(
|
||||
project_id=projectId, user_id=context.user_id, data=data
|
||||
)
|
||||
|
||||
|
||||
@app.get('/{projectId}/dashboards', tags=["dashboard"])
|
||||
@app.get("/{projectId}/dashboards", tags=["dashboard"])
|
||||
def get_dashboards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.get_dashboards(project_id=projectId, user_id=context.user_id)}
|
||||
return {
|
||||
"data": dashboards.get_dashboards(project_id=projectId, user_id=context.user_id)
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
|
||||
@app.get("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
|
||||
def get_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = dashboards.get_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)
|
||||
data = dashboards.get_dashboard(
|
||||
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
|
||||
)
|
||||
if data is None:
|
||||
return {"errors": ["dashboard not found"]}
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.put('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
|
||||
@app.put("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
|
||||
def update_dashboard(projectId: int, dashboardId: int, data: schemas.EditDashboardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.update_dashboard(project_id=projectId, user_id=context.user_id,
|
||||
dashboard_id=dashboardId, data=data)}
|
||||
return {
|
||||
"data": dashboards.update_dashboard(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
data=data,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.delete('/{projectId}/dashboards/{dashboardId}', tags=["dashboard"])
|
||||
@app.delete("/{projectId}/dashboards/{dashboardId}", tags=["dashboard"])
|
||||
def delete_dashboard(projectId: int, dashboardId: int, _=Body(None),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return dashboards.delete_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)
|
||||
return dashboards.delete_dashboard(
|
||||
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
|
||||
)
|
||||
|
||||
|
||||
@app.get('/{projectId}/dashboards/{dashboardId}/pin', tags=["dashboard"])
|
||||
@app.get("/{projectId}/dashboards/{dashboardId}/pin", tags=["dashboard"])
|
||||
def pin_dashboard(projectId: int, dashboardId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.pin_dashboard(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId)}
|
||||
return {
|
||||
"data": dashboards.pin_dashboard(
|
||||
project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/dashboards/{dashboardId}/cards', tags=["cards"])
|
||||
def add_card_to_dashboard(projectId: int, dashboardId: int,
|
||||
data: schemas.AddWidgetToDashboardPayloadSchema = Body(...),
|
||||
@app.post("/{projectId}/dashboards/{dashboardId}/cards", tags=["cards"])
|
||||
def add_card_to_dashboard(projectId: int, dashboardId: int, data: schemas.AddWidgetToDashboardPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.add_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
|
||||
data=data)}
|
||||
return {
|
||||
"data": dashboards.add_widget(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
data=data,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
|
||||
@app.post("/{projectId}/dashboards/{dashboardId}/metrics", tags=["dashboard"])
|
||||
# @app.put('/{projectId}/dashboards/{dashboardId}/metrics', tags=["dashboard"])
|
||||
def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int,
|
||||
data: schemas.CardSchema = Body(...),
|
||||
def create_metric_and_add_to_dashboard(projectId: int, dashboardId: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": dashboards.create_metric_add_widget(project=context.project, user_id=context.user_id,
|
||||
dashboard_id=dashboardId, data=data)}
|
||||
return {
|
||||
"data": dashboards.create_metric_add_widget(
|
||||
project=context.project,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
data=data,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.put('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
|
||||
@app.put("/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"])
|
||||
def update_widget_in_dashboard(projectId: int, dashboardId: int, widgetId: int,
|
||||
data: schemas.UpdateWidgetPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return dashboards.update_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
|
||||
widget_id=widgetId, data=data)
|
||||
return dashboards.update_widget(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
widget_id=widgetId,
|
||||
data=data,
|
||||
)
|
||||
|
||||
|
||||
@app.delete('/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}', tags=["dashboard"])
|
||||
@app.delete("/{projectId}/dashboards/{dashboardId}/widgets/{widgetId}", tags=["dashboard"])
|
||||
def remove_widget_from_dashboard(projectId: int, dashboardId: int, widgetId: int, _=Body(None),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return dashboards.remove_widget(project_id=projectId, user_id=context.user_id, dashboard_id=dashboardId,
|
||||
widget_id=widgetId)
|
||||
return dashboards.remove_widget(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
dashboard_id=dashboardId,
|
||||
widget_id=widgetId,
|
||||
)
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/try', tags=["cards"])
|
||||
@app.post("/{projectId}/cards/try", tags=["cards"])
|
||||
def try_card(projectId: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.get_chart(project=context.project, data=data, user_id=context.user_id)}
|
||||
return {
|
||||
"data": custom_metrics.get_chart(
|
||||
project=context.project, data=data, user_id=context.user_id
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/try/sessions', tags=["cards"])
|
||||
@app.post("/{projectId}/cards/try/sessions", tags=["cards"])
|
||||
def try_card_sessions(projectId: int, data: schemas.CardSessionsSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = custom_metrics.get_sessions(project=context.project, user_id=context.user_id, data=data)
|
||||
data = custom_metrics.get_sessions(
|
||||
project=context.project, user_id=context.user_id, data=data
|
||||
)
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/try/issues', tags=["cards"])
|
||||
@app.post("/{projectId}/cards/try/issues", tags=["cards"])
|
||||
def try_card_issues(projectId: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.get_issues(project=context.project, user_id=context.user_id, data=data)}
|
||||
return {
|
||||
"data": custom_metrics.get_issues(
|
||||
project=context.project, user_id=context.user_id, data=data
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/cards', tags=["cards"])
|
||||
@app.get("/{projectId}/cards", tags=["cards"])
|
||||
def get_cards(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)}
|
||||
return {
|
||||
"data": custom_metrics.get_all(project_id=projectId, user_id=context.user_id)
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards', tags=["cards"])
|
||||
@app.post("/{projectId}/cards", tags=["cards"])
|
||||
def create_card(projectId: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return custom_metrics.create_card(project=context.project, user_id=context.user_id, data=data)
|
||||
return custom_metrics.create_card(
|
||||
project=context.project, user_id=context.user_id, data=data
|
||||
)
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/search', tags=["cards"])
|
||||
def search_cards(projectId: int, data: schemas.SearchCardsSchema = Body(...),
|
||||
@app.post("/{projectId}/cards/search", tags=["cards"])
|
||||
def search_cards(projectId: int, data: schemas.MetricSearchSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.search_all(project_id=projectId, user_id=context.user_id, data=data)}
|
||||
return {
|
||||
"data": custom_metrics.search_metrics(
|
||||
project_id=projectId, user_id=context.user_id, data=data
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/cards/{metric_id}', tags=["cards"])
|
||||
@app.get("/{projectId}/cards/{metric_id}", tags=["cards"])
|
||||
def get_card(projectId: int, metric_id: Union[int, str], context: schemas.CurrentContext = Depends(OR_context)):
|
||||
if metric_id.isnumeric():
|
||||
metric_id = int(metric_id)
|
||||
else:
|
||||
return {"errors": ["invalid card_id"]}
|
||||
data = custom_metrics.get_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id)
|
||||
data = custom_metrics.get_card(
|
||||
project_id=projectId, user_id=context.user_id, metric_id=metric_id
|
||||
)
|
||||
if data is None:
|
||||
return {"errors": ["card not found"]}
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/{metric_id}/sessions', tags=["cards"])
|
||||
def get_card_sessions(projectId: int, metric_id: int,
|
||||
data: schemas.CardSessionsSchema = Body(...),
|
||||
@app.post("/{projectId}/cards/{metric_id}/sessions", tags=["cards"])
|
||||
def get_card_sessions(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = custom_metrics.get_sessions_by_card_id(project=context.project, user_id=context.user_id, metric_id=metric_id,
|
||||
data=data)
|
||||
data = custom_metrics.get_sessions_by_card_id(
|
||||
project=context.project, user_id=context.user_id, metric_id=metric_id, data=data
|
||||
)
|
||||
if data is None:
|
||||
return {"errors": ["custom metric not found"]}
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/{metric_id}/issues/{issueId}/sessions', tags=["dashboard"])
|
||||
@app.post("/{projectId}/cards/{metric_id}/issues/{issueId}/sessions", tags=["dashboard"])
|
||||
def get_metric_funnel_issue_sessions(projectId: int, metric_id: int, issueId: str,
|
||||
data: schemas.CardSessionsSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = custom_metrics.get_funnel_sessions_by_issue(project_id=projectId, user_id=context.user_id,
|
||||
metric_id=metric_id, issue_id=issueId, data=data)
|
||||
data = custom_metrics.get_funnel_sessions_by_issue(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
metric_id=metric_id,
|
||||
issue_id=issueId,
|
||||
data=data,
|
||||
)
|
||||
if data is None:
|
||||
return {"errors": ["custom metric not found"]}
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/{metric_id}/chart', tags=["card"])
|
||||
@app.post("/{projectId}/cards/{metric_id}/chart", tags=["card"])
|
||||
def get_card_chart(projectId: int, metric_id: int, data: schemas.CardSessionsSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = custom_metrics.make_chart_from_card(project=context.project, user_id=context.user_id, metric_id=metric_id,
|
||||
data=data)
|
||||
data = custom_metrics.make_chart_from_card(
|
||||
project=context.project, user_id=context.user_id, metric_id=metric_id, data=data
|
||||
)
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/{metric_id}', tags=["dashboard"])
|
||||
@app.post("/{projectId}/cards/{metric_id}", tags=["dashboard"])
|
||||
def update_card(projectId: int, metric_id: int, data: schemas.CardSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = custom_metrics.update_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data)
|
||||
data = custom_metrics.update_card(
|
||||
project_id=projectId, user_id=context.user_id, metric_id=metric_id, data=data
|
||||
)
|
||||
if data is None:
|
||||
return {"errors": ["custom metric not found"]}
|
||||
return {"data": data}
|
||||
|
||||
|
||||
@app.post('/{projectId}/cards/{metric_id}/status', tags=["dashboard"])
|
||||
def update_card_state(projectId: int, metric_id: int,
|
||||
data: schemas.UpdateCardStatusSchema = Body(...),
|
||||
@app.post("/{projectId}/cards/{metric_id}/status", tags=["dashboard"])
|
||||
def update_card_state(projectId: int, metric_id: int, data: schemas.UpdateCardStatusSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {
|
||||
"data": custom_metrics.change_state(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
|
||||
status=data.active)}
|
||||
"data": custom_metrics.change_state(
|
||||
project_id=projectId,
|
||||
user_id=context.user_id,
|
||||
metric_id=metric_id,
|
||||
status=data.active,
|
||||
)
|
||||
}
|
||||
|
||||
|
||||
@app.delete('/{projectId}/cards/{metric_id}', tags=["dashboard"])
|
||||
def delete_card(projectId: int, metric_id: int, _=Body(None),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": custom_metrics.delete_card(project_id=projectId, user_id=context.user_id, metric_id=metric_id)}
|
||||
@app.delete("/{projectId}/cards/{metric_id}", tags=["dashboard"])
|
||||
def delete_card(projectId: int, metric_id: int, _=Body(None), context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {
|
||||
"data": custom_metrics.delete_card(
|
||||
project_id=projectId, user_id=context.user_id, metric_id=metric_id
|
||||
)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,3 +1,4 @@
from decouple import config
from fastapi import Depends
from starlette.responses import JSONResponse, Response

@ -8,7 +9,10 @@ from routers.base import get_routers

public_app, app, app_apikey = get_routers(prefix="/spot", tags=["spot"])

COOKIE_PATH = "/api/spot/refresh"
if config("LOCAL_DEV", cast=bool, default=False):
    COOKIE_PATH = "/spot/refresh"
else:
    COOKIE_PATH = "/api/spot/refresh"


@app.get('/logout')
|
||||
|
|
|
|||
|
|
@ -1,7 +1,8 @@
from fastapi import Depends, Body

import schemas
from chalicelib.core import sessions, events, jobs, projects
from chalicelib.core import events, jobs, projects
from chalicelib.core.sessions import sessions
from or_dependencies import OR_context
from routers.base import get_routers
|
||||
|
||||
|
|
|
|||
|
|
@ -960,36 +960,6 @@ class CardSessionsSchema(_TimedSchema, _PaginatedSchema):

        return self

    # We don't need this as the UI is expecting filters to override the full series' filters
    # @model_validator(mode="after")
    # def __merge_out_filters_with_series(self):
    #     for f in self.filters:
    #         for s in self.series:
    #             found = False
    #
    #             if f.is_event:
    #                 sub = s.filter.events
    #             else:
    #                 sub = s.filter.filters
    #
    #             for e in sub:
    #                 if f.type == e.type and f.operator == e.operator:
    #                     found = True
    #                     if f.is_event:
    #                         # If extra event: append value
    #                         for v in f.value:
    #                             if v not in e.value:
    #                                 e.value.append(v)
    #                     else:
    #                         # If extra filter: override value
    #                         e.value = f.value
    #             if not found:
    #                 sub.append(f)
    #
    #     self.filters = []
    #
    #     return self

    # UI is expecting filters to override the full series' filters
    @model_validator(mode="after")
    def __override_series_filters_with_outer_filters(self):
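(The body of this validator falls outside the hunk shown above.) The comment states the intended semantics: outer filters replace each series' own filters wholesale, instead of being merged per type and operator as the deleted __merge_out_filters_with_series attempted. A toy sketch of that override behaviour on plain dicts, purely to illustrate the rule described in the comment and not the real schema classes:

def override_series_filters(outer_filters, series):
    # Every series receives the outer filters verbatim; nothing is merged per type/operator.
    for s in series:
        if outer_filters:
            s["filter"]["filters"] = list(outer_filters)
    return series

series = [{"filter": {"filters": [{"type": "browser", "value": ["Chrome"]}]}}]
outer = [{"type": "country", "value": ["DE"]}]
print(override_series_filters(outer, series))
# -> the series' browser filter is replaced by the outer country filter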
|
||||
|
|
@ -1060,6 +1030,16 @@ class CardTable(__CardSchema):
            values["metricValue"] = []
        return values

    @model_validator(mode="after")
    def __enforce_AND_operator(self):
        self.metric_of = MetricOfTable(self.metric_of)
        if self.metric_of in (MetricOfTable.VISITED_URL, MetricOfTable.FETCH, \
                              MetricOfTable.VISITED_URL.value, MetricOfTable.FETCH.value):
            for s in self.series:
                if s.filter is not None:
                    s.filter.events_order = SearchEventOrder.AND
        return self

    @model_validator(mode="after")
    def __transform(self):
        self.metric_of = MetricOfTable(self.metric_of)
|
||||
|
|
@ -1135,7 +1115,7 @@ class CardPathAnalysis(__CardSchema):
    view_type: MetricOtherViewType = Field(...)
    metric_value: List[ProductAnalyticsSelectedEventType] = Field(default_factory=list)
    density: int = Field(default=4, ge=2, le=10)
    rows: int = Field(default=3, ge=1, le=10)
    rows: int = Field(default=5, ge=1, le=10)

    start_type: Literal["start", "end"] = Field(default="start")
    start_point: List[PathAnalysisSubFilterSchema] = Field(default_factory=list)
|
||||
|
|
@ -1368,6 +1348,42 @@ class SearchCardsSchema(_PaginatedSchema):
    query: Optional[str] = Field(default=None)


class MetricSortColumnType(str, Enum):
    NAME = "name"
    METRIC_TYPE = "metric_type"
    METRIC_OF = "metric_of"
    IS_PUBLIC = "is_public"
    CREATED_AT = "created_at"
    EDITED_AT = "edited_at"


class MetricFilterColumnType(str, Enum):
    NAME = "name"
    METRIC_TYPE = "metric_type"
    METRIC_OF = "metric_of"
    IS_PUBLIC = "is_public"
    USER_ID = "user_id"
    CREATED_AT = "created_at"
    EDITED_AT = "edited_at"


class MetricListSort(BaseModel):
    field: Optional[str] = Field(default=None)
    order: Optional[str] = Field(default=SortOrderType.DESC)


class MetricFilter(BaseModel):
    type: Optional[str] = Field(default=None)
    query: Optional[str] = Field(default=None)


class MetricSearchSchema(_PaginatedSchema):
    filter: Optional[MetricFilter] = Field(default=None)
    sort: Optional[MetricListSort] = Field(default=MetricListSort())
    shared_only: bool = Field(default=False)
    mine_only: bool = Field(default=False)


class _HeatMapSearchEventRaw(SessionSearchEventSchema2):
    type: Literal[EventType.LOCATION] = Field(...)
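
The MetricSearchSchema above is the payload the reworked POST /{projectId}/cards/search route accepts (see the router change earlier in this diff, where SearchCardsSchema was replaced and custom_metrics.search_metrics is now called). A sketch of a request body that should validate against it; the concrete filter/sort values and the page/limit field names inherited from _PaginatedSchema are assumptions for illustration:

payload = {
    "filter": {"type": "funnel", "query": "checkout"},  # MetricFilter: both fields optional
    "sort": {"field": "edited_at", "order": "desc"},    # MetricListSort; "desc" assumed for SortOrderType.DESC
    "shared_only": False,
    "mine_only": True,
    "page": 1,    # assumed pagination field from _PaginatedSchema
    "limit": 10,  # assumed pagination field from _PaginatedSchema
}
# e.g. MetricSearchSchema(**payload) in schema-aware code would validate this dict.
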
|
||||
|
||||
|
|
|
|||
|
|
@ -10,7 +10,8 @@ const EVENTS_DEFINITION = {
|
|||
UPDATE_EVENT: "UPDATE_SESSION", // tab become active/inactive, page title change, changed session object (rare case), call start/end
|
||||
CONNECT_ERROR: "connect_error",
|
||||
CONNECT_FAILED: "connect_failed",
|
||||
ERROR: "error"
|
||||
ERROR: "error",
|
||||
WEBRTC_AGENT_CALL: "WEBRTC_AGENT_CALL",
|
||||
},
|
||||
// The following list of events will only be emitted by the server
|
||||
server: {
|
||||
|
|
@ -18,14 +19,16 @@ const EVENTS_DEFINITION = {
|
|||
}
|
||||
};
|
||||
EVENTS_DEFINITION.emit = {
|
||||
NEW_AGENT: "NEW_AGENT",
|
||||
NO_AGENTS: "NO_AGENT",
|
||||
AGENT_DISCONNECT: "AGENT_DISCONNECTED",
|
||||
AGENTS_CONNECTED: "AGENTS_CONNECTED",
|
||||
NO_SESSIONS: "SESSION_DISCONNECTED",
|
||||
SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED",
|
||||
SESSION_RECONNECTED: "SESSION_RECONNECTED",
|
||||
UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT
|
||||
NEW_AGENT: "NEW_AGENT",
|
||||
NO_AGENTS: "NO_AGENT",
|
||||
AGENT_DISCONNECT: "AGENT_DISCONNECTED",
|
||||
AGENTS_CONNECTED: "AGENTS_CONNECTED",
|
||||
AGENTS_INFO_CONNECTED: "AGENTS_INFO_CONNECTED",
|
||||
NO_SESSIONS: "SESSION_DISCONNECTED",
|
||||
SESSION_ALREADY_CONNECTED: "SESSION_ALREADY_CONNECTED",
|
||||
SESSION_RECONNECTED: "SESSION_RECONNECTED",
|
||||
UPDATE_EVENT: EVENTS_DEFINITION.listen.UPDATE_EVENT,
|
||||
WEBRTC_CONFIG: "WEBRTC_CONFIG",
|
||||
};
|
||||
|
||||
const BASE_sessionInfo = {
|
||||
|
|
|
|||
|
|
@ -27,9 +27,14 @@ const respond = function (req, res, data) {
|
|||
res.setHeader('Content-Type', 'application/json');
|
||||
res.end(JSON.stringify(result));
|
||||
} else {
|
||||
res.cork(() => {
|
||||
res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result));
|
||||
});
|
||||
if (!res.aborted) {
|
||||
res.cork(() => {
|
||||
res.writeStatus('200 OK').writeHeader('Content-Type', 'application/json').end(JSON.stringify(result));
|
||||
});
|
||||
} else {
|
||||
logger.debug("response aborted");
|
||||
return;
|
||||
}
|
||||
}
|
||||
const duration = performance.now() - req.startTs;
|
||||
IncreaseTotalRequests();
|
||||
|
|
|
|||
|
|
@ -42,7 +42,7 @@ const findSessionSocketId = async (io, roomId, tabId) => {
|
|||
};
|
||||
|
||||
async function getRoomData(io, roomID) {
|
||||
let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [];
|
||||
let tabsCount = 0, agentsCount = 0, tabIDs = [], agentIDs = [], config = null, agentInfos = [];
|
||||
const connected_sockets = await io.in(roomID).fetchSockets();
|
||||
if (connected_sockets.length > 0) {
|
||||
for (let socket of connected_sockets) {
|
||||
|
|
@ -52,13 +52,19 @@ async function getRoomData(io, roomID) {
|
|||
} else {
|
||||
agentsCount++;
|
||||
agentIDs.push(socket.id);
|
||||
agentInfos.push({ ...socket.handshake.query.agentInfo, socketId: socket.id });
|
||||
if (socket.handshake.query.config !== undefined) {
|
||||
config = socket.handshake.query.config;
|
||||
}
|
||||
}
|
||||
}
|
||||
} else {
|
||||
tabsCount = -1;
|
||||
agentsCount = -1;
|
||||
agentInfos = [];
|
||||
agentIDs = [];
|
||||
}
|
||||
return {tabsCount, agentsCount, tabIDs, agentIDs};
|
||||
return {tabsCount, agentsCount, tabIDs, agentIDs, config, agentInfos};
|
||||
}
|
||||
|
||||
function processNewSocket(socket) {
|
||||
|
|
@ -78,7 +84,7 @@ async function onConnect(socket) {
|
|||
IncreaseOnlineConnections(socket.handshake.query.identity);
|
||||
|
||||
const io = getServer();
|
||||
const {tabsCount, agentsCount, tabIDs, agentIDs} = await getRoomData(io, socket.handshake.query.roomId);
|
||||
const {tabsCount, agentsCount, tabIDs, agentInfos, agentIDs, config} = await getRoomData(io, socket.handshake.query.roomId);
|
||||
|
||||
if (socket.handshake.query.identity === IDENTITIES.session) {
|
||||
// Check if a session with the same tabID is already connected; if so, refuse the new connection
|
||||
|
|
@ -100,7 +106,9 @@ async function onConnect(socket) {
|
|||
// Inform all connected agents about reconnected session
|
||||
if (agentsCount > 0) {
|
||||
logger.debug(`notifying new session about agent-existence`);
|
||||
io.to(socket.id).emit(EVENTS_DEFINITION.emit.WEBRTC_CONFIG, config);
|
||||
io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_CONNECTED, agentIDs);
|
||||
io.to(socket.id).emit(EVENTS_DEFINITION.emit.AGENTS_INFO_CONNECTED, agentInfos);
|
||||
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.SESSION_RECONNECTED, socket.id);
|
||||
}
|
||||
} else if (tabsCount <= 0) {
|
||||
|
|
@ -118,7 +126,8 @@ async function onConnect(socket) {
|
|||
// Stats
|
||||
startAssist(socket, socket.handshake.query.agentID);
|
||||
}
|
||||
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, socket.handshake.query.agentInfo);
|
||||
io.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.WEBRTC_CONFIG, socket.handshake.query.config);
|
||||
socket.to(socket.handshake.query.roomId).emit(EVENTS_DEFINITION.emit.NEW_AGENT, socket.id, { ...socket.handshake.query.agentInfo });
|
||||
}
|
||||
|
||||
// Set disconnect handler
|
||||
|
|
@ -127,6 +136,9 @@ async function onConnect(socket) {
|
|||
// Handle update event
|
||||
socket.on(EVENTS_DEFINITION.listen.UPDATE_EVENT, (...args) => onUpdateEvent(socket, ...args));
|
||||
|
||||
// Handle webrtc events
|
||||
socket.on(EVENTS_DEFINITION.listen.WEBRTC_AGENT_CALL, (...args) => onWebrtcAgentHandler(socket, ...args));
|
||||
|
||||
// Handle errors
|
||||
socket.on(EVENTS_DEFINITION.listen.ERROR, err => errorHandler(EVENTS_DEFINITION.listen.ERROR, err));
|
||||
socket.on(EVENTS_DEFINITION.listen.CONNECT_ERROR, err => errorHandler(EVENTS_DEFINITION.listen.CONNECT_ERROR, err));
|
||||
|
|
@ -186,6 +198,16 @@ async function onUpdateEvent(socket, ...args) {
|
|||
}
|
||||
}
|
||||
|
||||
async function onWebrtcAgentHandler(socket, ...args) {
|
||||
if (socket.handshake.query.identity === IDENTITIES.agent) {
|
||||
const agentIdToConnect = args[0]?.data?.toAgentId;
|
||||
logger.debug(`${socket.id} sent webrtc event to agent:${agentIdToConnect}`);
|
||||
if (agentIdToConnect && socket.handshake.sessionData.AGENTS_CONNECTED.includes(agentIdToConnect)) {
|
||||
socket.to(agentIdToConnect).emit(EVENTS_DEFINITION.listen.WEBRTC_AGENT_CALL, args[0]);
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
async function onAny(socket, eventName, ...args) {
|
||||
if (Object.values(EVENTS_DEFINITION.listen).indexOf(eventName) >= 0) {
|
||||
logger.debug(`received event:${eventName}, should be handled by another listener, stopping onAny.`);
|
||||
|
|
|
|||
|
|
@ -114,9 +114,9 @@ ENV TZ=UTC \
|
|||
RUN if [ "$SERVICE_NAME" = "http" ]; then \
|
||||
wget https://raw.githubusercontent.com/ua-parser/uap-core/master/regexes.yaml -O "$UAPARSER_FILE" &&\
|
||||
wget https://static.openreplay.com/geoip/GeoLite2-City.mmdb -O "$MAXMINDDB_FILE"; \
|
||||
elif [ "$SERVICE_NAME" = "imagestorage" ]; then \
|
||||
elif [ "$SERVICE_NAME" = "images" ]; then \
|
||||
apk add --no-cache zstd; \
|
||||
elif [ "$SERVICE_NAME" = "canvas-handler" ]; then \
|
||||
elif [ "$SERVICE_NAME" = "canvases" ]; then \
|
||||
apk add --no-cache zstd; \
|
||||
elif [ "$SERVICE_NAME" = "spot" ]; then \
|
||||
apk add --no-cache ffmpeg; \
|
||||
|
|
|
|||
|
|
@ -8,8 +8,7 @@ import (
|
|||
"openreplay/backend/pkg/db/postgres/pool"
|
||||
"openreplay/backend/pkg/logger"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
analyticsMetrics "openreplay/backend/pkg/metrics/analytics"
|
||||
databaseMetrics "openreplay/backend/pkg/metrics/database"
|
||||
"openreplay/backend/pkg/metrics/database"
|
||||
"openreplay/backend/pkg/metrics/web"
|
||||
"openreplay/backend/pkg/server"
|
||||
"openreplay/backend/pkg/server/api"
|
||||
|
|
@ -19,16 +18,18 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := analyticsConfig.New(log)
|
||||
// Observability
|
||||
webMetrics := web.New("analytics")
|
||||
metrics.New(log, append(webMetrics.List(), append(analyticsMetrics.List(), databaseMetrics.List()...)...))
|
||||
dbMetrics := database.New("analytics")
|
||||
metrics.New(log, append(webMetrics.List(), dbMetrics.List()...))
|
||||
|
||||
pgConn, err := pool.New(cfg.Postgres.String())
|
||||
pgConn, err := pool.New(dbMetrics, cfg.Postgres.String())
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init postgres connection: %s", err)
|
||||
}
|
||||
defer pgConn.Close()
|
||||
|
||||
builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, pgConn)
|
||||
builder, err := analytics.NewServiceBuilder(log, cfg, webMetrics, dbMetrics, pgConn)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init services: %s", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -22,13 +22,15 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := config.New(log)
|
||||
metrics.New(log, assetsMetrics.List())
|
||||
// Observability
|
||||
assetMetrics := assetsMetrics.New("assets")
|
||||
metrics.New(log, assetMetrics.List())
|
||||
|
||||
objStore, err := store.NewStore(&cfg.ObjectsConfig)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init object storage: %s", err)
|
||||
}
|
||||
cacher, err := cacher.NewCacher(cfg, objStore)
|
||||
cacher, err := cacher.NewCacher(cfg, objStore, assetMetrics)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init cacher: %s", err)
|
||||
}
|
||||
|
|
@ -37,7 +39,7 @@ func main() {
|
|||
switch m := msg.(type) {
|
||||
case *messages.AssetCache:
|
||||
cacher.CacheURL(m.SessionID(), m.URL)
|
||||
assetsMetrics.IncreaseProcessesSessions()
|
||||
assetMetrics.IncreaseProcessesSessions()
|
||||
case *messages.JSException:
|
||||
sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
|
||||
if err != nil {
|
||||
|
|
|
|||
|
|
@ -8,12 +8,12 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/internal/canvas-handler"
|
||||
config "openreplay/backend/internal/config/canvas-handler"
|
||||
"openreplay/backend/internal/canvases"
|
||||
config "openreplay/backend/internal/config/canvases"
|
||||
"openreplay/backend/pkg/logger"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
storageMetrics "openreplay/backend/pkg/metrics/imagestorage"
|
||||
canvasesMetrics "openreplay/backend/pkg/metrics/canvas"
|
||||
"openreplay/backend/pkg/objectstorage/store"
|
||||
"openreplay/backend/pkg/queue"
|
||||
)
|
||||
|
|
@ -22,22 +22,28 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := config.New(log)
|
||||
metrics.New(log, storageMetrics.List())
|
||||
// Observability
|
||||
canvasMetrics := canvasesMetrics.New("canvases")
|
||||
metrics.New(log, canvasMetrics.List())
|
||||
|
||||
objStore, err := store.NewStore(&cfg.ObjectsConfig)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init object storage: %s", err)
|
||||
}
|
||||
|
||||
srv, err := canvas_handler.New(cfg, log, objStore)
|
||||
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
|
||||
defer producer.Close(15000)
|
||||
|
||||
srv, err := canvases.New(cfg, log, objStore, producer, canvasMetrics)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init canvas service: %s", err)
|
||||
log.Fatal(ctx, "can't init canvases service: %s", err)
|
||||
}
|
||||
|
||||
canvasConsumer := queue.NewConsumer(
|
||||
cfg.GroupCanvasImage,
|
||||
[]string{
|
||||
cfg.TopicCanvasImages,
|
||||
cfg.TopicCanvasTrigger,
|
||||
},
|
||||
messages.NewImagesMessageIterator(func(data []byte, sessID uint64) {
|
||||
isSessionEnd := func(data []byte) bool {
|
||||
|
|
@ -55,14 +61,34 @@ func main() {
|
|||
}
|
||||
return true
|
||||
}
|
||||
isTriggerEvent := func(data []byte) (string, string, bool) {
|
||||
reader := messages.NewBytesReader(data)
|
||||
msgType, err := reader.ReadUint()
|
||||
if err != nil {
|
||||
return "", "", false
|
||||
}
|
||||
if msgType != messages.MsgCustomEvent {
|
||||
return "", "", false
|
||||
}
|
||||
msg, err := messages.ReadMessage(msgType, reader)
|
||||
if err != nil {
|
||||
return "", "", false
|
||||
}
|
||||
customEvent := msg.(*messages.CustomEvent)
|
||||
return customEvent.Payload, customEvent.Name, true
|
||||
}
|
||||
sessCtx := context.WithValue(context.Background(), "sessionID", sessID)
|
||||
|
||||
if isSessionEnd(data) {
|
||||
if err := srv.PackSessionCanvases(sessCtx, sessID); err != nil {
|
||||
if err := srv.PrepareSessionCanvases(sessCtx, sessID); err != nil {
|
||||
if !strings.Contains(err.Error(), "no such file or directory") {
|
||||
log.Error(sessCtx, "can't pack session's canvases: %s", err)
|
||||
}
|
||||
}
|
||||
} else if path, name, ok := isTriggerEvent(data); ok {
|
||||
if err := srv.ProcessSessionCanvas(sessCtx, sessID, path, name); err != nil {
|
||||
log.Error(sessCtx, "can't process session's canvas: %s", err)
|
||||
}
|
||||
} else {
|
||||
if err := srv.SaveCanvasToDisk(sessCtx, sessID, data); err != nil {
|
||||
log.Error(sessCtx, "can't process canvas image: %s", err)
|
||||
|
|
@ -73,7 +99,7 @@ func main() {
|
|||
cfg.MessageSizeLimit,
|
||||
)
|
||||
|
||||
log.Info(ctx, "canvas handler service started")
|
||||
log.Info(ctx, "canvases service started")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
|
@ -14,7 +14,7 @@ import (
|
|||
"openreplay/backend/pkg/memory"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
databaseMetrics "openreplay/backend/pkg/metrics/database"
|
||||
"openreplay/backend/pkg/metrics/database"
|
||||
"openreplay/backend/pkg/projects"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/sessions"
|
||||
|
|
@ -26,22 +26,24 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := config.New(log)
|
||||
metrics.New(log, databaseMetrics.List())
|
||||
// Observability
|
||||
dbMetric := database.New("db")
|
||||
metrics.New(log, dbMetric.List())
|
||||
|
||||
pgConn, err := pool.New(cfg.Postgres.String())
|
||||
pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init postgres connection: %s", err)
|
||||
}
|
||||
defer pgConn.Close()
|
||||
|
||||
chConn := clickhouse.NewConnector(cfg.Clickhouse)
|
||||
chConn := clickhouse.NewConnector(cfg.Clickhouse, dbMetric)
|
||||
if err := chConn.Prepare(); err != nil {
|
||||
log.Fatal(ctx, "can't prepare clickhouse: %s", err)
|
||||
}
|
||||
defer chConn.Stop()
|
||||
|
||||
// Init db proxy module (postgres + clickhouse + batches)
|
||||
dbProxy := postgres.NewConn(log, pgConn, chConn)
|
||||
dbProxy := postgres.NewConn(log, pgConn, chConn, dbMetric)
|
||||
defer dbProxy.Close()
|
||||
|
||||
// Init redis connection
|
||||
|
|
@ -51,8 +53,8 @@ func main() {
|
|||
}
|
||||
defer redisClient.Close()
|
||||
|
||||
projManager := projects.New(log, pgConn, redisClient)
|
||||
sessManager := sessions.New(log, pgConn, projManager, redisClient)
|
||||
projManager := projects.New(log, pgConn, redisClient, dbMetric)
|
||||
sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric)
|
||||
tagsManager := tags.New(log, pgConn)
|
||||
|
||||
// Init data saver
|
||||
|
|
@ -63,8 +65,8 @@ func main() {
|
|||
// Web messages
|
||||
messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd,
|
||||
messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr,
|
||||
messages.MsgJSException, messages.MsgCustomEvent, messages.MsgCustomIssue,
|
||||
messages.MsgFetch, messages.MsgNetworkRequest, messages.MsgGraphQL, messages.MsgStateAction, messages.MsgMouseClick,
|
||||
messages.MsgJSException, messages.MsgResourceTiming, messages.MsgCustomEvent, messages.MsgCustomIssue,
|
||||
messages.MsgNetworkRequest, messages.MsgGraphQL, messages.MsgStateAction, messages.MsgMouseClick,
|
||||
messages.MsgMouseClickDeprecated, messages.MsgSetPageLocation, messages.MsgSetPageLocationDeprecated,
|
||||
messages.MsgPageLoadTiming, messages.MsgPageRenderTiming,
|
||||
messages.MsgPageEvent, messages.MsgPageEventDeprecated, messages.MsgMouseThrashing, messages.MsgInputChange,
|
||||
|
|
|
|||
|
|
@ -19,7 +19,7 @@ import (
|
|||
"openreplay/backend/pkg/memory"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
databaseMetrics "openreplay/backend/pkg/metrics/database"
|
||||
"openreplay/backend/pkg/metrics/database"
|
||||
enderMetrics "openreplay/backend/pkg/metrics/ender"
|
||||
"openreplay/backend/pkg/projects"
|
||||
"openreplay/backend/pkg/queue"
|
||||
|
|
@ -31,9 +31,12 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := ender.New(log)
|
||||
metrics.New(log, append(enderMetrics.List(), databaseMetrics.List()...))
|
||||
// Observability
|
||||
dbMetric := database.New("ender")
|
||||
enderMetric := enderMetrics.New("ender")
|
||||
metrics.New(log, append(enderMetric.List(), dbMetric.List()...))
|
||||
|
||||
pgConn, err := pool.New(cfg.Postgres.String())
|
||||
pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init postgres connection: %s", err)
|
||||
}
|
||||
|
|
@ -45,10 +48,10 @@ func main() {
|
|||
}
|
||||
defer redisClient.Close()
|
||||
|
||||
projManager := projects.New(log, pgConn, redisClient)
|
||||
sessManager := sessions.New(log, pgConn, projManager, redisClient)
|
||||
projManager := projects.New(log, pgConn, redisClient, dbMetric)
|
||||
sessManager := sessions.New(log, pgConn, projManager, redisClient, dbMetric)
|
||||
|
||||
sessionEndGenerator, err := sessionender.New(intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
|
||||
sessionEndGenerator, err := sessionender.New(enderMetric, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init ender service: %s", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -23,7 +23,9 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := config.New(log)
|
||||
metrics.New(log, heuristicsMetrics.List())
|
||||
// Observability
|
||||
heuristicsMetric := heuristicsMetrics.New("heuristics")
|
||||
metrics.New(log, heuristicsMetric.List())
|
||||
|
||||
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
|
||||
handlersFabric := func() []handlers.MessageProcessor {
|
||||
|
|
@ -62,7 +64,7 @@ func main() {
|
|||
}
|
||||
|
||||
// Run service and wait for TERM signal
|
||||
service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager)
|
||||
service := heuristics.New(log, cfg, producer, consumer, eventBuilder, memoryManager, heuristicsMetric)
|
||||
log.Info(ctx, "Heuristics service started")
|
||||
terminator.Wait(log, service)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,7 +9,7 @@ import (
|
|||
"openreplay/backend/pkg/db/redis"
|
||||
"openreplay/backend/pkg/logger"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
databaseMetrics "openreplay/backend/pkg/metrics/database"
|
||||
"openreplay/backend/pkg/metrics/database"
|
||||
"openreplay/backend/pkg/metrics/web"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/server"
|
||||
|
|
@ -20,13 +20,15 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := http.New(log)
|
||||
// Observability
|
||||
webMetrics := web.New("http")
|
||||
metrics.New(log, append(webMetrics.List(), databaseMetrics.List()...))
|
||||
dbMetric := database.New("http")
|
||||
metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
|
||||
|
||||
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
|
||||
defer producer.Close(15000)
|
||||
|
||||
pgConn, err := pool.New(cfg.Postgres.String())
|
||||
pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init postgres connection: %s", err)
|
||||
}
|
||||
|
|
@ -38,7 +40,7 @@ func main() {
|
|||
}
|
||||
defer redisClient.Close()
|
||||
|
||||
builder, err := services.New(log, cfg, webMetrics, producer, pgConn, redisClient)
|
||||
builder, err := services.New(log, cfg, webMetrics, dbMetric, producer, pgConn, redisClient)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "failed while creating services: %s", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,12 +9,12 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
config "openreplay/backend/internal/config/imagestorage"
|
||||
"openreplay/backend/internal/screenshot-handler"
|
||||
config "openreplay/backend/internal/config/images"
|
||||
"openreplay/backend/internal/images"
|
||||
"openreplay/backend/pkg/logger"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
storageMetrics "openreplay/backend/pkg/metrics/imagestorage"
|
||||
imagesMetrics "openreplay/backend/pkg/metrics/images"
|
||||
"openreplay/backend/pkg/objectstorage/store"
|
||||
"openreplay/backend/pkg/queue"
|
||||
)
|
||||
|
|
@ -23,16 +23,18 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := config.New(log)
|
||||
metrics.New(log, storageMetrics.List())
|
||||
// Observability
|
||||
imageMetrics := imagesMetrics.New("images")
|
||||
metrics.New(log, imageMetrics.List())
|
||||
|
||||
objStore, err := store.NewStore(&cfg.ObjectsConfig)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init object storage: %s", err)
|
||||
}
|
||||
|
||||
srv, err := screenshot_handler.New(cfg, log, objStore)
|
||||
srv, err := images.New(cfg, log, objStore, imageMetrics)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init storage service: %s", err)
|
||||
log.Fatal(ctx, "can't init images service: %s", err)
|
||||
}
|
||||
|
||||
workDir := cfg.FSDir
|
||||
|
|
@ -74,7 +76,7 @@ func main() {
|
|||
cfg.MessageSizeLimit,
|
||||
)
|
||||
|
||||
log.Info(ctx, "Image storage service started")
|
||||
log.Info(ctx, "Images service started")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
|
@ -18,16 +18,18 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := config.New(log)
|
||||
// Observability
|
||||
webMetrics := web.New("integrations")
|
||||
metrics.New(log, append(webMetrics.List(), database.List()...))
|
||||
dbMetric := database.New("integrations")
|
||||
metrics.New(log, append(webMetrics.List(), dbMetric.List()...))
|
||||
|
||||
pgConn, err := pool.New(cfg.Postgres.String())
|
||||
pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init postgres connection: %s", err)
|
||||
}
|
||||
defer pgConn.Close()
|
||||
|
||||
builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, pgConn)
|
||||
builder, err := integrations.NewServiceBuilder(log, cfg, webMetrics, dbMetric, pgConn)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init services: %s", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -9,14 +9,14 @@ import (
|
|||
"syscall"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/internal/config/sink"
|
||||
config "openreplay/backend/internal/config/sink"
|
||||
"openreplay/backend/internal/sink/assetscache"
|
||||
"openreplay/backend/internal/sink/sessionwriter"
|
||||
"openreplay/backend/internal/storage"
|
||||
"openreplay/backend/pkg/logger"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/metrics"
|
||||
sinkMetrics "openreplay/backend/pkg/metrics/sink"
|
||||
"openreplay/backend/pkg/metrics/sink"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
)
|
||||
|
|
@ -24,7 +24,9 @@ import (
|
|||
func main() {
|
||||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := sink.New(log)
|
||||
cfg := config.New(log)
|
||||
// Observability
|
||||
sinkMetrics := sink.New("sink")
|
||||
metrics.New(log, sinkMetrics.List())
|
||||
|
||||
if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
|
||||
|
|
@ -39,7 +41,7 @@ func main() {
|
|||
if err != nil {
|
||||
log.Fatal(ctx, "can't init rewriter: %s", err)
|
||||
}
|
||||
assetMessageHandler := assetscache.New(log, cfg, rewriter, producer)
|
||||
assetMessageHandler := assetscache.New(log, cfg, rewriter, producer, sinkMetrics)
|
||||
counter := storage.NewLogCounter()
|
||||
|
||||
var (
|
||||
|
|
@ -98,7 +100,6 @@ func main() {
|
|||
// Process assets
|
||||
if msg.TypeID() == messages.MsgSetNodeAttributeURLBased ||
|
||||
msg.TypeID() == messages.MsgSetCSSDataURLBased ||
|
||||
msg.TypeID() == messages.MsgCSSInsertRuleURLBased ||
|
||||
msg.TypeID() == messages.MsgAdoptedSSReplaceURLBased ||
|
||||
msg.TypeID() == messages.MsgAdoptedSSInsertRuleURLBased {
|
||||
m := msg.Decode()
|
||||
|
|
@ -192,7 +193,7 @@ func main() {
|
|||
cfg.TopicRawWeb,
|
||||
cfg.TopicRawMobile,
|
||||
},
|
||||
messages.NewSinkMessageIterator(log, msgHandler, nil, false),
|
||||
messages.NewSinkMessageIterator(log, msgHandler, nil, false, sinkMetrics),
|
||||
false,
|
||||
cfg.MessageSizeLimit,
|
||||
)
|
||||
|
|
|
|||
|
|
@ -19,16 +19,20 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := spotConfig.New(log)
|
||||
// Observability
|
||||
webMetrics := web.New("spot")
|
||||
metrics.New(log, append(webMetrics.List(), append(spotMetrics.List(), databaseMetrics.List()...)...))
|
||||
spotMetric := spotMetrics.New("spot")
|
||||
dbMetric := databaseMetrics.New("spot")
|
||||
metrics.New(log, append(webMetrics.List(), append(spotMetric.List(), dbMetric.List()...)...))
|
||||
|
||||
pgConn, err := pool.New(cfg.Postgres.String())
|
||||
pgConn, err := pool.New(dbMetric, cfg.Postgres.String())
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init postgres connection: %s", err)
|
||||
}
|
||||
defer pgConn.Close()
|
||||
|
||||
builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, pgConn)
|
||||
prefix := api.NoPrefix
|
||||
builder, err := spot.NewServiceBuilder(log, cfg, webMetrics, spotMetric, dbMetric, pgConn, prefix)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init services: %s", err)
|
||||
}
|
||||
|
|
@ -37,7 +41,7 @@ func main() {
|
|||
if err != nil {
|
||||
log.Fatal(ctx, "failed while creating router: %s", err)
|
||||
}
|
||||
router.AddHandlers(api.NoPrefix, builder.SpotsAPI)
|
||||
router.AddHandlers(prefix, builder.SpotsAPI)
|
||||
router.AddMiddlewares(builder.Auth.Middleware, builder.RateLimiter.Middleware, builder.AuditTrail.Middleware)
|
||||
|
||||
server.Run(ctx, log, &cfg.HTTP, router)
|
||||
|
|
|
|||
|
|
@ -23,13 +23,15 @@ func main() {
|
|||
ctx := context.Background()
|
||||
log := logger.New()
|
||||
cfg := config.New(log)
|
||||
metrics.New(log, storageMetrics.List())
|
||||
// Observability
|
||||
storageMetric := storageMetrics.New("storage")
|
||||
metrics.New(log, storageMetric.List())
|
||||
|
||||
objStore, err := store.NewStore(&cfg.ObjectsConfig)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init object storage: %s", err)
|
||||
}
|
||||
srv, err := storage.New(cfg, log, objStore)
|
||||
srv, err := storage.New(cfg, log, objStore, storageMetric)
|
||||
if err != nil {
|
||||
log.Fatal(ctx, "can't init storage service: %s", err)
|
||||
}
|
||||
|
|
|
|||
|
|
@ -27,6 +27,7 @@ type cacher struct {
|
|||
objStorage objectstorage.ObjectStorage // AWS Docs: "These clients are safe to use concurrently."
|
||||
httpClient *http.Client // Docs: "Clients are safe for concurrent use by multiple goroutines."
|
||||
rewriter *assets.Rewriter // Read only
|
||||
metrics metrics.Assets
|
||||
Errors chan error
|
||||
sizeLimit int
|
||||
requestHeaders map[string]string
|
||||
|
|
@ -37,7 +38,7 @@ func (c *cacher) CanCache() bool {
|
|||
return c.workers.CanAddTask()
|
||||
}
|
||||
|
||||
func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher, error) {
|
||||
func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage, metrics metrics.Assets) (*cacher, error) {
|
||||
switch {
|
||||
case cfg == nil:
|
||||
return nil, errors.New("config is nil")
|
||||
|
|
@ -93,6 +94,7 @@ func NewCacher(cfg *config.Config, store objectstorage.ObjectStorage) (*cacher,
|
|||
Errors: make(chan error),
|
||||
sizeLimit: cfg.AssetsSizeLimit,
|
||||
requestHeaders: cfg.AssetsRequestHeaders,
|
||||
metrics: metrics,
|
||||
}
|
||||
c.workers = NewPool(64, c.CacheFile)
|
||||
return c, nil
|
||||
|
|
@ -115,7 +117,7 @@ func (c *cacher) cacheURL(t *Task) {
|
|||
c.Errors <- errors.Wrap(err, t.urlContext)
|
||||
return
|
||||
}
|
||||
metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
|
||||
c.metrics.RecordDownloadDuration(float64(time.Now().Sub(start).Milliseconds()), res.StatusCode)
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode >= 400 {
|
||||
printErr := true
|
||||
|
|
@ -162,12 +164,12 @@ func (c *cacher) cacheURL(t *Task) {
|
|||
start = time.Now()
|
||||
err = c.objStorage.Upload(strings.NewReader(strData), t.cachePath, contentType, contentEncoding, objectstorage.NoCompression)
|
||||
if err != nil {
|
||||
metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
|
||||
c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), true)
|
||||
c.Errors <- errors.Wrap(err, t.urlContext)
|
||||
return
|
||||
}
|
||||
metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
|
||||
metrics.IncreaseSavedSessions()
|
||||
c.metrics.RecordUploadDuration(float64(time.Now().Sub(start).Milliseconds()), false)
|
||||
c.metrics.IncreaseSavedSessions()
|
||||
|
||||
if isCSS {
|
||||
if t.depth > 0 {
|
||||
|
|
|
|||
|
|
@ -1,162 +0,0 @@
|
|||
package canvas_handler
|
||||
|
||||
import (
|
||||
"bytes"
|
||||
"context"
|
||||
"encoding/json"
|
||||
"fmt"
|
||||
"io"
|
||||
"os"
|
||||
"os/exec"
|
||||
"strconv"
|
||||
"strings"
|
||||
"time"
|
||||
|
||||
config "openreplay/backend/internal/config/canvas-handler"
|
||||
"openreplay/backend/pkg/logger"
|
||||
"openreplay/backend/pkg/objectstorage"
|
||||
"openreplay/backend/pkg/pool"
|
||||
)
|
||||
|
||||
type ImageStorage struct {
|
||||
cfg *config.Config
|
||||
log logger.Logger
|
||||
basePath string
|
||||
saverPool pool.WorkerPool
|
||||
uploaderPool pool.WorkerPool
|
||||
objStorage objectstorage.ObjectStorage
|
||||
}
|
||||
|
||||
type saveTask struct {
|
||||
ctx context.Context
|
||||
sessionID uint64
|
||||
name string
|
||||
image *bytes.Buffer
|
||||
}
|
||||
|
||||
type uploadTask struct {
|
||||
ctx context.Context
|
||||
path string
|
||||
name string
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
}
path := cfg.FSDir + "/"
if cfg.CanvasDir != "" {
path += cfg.CanvasDir + "/"
}
s := &ImageStorage{
cfg: cfg,
log: log,
basePath: path,
objStorage: objStorage,
}
s.saverPool = pool.NewPool(4, 8, s.writeToDisk)
s.uploaderPool = pool.NewPool(4, 8, s.sendToS3)
return s, nil
}

func (v *ImageStorage) Wait() {
v.saverPool.Pause()
v.uploaderPool.Pause()
}

func (v *ImageStorage) SaveCanvasToDisk(ctx context.Context, sessID uint64, data []byte) error {
type canvasData struct {
Name string
Data []byte
}
var msg = &canvasData{}
if err := json.Unmarshal(data, msg); err != nil {
return fmt.Errorf("can't parse canvas message, err: %s", err)
}
v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, name: msg.Name, image: bytes.NewBuffer(msg.Data)})
return nil
}

func (v *ImageStorage) writeToDisk(payload interface{}) {
task := payload.(*saveTask)
path := fmt.Sprintf("%s%d/", v.basePath, task.sessionID)

// Ensure the directory exists
if err := os.MkdirAll(path, 0755); err != nil {
v.log.Fatal(task.ctx, "can't create a dir, err: %s", err)
}

// Write images to disk
outFile, err := os.Create(path + task.name)
if err != nil {
v.log.Fatal(task.ctx, "can't create an image: %s", err)
}
if _, err := io.Copy(outFile, task.image); err != nil {
v.log.Fatal(task.ctx, "can't copy data to image: %s", err)
}
outFile.Close()

v.log.Info(task.ctx, "canvas image saved, name: %s, size: %3.3f mb", task.name, float64(task.image.Len())/1024.0/1024.0)
return
}

func (v *ImageStorage) PackSessionCanvases(ctx context.Context, sessID uint64) error {
path := fmt.Sprintf("%s%d/", v.basePath, sessID)

// Check that the directory exists
files, err := os.ReadDir(path)
if err != nil {
return err
}
if len(files) == 0 {
return nil
}

names := make(map[string]bool)

// Build the list of canvas images sets
for _, file := range files {
name := strings.Split(file.Name(), ".")
parts := strings.Split(name[0], "_")
if len(name) != 2 || len(parts) != 3 {
v.log.Warn(ctx, "unknown file name: %s, skipping", file.Name())
continue
}
canvasID := fmt.Sprintf("%s_%s", parts[0], parts[1])
names[canvasID] = true
}

sessionID := strconv.FormatUint(sessID, 10)
for name := range names {
// Save to archives
archPath := fmt.Sprintf("%s%s.tar.zst", path, name)
fullCmd := fmt.Sprintf("find %s -type f -name '%s*' | tar -cf - --files-from=- | zstd -o %s",
path, name, archPath)
cmd := exec.Command("sh", "-c", fullCmd)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr

err := cmd.Run()
if err != nil {
return fmt.Errorf("failed to execute command, err: %s, stderr: %v", err, stderr.String())
}
v.uploaderPool.Submit(&uploadTask{ctx: ctx, path: archPath, name: sessionID + "/" + name + ".tar.zst"})
}
return nil
}

func (v *ImageStorage) sendToS3(payload interface{}) {
task := payload.(*uploadTask)
start := time.Now()
video, err := os.ReadFile(task.path)
if err != nil {
v.log.Fatal(task.ctx, "failed to read canvas archive: %s", err)
}
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.NoContentEncoding, objectstorage.Zstd); err != nil {
v.log.Fatal(task.ctx, "failed to upload canvas to storage: %s", err)
}
v.log.Info(task.ctx, "replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
return
}
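A note on the file-name convention the grouping loop above relies on: each canvas frame is expected to be named as three underscore-separated parts plus a single extension, and the first two parts form the canvas set key. A minimal sketch of that parsing, with a hypothetical file name (only the shape of the name is taken from the code above):

```go
package main

import (
	"fmt"
	"strings"
)

// canvasIDFromFile mirrors the grouping logic above: a file such as
// "17_4_1716400000000.png" (hypothetical name, only the "three parts joined
// by underscores plus one extension" shape comes from the code) is reduced
// to the canvas set key "17_4".
func canvasIDFromFile(fileName string) (string, bool) {
	name := strings.Split(fileName, ".")
	if len(name) != 2 {
		return "", false
	}
	parts := strings.Split(name[0], "_")
	if len(parts) != 3 {
		return "", false
	}
	return fmt.Sprintf("%s_%s", parts[0], parts[1]), true
}

func main() {
	for _, f := range []string{"17_4_1716400000000.png", "notes.txt"} {
		if id, ok := canvasIDFromFile(f); ok {
			fmt.Println(f, "->", id)
		} else {
			fmt.Println(f, "-> skipped")
		}
	}
}
```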
backend/internal/canvases/service.go (new file, 224 lines)
@@ -0,0 +1,224 @@
package canvases

import (
"bytes"
"context"
"encoding/json"
"fmt"
"io"
"os"
"os/exec"
"strconv"
"strings"
"time"

config "openreplay/backend/internal/config/canvases"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/metrics/canvas"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool"
"openreplay/backend/pkg/queue/types"
)

type ImageStorage struct {
cfg *config.Config
log logger.Logger
basePath string
saverPool pool.WorkerPool
packerPool pool.WorkerPool
uploaderPool pool.WorkerPool
objStorage objectstorage.ObjectStorage
producer types.Producer
metrics canvas.Canvas
}

type saveTask struct {
ctx context.Context
sessionID uint64
name string
image *bytes.Buffer
}

type packTask struct {
ctx context.Context
sessionID uint64
path string
name string
}

type uploadTask struct {
ctx context.Context
path string
name string
}

func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, producer types.Producer, metrics canvas.Canvas) (*ImageStorage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case log == nil:
return nil, fmt.Errorf("logger is empty")
case objStorage == nil:
return nil, fmt.Errorf("objectStorage is empty")
case producer == nil:
return nil, fmt.Errorf("producer is empty")
case metrics == nil:
return nil, fmt.Errorf("metrics is empty")
}
path := cfg.FSDir + "/"
if cfg.CanvasDir != "" {
path += cfg.CanvasDir + "/"
}
s := &ImageStorage{
cfg: cfg,
log: log,
basePath: path,
objStorage: objStorage,
producer: producer,
metrics: metrics,
}
s.saverPool = pool.NewPool(2, 2, s.writeToDisk)
s.packerPool = pool.NewPool(8, 16, s.packCanvas)
s.uploaderPool = pool.NewPool(8, 16, s.sendToS3)
return s, nil
}

func (v *ImageStorage) Wait() {
v.saverPool.Pause()
v.uploaderPool.Pause()
}

func (v *ImageStorage) SaveCanvasToDisk(ctx context.Context, sessID uint64, data []byte) error {
type canvasData struct {
Name string
Data []byte
}
var msg = &canvasData{}
if err := json.Unmarshal(data, msg); err != nil {
return fmt.Errorf("can't parse canvas message, err: %s", err)
}
v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, name: msg.Name, image: bytes.NewBuffer(msg.Data)})
return nil
}

func (v *ImageStorage) writeToDisk(payload interface{}) {
task := payload.(*saveTask)
path := fmt.Sprintf("%s%d/", v.basePath, task.sessionID)

// Ensure the directory exists
if err := os.MkdirAll(path, 0755); err != nil {
v.log.Fatal(task.ctx, "can't create a dir, err: %s", err)
}

// Write images to disk
outFile, err := os.Create(path + task.name)
if err != nil {
v.log.Fatal(task.ctx, "can't create an image: %s", err)
}
if _, err := io.Copy(outFile, task.image); err != nil {
v.log.Fatal(task.ctx, "can't copy data to image: %s", err)
}
if outFile != nil {
if err := outFile.Close(); err != nil {
v.log.Warn(task.ctx, "can't close out file: %s", err)
}
}
v.metrics.RecordCanvasImageSize(float64(task.image.Len()))
v.metrics.IncreaseTotalSavedImages()

v.log.Debug(task.ctx, "canvas image saved, name: %s, size: %3.3f mb", task.name, float64(task.image.Len())/1024.0/1024.0)
return
}

func (v *ImageStorage) PrepareSessionCanvases(ctx context.Context, sessID uint64) error {
start := time.Now()
path := fmt.Sprintf("%s%d/", v.basePath, sessID)

// Check that the directory exists
files, err := os.ReadDir(path)
if err != nil {
return err
}
if len(files) == 0 {
return nil
}

// Build the list of canvas images sets
names := make(map[string]int)
for _, file := range files {
if strings.HasSuffix(file.Name(), ".tar.zst") {
continue // Skip already created archives
}
name := strings.Split(file.Name(), ".")
parts := strings.Split(name[0], "_")
if len(name) != 2 || len(parts) != 3 {
v.log.Warn(ctx, "unknown file name: %s, skipping", file.Name())
continue
}
canvasID := fmt.Sprintf("%s_%s", parts[0], parts[1])
names[canvasID]++
}

for name, number := range names {
msg := &messages.CustomEvent{
Name: name,
Payload: path,
}
if err := v.producer.Produce(v.cfg.TopicCanvasTrigger, sessID, msg.Encode()); err != nil {
v.log.Error(ctx, "can't send canvas trigger: %s", err)
}
v.metrics.RecordImagesPerCanvas(float64(number))
}
v.metrics.RecordCanvasesPerSession(float64(len(names)))
v.metrics.RecordPreparingDuration(time.Since(start).Seconds())

v.log.Debug(ctx, "session canvases (%d) prepared in %.3fs, session: %d", len(names), time.Since(start).Seconds(), sessID)
return nil
}

func (v *ImageStorage) ProcessSessionCanvas(ctx context.Context, sessID uint64, path, name string) error {
v.packerPool.Submit(&packTask{ctx: ctx, sessionID: sessID, path: path, name: name})
return nil
}

func (v *ImageStorage) packCanvas(payload interface{}) {
task := payload.(*packTask)
start := time.Now()
sessionID := strconv.FormatUint(task.sessionID, 10)

// Save to archives
archPath := fmt.Sprintf("%s%s.tar.zst", task.path, task.name)
fullCmd := fmt.Sprintf("find %s -type f -name '%s*' ! -name '*.tar.zst' | tar -cf - --files-from=- | zstd -f -o %s",
task.path, task.name, archPath)
cmd := exec.Command("sh", "-c", fullCmd)
var stdout, stderr bytes.Buffer
cmd.Stdout = &stdout
cmd.Stderr = &stderr

err := cmd.Run()
if err != nil {
v.log.Fatal(task.ctx, "failed to execute command, err: %s, stderr: %v", err, stderr.String())
}
v.metrics.RecordArchivingDuration(time.Since(start).Seconds())
v.metrics.IncreaseTotalCreatedArchives()

v.log.Debug(task.ctx, "canvas packed successfully in %.3fs, session: %d", time.Since(start).Seconds(), task.sessionID)
v.uploaderPool.Submit(&uploadTask{ctx: task.ctx, path: archPath, name: sessionID + "/" + task.name + ".tar.zst"})
}

func (v *ImageStorage) sendToS3(payload interface{}) {
task := payload.(*uploadTask)
start := time.Now()
video, err := os.ReadFile(task.path)
if err != nil {
v.log.Fatal(task.ctx, "failed to read canvas archive: %s", err)
}
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.NoContentEncoding, objectstorage.Zstd); err != nil {
v.log.Fatal(task.ctx, "failed to upload canvas to storage: %s", err)
}
v.metrics.RecordUploadingDuration(time.Since(start).Seconds())
v.metrics.RecordArchiveSize(float64(len(video)))

v.log.Debug(task.ctx, "replay file (size: %d) uploaded successfully in %.3fs", len(video), time.Since(start).Seconds())
}
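The new service above splits the work into three pools: saverPool writes frames to disk, packerPool archives one canvas set, and uploaderPool pushes the archive to object storage, with a CustomEvent on TopicCanvasTrigger connecting the last two stages. A hypothetical wiring sketch based only on the exported methods in this file; the real consumer loop, queue setup and config loading are outside this diff:

```go
package main

import (
	"context"

	"openreplay/backend/internal/canvases"
	config "openreplay/backend/internal/config/canvases"
	"openreplay/backend/pkg/logger"
	"openreplay/backend/pkg/metrics/canvas"
	"openreplay/backend/pkg/objectstorage"
	"openreplay/backend/pkg/queue/types"
)

// runCanvasPipeline is an illustrative wiring of the three stages; the
// dependencies are assumed to be built elsewhere (not shown in this diff).
func runCanvasPipeline(ctx context.Context, cfg *config.Config, log logger.Logger,
	objStore objectstorage.ObjectStorage, producer types.Producer, metrics canvas.Canvas,
	sessID uint64, rawCanvasMsg []byte) error {
	srv, err := canvases.New(cfg, log, objStore, producer, metrics)
	if err != nil {
		return err
	}
	defer srv.Wait() // let the saver/packer/uploader pools drain on shutdown

	// Stage 1: persist each incoming canvas frame under <FSDir>/<CanvasDir>/<sessID>/.
	if err := srv.SaveCanvasToDisk(ctx, sessID, rawCanvasMsg); err != nil {
		return err
	}
	// Stage 2: at session end, group frames per canvas and publish one
	// trigger per canvas set to TopicCanvasTrigger.
	if err := srv.PrepareSessionCanvases(ctx, sessID); err != nil {
		return err
	}
	// Stage 3: on each trigger read back from the queue, pack the set into
	// <sessID>/<canvasID>.tar.zst and upload it. Path and name below are
	// placeholders for the values carried by the trigger message.
	return srv.ProcessSessionCanvas(ctx, sessID, "/mnt/efs/canvas/1/", "17_4")
}
```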
@@ -1,4 +1,4 @@
package canvas_handler
package canvases

import (
"openreplay/backend/internal/config/common"

@@ -12,8 +12,8 @@ type Config struct {
objectstorage.ObjectsConfig
FSDir string `env:"FS_DIR,required"`
CanvasDir string `env:"CANVAS_DIR,default=canvas"`
TopicCanvasImages string `env:"TOPIC_CANVAS_IMAGES,required"`
TopicCanvasTrigger string `env:"TOPIC_CANVAS_TRIGGER,required"`
TopicCanvasImages string `env:"TOPIC_CANVAS_IMAGES,required"` // For canvas images and sessionEnd events from ender
TopicCanvasTrigger string `env:"TOPIC_CANVAS_TRIGGER,required"` // For trigger events to start processing (archive and upload)
GroupCanvasImage string `env:"GROUP_CANVAS_IMAGE,required"`
UseProfiler bool `env:"PROFILER_ENABLED,default=false"`
}
@@ -88,6 +88,6 @@ type HTTP struct {
HTTPTimeout time.Duration `env:"HTTP_TIMEOUT,default=60s"`
JsonSizeLimit int64 `env:"JSON_SIZE_LIMIT,default=131072"` // 128KB, 1000 for HTTP service
UseAccessControlHeaders bool `env:"USE_CORS,default=false"`
JWTSecret string `env:"JWT_SECRET,required"`
JWTSpotSecret string `env:"JWT_SPOT_SECRET,required"`
JWTSecret string `env:"JWT_SECRET"`
JWTSpotSecret string `env:"JWT_SPOT_SECRET"`
}
@@ -1,4 +1,4 @@
package imagestorage
package images

import (
"openreplay/backend/internal/config/common"
@@ -2,11 +2,12 @@ package datasaver

import (
"context"
"encoding/json"
"openreplay/backend/pkg/db/types"

"openreplay/backend/internal/config/db"
"openreplay/backend/pkg/db/clickhouse"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/db/types"
"openreplay/backend/pkg/logger"
. "openreplay/backend/pkg/messages"
queue "openreplay/backend/pkg/queue/types"

@@ -50,10 +51,6 @@ func New(log logger.Logger, cfg *db.Config, pg *postgres.Conn, ch clickhouse.Con
}

func (s *saverImpl) Handle(msg Message) {
if msg.TypeID() == MsgCustomEvent {
defer s.Handle(types.WrapCustomEvent(msg.(*CustomEvent)))
}

var (
sessCtx = context.WithValue(context.Background(), "sessionID", msg.SessionID())
session *sessions.Session

@@ -69,17 +66,34 @@ func (s *saverImpl) Handle(msg Message) {
return
}

if msg.TypeID() == MsgCustomEvent {
m := msg.(*CustomEvent)
// Try to parse custom event payload to JSON and extract or_payload field
type CustomEventPayload struct {
CustomTimestamp uint64 `json:"or_timestamp"`
}
customPayload := &CustomEventPayload{}
if err := json.Unmarshal([]byte(m.Payload), customPayload); err == nil {
if customPayload.CustomTimestamp >= session.Timestamp {
s.log.Info(sessCtx, "custom event timestamp received: %v", m.Timestamp)
msg.Meta().Timestamp = customPayload.CustomTimestamp
s.log.Info(sessCtx, "custom event timestamp updated: %v", m.Timestamp)
}
}
defer s.Handle(types.WrapCustomEvent(m))
}

if IsMobileType(msg.TypeID()) {
if err := s.handleMobileMessage(sessCtx, session, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
s.log.Error(sessCtx, "mobile message insertion error, msg: %+v, err: %s", msg, err)
s.log.Error(sessCtx, "mobile message insertion error, msg: %+v, err: %.200s", msg, err)
}
return
}
} else {
if err := s.handleWebMessage(sessCtx, session, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
s.log.Error(sessCtx, "web message insertion error, msg: %+v, err: %s", msg, err)
s.log.Error(sessCtx, "web message insertion error, msg: %+v, err: %.200s", msg, err)
}
return
}
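The new branch above lets a tracker supply its own event time: if the custom event payload carries an or_timestamp that is not earlier than the session start, it overrides the message meta timestamp before the event is wrapped and re-handled. A small standalone sketch of that parse; everything in the payload other than the or_timestamp field name is hypothetical:

```go
package main

import (
	"encoding/json"
	"fmt"
)

// Only or_timestamp is inspected by the check in the diff above; any other
// keys in the custom event payload are ignored by it.
type CustomEventPayload struct {
	CustomTimestamp uint64 `json:"or_timestamp"`
}

func main() {
	// Hypothetical payload sent by a tracker SDK.
	payload := `{"or_timestamp": 1716400000000, "plan": "enterprise"}`

	var p CustomEventPayload
	if err := json.Unmarshal([]byte(payload), &p); err == nil && p.CustomTimestamp != 0 {
		// In the service this value would replace msg.Meta().Timestamp,
		// provided it is not before the session start timestamp.
		fmt.Println("event timestamp overridden to", p.CustomTimestamp)
	} else {
		fmt.Println("no usable or_timestamp, keep the original meta timestamp")
	}
}
```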
@@ -11,7 +11,7 @@ import (
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/memory"
"openreplay/backend/pkg/messages"
metrics "openreplay/backend/pkg/metrics/heuristics"
heuristicMetrics "openreplay/backend/pkg/metrics/heuristics"
"openreplay/backend/pkg/queue/types"
)

@@ -23,11 +23,12 @@ type heuristicsImpl struct {
consumer types.Consumer
events builders.EventBuilder
mm memory.Manager
metrics heuristicMetrics.Heuristics
done chan struct{}
finished chan struct{}
}

func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager) service.Interface {
func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Consumer, e builders.EventBuilder, mm memory.Manager, metrics heuristicMetrics.Heuristics) service.Interface {
s := &heuristicsImpl{
log: log,
ctx: context.Background(),

@@ -36,6 +37,7 @@ func New(log logger.Logger, cfg *heuristics.Config, p types.Producer, c types.Co
consumer: c,
events: e,
mm: mm,
metrics: metrics,
done: make(chan struct{}),
finished: make(chan struct{}),
}

@@ -51,7 +53,7 @@ func (h *heuristicsImpl) run() {
if err := h.producer.Produce(h.cfg.TopicAnalytics, evt.SessionID(), evt.Encode()); err != nil {
h.log.Error(h.ctx, "can't send new event to queue: %s", err)
} else {
metrics.IncreaseTotalEvents(messageTypeName(evt))
h.metrics.IncreaseTotalEvents(messageTypeName(evt))
}
case <-tick:
h.producer.Flush(h.cfg.ProducerTimeout)
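The heuristics service now receives its metrics collector through New instead of calling package-level helpers. A minimal sketch of why that injection helps, using a stand-in interface that models only the single method visible in this hunk (the real Heuristics interface is larger and lives in pkg/metrics/heuristics):

```go
package main

import "fmt"

// eventCounter is a stand-in for heuristicMetrics.Heuristics; only the one
// method visible in the diff is modelled, so this shows the pattern, not the
// real interface.
type eventCounter interface {
	IncreaseTotalEvents(name string)
}

type noopCounter struct{}

func (noopCounter) IncreaseTotalEvents(string) {}

type logCounter struct{}

func (logCounter) IncreaseTotalEvents(name string) { fmt.Println("event:", name) }

// emit has the same shape as the run() branch above: whatever collector was
// passed to the constructor gets the call, so tests can inject a no-op while
// production wires the real implementation.
func emit(m eventCounter, name string) {
	m.IncreaseTotalEvents(name)
}

func main() {
	emit(noopCounter{}, "click_rage")
	emit(logCounter{}, "click_rage")
}
```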
@@ -12,6 +12,7 @@ import (
featureflagsAPI "openreplay/backend/pkg/featureflags/api"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics/database"
"openreplay/backend/pkg/metrics/web"
"openreplay/backend/pkg/objectstorage/store"
"openreplay/backend/pkg/projects"

@@ -36,8 +37,8 @@ type ServicesBuilder struct {
UxTestsAPI api.Handlers
}

func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
projs := projects.New(log, pgconn, redis)
func New(log logger.Logger, cfg *http.Config, webMetrics web.Web, dbMetrics database.Database, producer types.Producer, pgconn pool.Pool, redis *redis.Client) (*ServicesBuilder, error) {
projs := projects.New(log, pgconn, redis, dbMetrics)
objStore, err := store.NewStore(&cfg.ObjectsConfig)
if err != nil {
return nil, err

@@ -53,11 +54,11 @@ func New(log logger.Logger, cfg *http.Config, metrics web.Web, producer types.Pr
tokenizer := token.NewTokenizer(cfg.TokenSecret)
conditions := conditions.New(pgconn)
flaker := flakeid.NewFlaker(cfg.WorkerID)
sessions := sessions.New(log, pgconn, projs, redis)
sessions := sessions.New(log, pgconn, projs, redis, dbMetrics)
featureFlags := featureflags.New(pgconn)
tags := tags.New(log, pgconn)
uxTesting := uxtesting.New(pgconn)
responser := api.NewResponser(metrics)
responser := api.NewResponser(webMetrics)
builder := &ServicesBuilder{}
if builder.WebAPI, err = websessions.NewHandlers(cfg, log, responser, producer, projs, sessions, uaModule, geoModule, tokenizer, conditions, flaker); err != nil {
return nil, err
@@ -1,4 +1,4 @@
package screenshot_handler
package images

import (
"archive/tar"

@@ -6,16 +6,18 @@ import (
"context"
"fmt"
"io"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool"
"os"
"os/exec"
"strconv"
"time"

gzip "github.com/klauspost/pgzip"
config "openreplay/backend/internal/config/imagestorage"

config "openreplay/backend/internal/config/images"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/metrics/images"
"openreplay/backend/pkg/objectstorage"
"openreplay/backend/pkg/pool"
)

type saveTask struct {

@@ -37,20 +39,28 @@ type ImageStorage struct {
objStorage objectstorage.ObjectStorage
saverPool pool.WorkerPool
uploaderPool pool.WorkerPool
metrics images.Images
}

func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*ImageStorage, error) {
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, metrics images.Images) (*ImageStorage, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case log == nil:
return nil, fmt.Errorf("logger is empty")
case objStorage == nil:
return nil, fmt.Errorf("objStorage is empty")
case metrics == nil:
return nil, fmt.Errorf("metrics is empty")
}
s := &ImageStorage{
cfg: cfg,
log: log,
objStorage: objStorage,
metrics: metrics,
}
s.saverPool = pool.NewPool(4, 8, s.writeToDisk)
s.uploaderPool = pool.NewPool(4, 4, s.sendToS3)
s.uploaderPool = pool.NewPool(8, 8, s.sendToS3)
return s, nil
}

@@ -87,8 +97,11 @@ func (v *ImageStorage) Process(ctx context.Context, sessID uint64, data []byte)
v.log.Error(ctx, "ExtractTarGz: unknown type: %d in %s", header.Typeflag, header.Name)
}
}
v.metrics.RecordOriginalArchiveExtractionDuration(time.Since(start).Seconds())
v.metrics.RecordOriginalArchiveSize(float64(len(images)))
v.metrics.IncreaseTotalSavedArchives()

v.log.Info(ctx, "arch size: %d, extracted archive in: %s", len(data), time.Since(start))
v.log.Debug(ctx, "arch size: %d, extracted archive in: %s", len(data), time.Since(start))
v.saverPool.Submit(&saveTask{ctx: ctx, sessionID: sessID, images: images})
return nil
}

@@ -100,17 +113,17 @@ func (v *ImageStorage) writeToDisk(payload interface{}) {
if v.cfg.ScreenshotsDir != "" {
path += v.cfg.ScreenshotsDir + "/"
}

path += strconv.FormatUint(task.sessionID, 10) + "/"

// Ensure the directory exists
if err := os.MkdirAll(path, 0755); err != nil {
v.log.Fatal(task.ctx, "Error creating directories: %v", err)
v.log.Fatal(task.ctx, "error creating directories: %v", err)
}

// Write images to disk
saved := 0
for name, img := range task.images {
start := time.Now()
outFile, err := os.Create(path + name) // or open file in rewrite mode
if err != nil {
v.log.Error(task.ctx, "can't create file: %s", err.Error())

@@ -118,18 +131,21 @@ func (v *ImageStorage) writeToDisk(payload interface{}) {
if _, err := io.Copy(outFile, img); err != nil {
v.log.Error(task.ctx, "can't copy file: %s", err.Error())
}
outFile.Close()
if outFile == nil {
continue
}
if err := outFile.Close(); err != nil {
v.log.Warn(task.ctx, "can't close file: %s", err.Error())
}
v.metrics.RecordSavingImageDuration(time.Since(start).Seconds())
v.metrics.IncreaseTotalSavedImages()
saved++
}
v.log.Info(task.ctx, "saved %d images to disk", saved)
v.log.Debug(task.ctx, "saved %d images to disk", saved)
return
}

func (v *ImageStorage) PackScreenshots(ctx context.Context, sessID uint64, filesPath string) error {
// Temporarily disabled for tests
if v.objStorage == nil {
return fmt.Errorf("object storage is empty")
}
start := time.Now()
sessionID := strconv.FormatUint(sessID, 10)
selector := fmt.Sprintf("%s*.jpeg", filesPath)

@@ -146,8 +162,10 @@ func (v *ImageStorage) PackScreenshots(ctx context.Context, sessID uint64, files
if err != nil {
return fmt.Errorf("failed to execute command: %v, stderr: %v", err, stderr.String())
}
v.log.Info(ctx, "packed replay in %v", time.Since(start))
v.metrics.RecordArchivingDuration(time.Since(start).Seconds())
v.metrics.IncreaseTotalCreatedArchives()

v.log.Debug(ctx, "packed replay in %v", time.Since(start))
v.uploaderPool.Submit(&uploadTask{ctx: ctx, sessionID: sessionID, path: archPath, name: sessionID + "/replay.tar.zst"})
return nil
}

@@ -162,6 +180,9 @@ func (v *ImageStorage) sendToS3(payload interface{}) {
if err := v.objStorage.Upload(bytes.NewReader(video), task.name, "application/octet-stream", objectstorage.NoContentEncoding, objectstorage.Zstd); err != nil {
v.log.Fatal(task.ctx, "failed to upload replay file: %s", err)
}
v.log.Info(task.ctx, "replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
v.metrics.RecordUploadingDuration(time.Since(start).Seconds())
v.metrics.RecordArchiveSize(float64(len(video)))

v.log.Debug(task.ctx, "replay file (size: %d) uploaded successfully in %v", len(video), time.Since(start))
return
}
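The screenshots service above packs each session into <sessionID>/replay.tar.zst by shelling out to tar and zstd, then uploads the result with the Zstd marker. A reader-side sketch for inspecting such an archive locally; it uses github.com/klauspost/compress/zstd, which is one common Go zstd reader and is not something this service itself imports (the service relies on the zstd binary):

```go
package main

import (
	"archive/tar"
	"fmt"
	"io"
	"log"
	"os"

	"github.com/klauspost/compress/zstd"
)

// listReplayArchive lists the entries of a replay.tar.zst file downloaded
// from object storage. This is a standalone inspection helper, not part of
// the OpenReplay codebase.
func listReplayArchive(path string) error {
	f, err := os.Open(path)
	if err != nil {
		return err
	}
	defer f.Close()

	dec, err := zstd.NewReader(f) // decompress the zstd stream
	if err != nil {
		return err
	}
	defer dec.Close()

	tr := tar.NewReader(dec) // then walk the tar inside it
	for {
		hdr, err := tr.Next()
		if err == io.EOF {
			return nil
		}
		if err != nil {
			return err
		}
		fmt.Printf("%s (%d bytes)\n", hdr.Name, hdr.Size)
	}
}

func main() {
	if err := listReplayArchive("replay.tar.zst"); err != nil {
		log.Fatal(err)
	}
}
```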
@@ -21,6 +21,7 @@ type session struct {

// SessionEnder updates timestamp of last message for each session
type SessionEnder struct {
metrics ender.Ender
timeout int64
sessions map[uint64]*session // map[sessionID]session
timeCtrl *timeController

@@ -28,8 +29,9 @@ type SessionEnder struct {
enabled bool
}

func New(timeout int64, parts int) (*SessionEnder, error) {
func New(metrics ender.Ender, timeout int64, parts int) (*SessionEnder, error) {
return &SessionEnder{
metrics: metrics,
timeout: timeout,
sessions: make(map[uint64]*session),
timeCtrl: NewTimeController(parts),

@@ -56,7 +58,7 @@ func (se *SessionEnder) ActivePartitions(parts []uint64) {
for sessID, _ := range se.sessions {
if !activeParts[sessID%se.parts] {
delete(se.sessions, sessID)
ender.DecreaseActiveSessions()
se.metrics.DecreaseActiveSessions()
removedSessions++
} else {
activeSessions++

@@ -89,8 +91,8 @@ func (se *SessionEnder) UpdateSession(msg messages.Message) {
isEnded: false,
isMobile: messages.IsMobileType(msg.TypeID()),
}
ender.IncreaseActiveSessions()
ender.IncreaseTotalSessions()
se.metrics.IncreaseActiveSessions()
se.metrics.IncreaseTotalSessions()
return
}
// Keep the highest user's timestamp for correct session duration value

@@ -139,8 +141,8 @@ func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
sess.isEnded = true
if res, _ := handler(sessID, sess.lastUserTime); res {
delete(se.sessions, sessID)
ender.DecreaseActiveSessions()
ender.IncreaseClosedSessions()
se.metrics.DecreaseActiveSessions()
se.metrics.IncreaseClosedSessions()
removedSessions++
if endCase == 2 {
brokerTime[1]++
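The ender hunks above only show the metrics calls, but the bookkeeping they instrument is: remember the latest user timestamp per session and close a session once nothing has arrived for the configured timeout. A toy model of that logic; the types and names here are illustrative and are not the real SessionEnder API:

```go
package main

import (
	"fmt"
	"time"
)

// toyEnder keeps one entry per live session and ends a session when no
// message arrived for `timeout`. The real SessionEnder also shards sessions
// by partition and reports active/closed counters; none of that is modelled.
type toyEnder struct {
	timeout  time.Duration
	lastSeen map[uint64]time.Time
}

func (e *toyEnder) update(sessID uint64, at time.Time) {
	if prev, ok := e.lastSeen[sessID]; !ok || at.After(prev) {
		e.lastSeen[sessID] = at // keep the highest timestamp per session
	}
}

func (e *toyEnder) handleEnded(now time.Time, onEnd func(sessID uint64) bool) {
	for sessID, last := range e.lastSeen {
		if now.Sub(last) >= e.timeout && onEnd(sessID) {
			delete(e.lastSeen, sessID) // only drop it once the handler succeeded
		}
	}
}

func main() {
	e := &toyEnder{timeout: 2 * time.Minute, lastSeen: map[uint64]time.Time{}}
	start := time.Now()
	e.update(42, start)
	e.update(42, start.Add(30*time.Second))
	e.handleEnded(start.Add(3*time.Minute), func(sessID uint64) bool {
		fmt.Println("session ended:", sessID)
		return true
	})
}
```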
@@ -12,7 +12,7 @@ import (
"openreplay/backend/internal/config/sink"
"openreplay/backend/pkg/logger"
"openreplay/backend/pkg/messages"
metrics "openreplay/backend/pkg/metrics/sink"
sinkMetrics "openreplay/backend/pkg/metrics/sink"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/url/assets"
)

@@ -30,9 +30,10 @@ type AssetsCache struct {
producer types.Producer
cache map[string]*CachedAsset
blackList []string // use "example.com" to filter all domains or ".example.com" to filter only third-level domain
metrics sinkMetrics.Sink
}

func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics sinkMetrics.Sink) *AssetsCache {
assetsCache := &AssetsCache{
log: log,
cfg: cfg,

@@ -40,6 +41,7 @@ func New(log logger.Logger, cfg *sink.Config, rewriter *assets.Rewriter, produce
producer: producer,
cache: make(map[string]*CachedAsset, 64),
blackList: make([]string, 0),
metrics: metrics,
}
// Parse black list for cache layer
if len(cfg.CacheBlackList) > 0 {

@@ -76,7 +78,7 @@ func (e *AssetsCache) clearCache() {
if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
deleted++
delete(e.cache, id)
metrics.DecreaseCachedAssets()
e.metrics.DecreaseCachedAssets()
}
}
e.log.Info(context.Background(), "cache cleaner: deleted %d/%d assets", deleted, cacheSize)

@@ -131,17 +133,6 @@ func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
}
newMsg.SetMeta(msg.Meta())
return newMsg
case *messages.CSSInsertRuleURLBased:
if e.shouldSkipAsset(m.BaseURL) {
return msg
}
newMsg := &messages.CSSInsertRule{
ID: m.ID,
Index: m.Index,
Rule: e.handleCSS(m.SessionID(), m.BaseURL, m.Rule),
}
newMsg.SetMeta(msg.Meta())
return newMsg
case *messages.AdoptedSSReplaceURLBased:
if e.shouldSkipAsset(m.BaseURL) {
return msg

@@ -205,7 +196,7 @@ func parseHost(baseURL string) (string, error) {
}

func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
metrics.IncreaseTotalAssets()
e.metrics.IncreaseTotalAssets()
// Try to find asset in cache
h := md5.New()
// Cut first part of url (scheme + host)

@@ -228,7 +219,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
e.mutex.RUnlock()
if ok {
if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
metrics.IncreaseSkippedAssets()
e.metrics.IncreaseSkippedAssets()
return cachedAsset.msg
}
}

@@ -240,8 +231,8 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
start := time.Now()
res := e.getRewrittenCSS(sessionID, baseURL, css)
duration := time.Now().Sub(start).Milliseconds()
metrics.RecordAssetSize(float64(len(res)))
metrics.RecordProcessAssetDuration(float64(duration))
e.metrics.RecordAssetSize(float64(len(res)))
e.metrics.RecordProcessAssetDuration(float64(duration))
// Save asset to cache if we spent more than threshold
if duration > e.cfg.CacheThreshold {
e.mutex.Lock()

@@ -250,7 +241,7 @@ func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) st
ts: time.Now(),
}
e.mutex.Unlock()
metrics.IncreaseCachedAssets()
e.metrics.IncreaseCachedAssets()
}
// Return rewritten asset
return res
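The blackList comment above documents two matching modes: a bare domain such as "example.com" filters that domain and all of its subdomains, while a leading dot such as ".example.com" filters only subdomains. The shouldSkipAsset implementation is not part of this diff, so the following is an interpretation of that comment rather than the project's code:

```go
package main

import (
	"fmt"
	"net/url"
	"strings"
)

// isBlacklisted illustrates the rule documented on the blackList field:
// a bare entry blocks the domain and every subdomain, a dotted entry blocks
// subdomains only. Treat this as a reading of the comment, not as the real
// shouldSkipAsset.
func isBlacklisted(rawURL string, blackList []string) bool {
	u, err := url.Parse(rawURL)
	if err != nil {
		return false
	}
	host := u.Hostname()
	for _, entry := range blackList {
		if strings.HasPrefix(entry, ".") {
			if strings.HasSuffix(host, entry) {
				return true // third-level (and deeper) domains only
			}
		} else if host == entry || strings.HasSuffix(host, "."+entry) {
			return true // the domain itself and all subdomains
		}
	}
	return false
}

func main() {
	bl := []string{"example.com", ".cdn.org"}
	for _, u := range []string{"https://example.com/a.css", "https://static.cdn.org/b.css", "https://cdn.org/c.css"} {
		fmt.Println(u, "blacklisted:", isBlacklisted(u, bl))
	}
}
```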
@ -18,7 +18,7 @@ import (
|
|||
config "openreplay/backend/internal/config/storage"
|
||||
"openreplay/backend/pkg/logger"
|
||||
"openreplay/backend/pkg/messages"
|
||||
metrics "openreplay/backend/pkg/metrics/storage"
|
||||
storageMetrics "openreplay/backend/pkg/metrics/storage"
|
||||
"openreplay/backend/pkg/objectstorage"
|
||||
"openreplay/backend/pkg/pool"
|
||||
)
|
||||
|
|
@ -77,9 +77,10 @@ type Storage struct {
|
|||
splitTime uint64
|
||||
processorPool pool.WorkerPool
|
||||
uploaderPool pool.WorkerPool
|
||||
metrics storageMetrics.Storage
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage) (*Storage, error) {
|
||||
func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectStorage, metrics storageMetrics.Storage) (*Storage, error) {
|
||||
switch {
|
||||
case cfg == nil:
|
||||
return nil, fmt.Errorf("config is empty")
|
||||
|
|
@ -92,6 +93,7 @@ func New(cfg *config.Config, log logger.Logger, objStorage objectstorage.ObjectS
|
|||
objStorage: objStorage,
|
||||
startBytes: make([]byte, cfg.FileSplitSize),
|
||||
splitTime: parseSplitTime(cfg.FileSplitTime),
|
||||
metrics: metrics,
|
||||
}
|
||||
s.processorPool = pool.NewPool(1, 1, s.doCompression)
|
||||
s.uploaderPool = pool.NewPool(1, 1, s.uploadSession)
|
||||
|
|
@ -141,7 +143,7 @@ func (s *Storage) Process(ctx context.Context, msg *messages.SessionEnd) (err er
|
|||
if err != nil {
|
||||
if strings.Contains(err.Error(), "big file") {
|
||||
s.log.Warn(ctx, "can't process session: %s", err)
|
||||
metrics.IncreaseStorageTotalSkippedSessions()
|
||||
s.metrics.IncreaseStorageTotalSkippedSessions()
|
||||
return nil
|
||||
}
|
||||
return err
|
||||
|
|
@ -159,8 +161,8 @@ func (s *Storage) prepareSession(path string, tp FileType, task *Task) error {
|
|||
return err
|
||||
}
|
||||
|
||||
metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
|
||||
metrics.RecordSessionSize(float64(len(mob)), tp.String())
|
||||
s.metrics.RecordSessionReadDuration(float64(time.Now().Sub(startRead).Milliseconds()), tp.String())
|
||||
s.metrics.RecordSessionSize(float64(len(mob)), tp.String())
|
||||
|
||||
// Put opened session file into task struct
|
||||
task.SetMob(mob, index, tp)
|
||||
|
|
@ -174,7 +176,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
|
|||
// Check file size before download into memory
|
||||
info, err := os.Stat(filePath)
|
||||
if err == nil && info.Size() > s.cfg.MaxFileSize {
|
||||
metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
|
||||
s.metrics.RecordSkippedSessionSize(float64(info.Size()), tp.String())
|
||||
return nil, -1, fmt.Errorf("big file, size: %d", info.Size())
|
||||
}
|
||||
// Read file into memory
|
||||
|
|
@ -190,7 +192,7 @@ func (s *Storage) openSession(ctx context.Context, filePath string, tp FileType)
|
|||
if err != nil {
|
||||
return nil, -1, fmt.Errorf("can't sort session, err: %s", err)
|
||||
}
|
||||
metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
|
||||
s.metrics.RecordSessionSortDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
|
||||
return mob, index, nil
|
||||
}
|
||||
|
||||
|
|
@ -234,12 +236,12 @@ func (s *Storage) packSession(task *Task, tp FileType) {
|
|||
// Compression
|
||||
start := time.Now()
|
||||
data := s.compress(task.ctx, mob, task.compression)
|
||||
metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
|
||||
s.metrics.RecordSessionCompressDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
|
||||
|
||||
// Encryption
|
||||
start = time.Now()
|
||||
result := s.encryptSession(task.ctx, data.Bytes(), task.key)
|
||||
metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
|
||||
s.metrics.RecordSessionEncryptionDuration(float64(time.Now().Sub(start).Milliseconds()), tp.String())
|
||||
|
||||
if tp == DOM {
|
||||
task.doms = bytes.NewBuffer(result)
|
||||
|
|
@ -296,8 +298,8 @@ func (s *Storage) packSession(task *Task, tp FileType) {
|
|||
wg.Wait()
|
||||
|
||||
// Record metrics
|
||||
metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
|
||||
metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
|
||||
s.metrics.RecordSessionEncryptionDuration(float64(firstEncrypt+secondEncrypt), tp.String())
|
||||
s.metrics.RecordSessionCompressDuration(float64(firstPart+secondPart), tp.String())
|
||||
}
|
||||
|
||||
func (s *Storage) encryptSession(ctx context.Context, data []byte, encryptionKey string) []byte {
|
||||
|
|
@ -382,7 +384,7 @@ func (s *Storage) uploadSession(payload interface{}) {
|
|||
go func() {
|
||||
if task.doms != nil {
|
||||
// Record compression ratio
|
||||
metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
|
||||
s.metrics.RecordSessionCompressionRatio(task.domsRawSize/float64(task.doms.Len()), DOM.String())
|
||||
// Upload session to s3
|
||||
start := time.Now()
|
||||
if err := s.objStorage.Upload(task.doms, task.id+string(DOM)+"s", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
|
||||
|
|
@ -395,7 +397,7 @@ func (s *Storage) uploadSession(payload interface{}) {
|
|||
go func() {
|
||||
if task.dome != nil {
|
||||
// Record compression ratio
|
||||
metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
|
||||
s.metrics.RecordSessionCompressionRatio(task.domeRawSize/float64(task.dome.Len()), DOM.String())
|
||||
// Upload session to s3
|
||||
start := time.Now()
|
||||
if err := s.objStorage.Upload(task.dome, task.id+string(DOM)+"e", "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
|
||||
|
|
@ -408,7 +410,7 @@ func (s *Storage) uploadSession(payload interface{}) {
|
|||
go func() {
|
||||
if task.dev != nil {
|
||||
// Record compression ratio
|
||||
metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
|
||||
s.metrics.RecordSessionCompressionRatio(task.devRawSize/float64(task.dev.Len()), DEV.String())
|
||||
// Upload session to s3
|
||||
start := time.Now()
|
||||
if err := s.objStorage.Upload(task.dev, task.id+string(DEV), "application/octet-stream", objectstorage.NoContentEncoding, task.compression); err != nil {
|
||||
|
|
@ -419,9 +421,9 @@ func (s *Storage) uploadSession(payload interface{}) {
|
|||
wg.Done()
|
||||
}()
|
||||
wg.Wait()
|
||||
metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
|
||||
metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
|
||||
metrics.IncreaseStorageTotalSessions()
|
||||
s.metrics.RecordSessionUploadDuration(float64(uploadDoms+uploadDome), DOM.String())
|
||||
s.metrics.RecordSessionUploadDuration(float64(uploadDev), DEV.String())
|
||||
s.metrics.IncreaseStorageTotalSessions()
|
||||
}
|
||||
|
||||
func (s *Storage) doCompression(payload interface{}) {
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ package analytics
|
|||
import (
|
||||
"github.com/go-playground/validator/v10"
|
||||
"openreplay/backend/pkg/analytics/charts"
|
||||
"openreplay/backend/pkg/metrics/database"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/internal/config/analytics"
|
||||
|
|
@ -26,9 +27,9 @@ type ServicesBuilder struct {
|
|||
ChartsAPI api.Handlers
|
||||
}
|
||||
|
||||
func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, pgconn pool.Pool) (*ServicesBuilder, error) {
|
||||
func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.Web, dbMetrics database.Database, pgconn pool.Pool) (*ServicesBuilder, error) {
|
||||
responser := api.NewResponser(webMetrics)
|
||||
audiTrail, err := tracer.NewTracer(log, pgconn)
|
||||
audiTrail, err := tracer.NewTracer(log, pgconn, dbMetrics)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
|
|
@ -58,7 +59,7 @@ func NewServiceBuilder(log logger.Logger, cfg *analytics.Config, webMetrics web.
|
|||
return nil, err
|
||||
}
|
||||
return &ServicesBuilder{
|
||||
Auth: auth.NewAuth(log, cfg.JWTSecret, cfg.JWTSpotSecret, pgconn, nil),
|
||||
Auth: auth.NewAuth(log, cfg.JWTSecret, cfg.JWTSpotSecret, pgconn, nil, api.NoPrefix),
|
||||
RateLimiter: limiter.NewUserRateLimiter(10, 30, 1*time.Minute, 5*time.Minute),
|
||||
AuditTrail: audiTrail,
|
||||
CardsAPI: cardsHandlers,
|
||||
|
|
|
|||
|
|
@ -18,13 +18,14 @@ type Bulk interface {
|
|||
}
|
||||
|
||||
type bulkImpl struct {
|
||||
conn driver.Conn
|
||||
table string
|
||||
query string
|
||||
values [][]interface{}
|
||||
conn driver.Conn
|
||||
metrics database.Database
|
||||
table string
|
||||
query string
|
||||
values [][]interface{}
|
||||
}
|
||||
|
||||
func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
|
||||
func NewBulk(conn driver.Conn, metrics database.Database, table, query string) (Bulk, error) {
|
||||
switch {
|
||||
case conn == nil:
|
||||
return nil, errors.New("clickhouse connection is empty")
|
||||
|
|
@ -34,10 +35,11 @@ func NewBulk(conn driver.Conn, table, query string) (Bulk, error) {
|
|||
return nil, errors.New("query is empty")
|
||||
}
|
||||
return &bulkImpl{
|
||||
conn: conn,
|
||||
table: table,
|
||||
query: query,
|
||||
values: make([][]interface{}, 0),
|
||||
conn: conn,
|
||||
metrics: metrics,
|
||||
table: table,
|
||||
query: query,
|
||||
values: make([][]interface{}, 0),
|
||||
}, nil
|
||||
}
|
||||
|
||||
|
|
@ -60,8 +62,8 @@ func (b *bulkImpl) Send() error {
|
|||
}
|
||||
err = batch.Send()
|
||||
// Save bulk metrics
|
||||
database.RecordBulkElements(float64(len(b.values)), "ch", b.table)
|
||||
database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
|
||||
b.metrics.RecordBulkElements(float64(len(b.values)), "ch", b.table)
|
||||
b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "ch", b.table)
|
||||
// Prepare values slice for a new data
|
||||
b.values = make([][]interface{}, 0)
|
||||
return err
|
||||
|
|
|
|||
|
|
@ -18,6 +18,7 @@ import (
|
|||
"openreplay/backend/pkg/db/types"
|
||||
"openreplay/backend/pkg/hashid"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/metrics/database"
|
||||
"openreplay/backend/pkg/sessions"
|
||||
"openreplay/backend/pkg/url"
|
||||
)
|
||||
|
|
@ -57,13 +58,14 @@ func NewTask() *task {
|
|||
|
||||
type connectorImpl struct {
|
||||
conn driver.Conn
|
||||
metrics database.Database
|
||||
batches map[string]Bulk //driver.Batch
|
||||
workerTask chan *task
|
||||
done chan struct{}
|
||||
finished chan struct{}
|
||||
}
|
||||
|
||||
func NewConnector(cfg common.Clickhouse) Connector {
|
||||
func NewConnector(cfg common.Clickhouse, metrics database.Database) Connector {
|
||||
conn, err := clickhouse.Open(&clickhouse.Options{
|
||||
Addr: []string{cfg.GetTrimmedURL()},
|
||||
Auth: clickhouse.Auth{
|
||||
|
|
@ -84,6 +86,7 @@ func NewConnector(cfg common.Clickhouse) Connector {
|
|||
|
||||
c := &connectorImpl{
|
||||
conn: conn,
|
||||
metrics: metrics,
|
||||
batches: make(map[string]Bulk, 20),
|
||||
workerTask: make(chan *task, 1),
|
||||
done: make(chan struct{}),
|
||||
|
|
@ -94,7 +97,7 @@ func NewConnector(cfg common.Clickhouse) Connector {
|
|||
}
|
||||
|
||||
func (c *connectorImpl) newBatch(name, query string) error {
|
||||
batch, err := NewBulk(c.conn, name, query)
|
||||
batch, err := NewBulk(c.conn, c.metrics, name, query)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create new batch: %s", err)
|
||||
}
|
||||
|
|
@ -103,25 +106,25 @@ func (c *connectorImpl) newBatch(name, query string) error {
|
|||
}
|
||||
|
||||
var batches = map[string]string{
|
||||
"sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?)",
|
||||
"sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone, utm_source, utm_medium, utm_campaign) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?)",
|
||||
"autocompletes": "INSERT INTO experimental.autocomplete (project_id, type, value) VALUES (?, ?, SUBSTR(?, 1, 8000))",
|
||||
"pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"pages": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$current_url", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"errors": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", error_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"performance": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$duration_s", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"graphql": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"issuesEvents": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", issue_type, issue_id, "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"issues": "INSERT INTO experimental.issues (project_id, issue_id, type, context_string) VALUES (?, ?, ?, ?)",
|
||||
"mobile_sessions": "INSERT INTO experimental.sessions (session_id, project_id, user_id, user_uuid, user_os, user_os_version, user_device, user_device_type, user_country, user_state, user_city, datetime, duration, pages_count, events_count, errors_count, issue_score, referrer, issue_types, tracker_version, user_browser, user_browser_version, metadata_1, metadata_2, metadata_3, metadata_4, metadata_5, metadata_6, metadata_7, metadata_8, metadata_9, metadata_10, platform, timezone) VALUES (?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?, SUBSTR(?, 1, 8000), ?, ?, ?, ?, SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), SUBSTR(?, 1, 8000), ?, ?)",
|
||||
"mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_swipes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_crashes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_custom": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_clicks": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_swipes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_inputs": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_requests": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
"mobile_crashes": `INSERT INTO product_analytics.events (session_id, project_id, event_id, "$event_name", created_at, "$time", distinct_id, "$auto_captured", "$device", "$os_version", "$properties") VALUES (?, ?, ?, ?, ?, ?, ?, ?, ?, ?, ?)`,
|
||||
}
|
||||
|
||||
func (c *connectorImpl) Prepare() error {
|
||||
|
|
@ -212,6 +215,7 @@ func (c *connectorImpl) InsertWebSession(session *sessions.Session) error {
|
|||
session.Metadata8,
|
||||
session.Metadata9,
|
||||
session.Metadata10,
|
||||
"web",
|
||||
session.Timezone,
|
||||
session.UtmSource,
|
||||
session.UtmMedium,
|
||||
|
|
@ -243,8 +247,10 @@ func (c *connectorImpl) InsertWebInputDuration(session *sessions.Session, msg *m
|
|||
return nil
|
||||
}
|
||||
jsonString, err := json.Marshal(map[string]interface{}{
|
||||
"label": msg.Label,
|
||||
"hesitation_time": nullableUint32(uint32(msg.HesitationTime)),
|
||||
"label": msg.Label,
|
||||
"hesitation_time": nullableUint32(uint32(msg.HesitationTime)),
|
||||
"user_device": session.UserDevice,
|
||||
"user_device_type": session.UserDeviceType,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal input event: %s", err)
|
||||
|
|
@ -259,6 +265,8 @@ func (c *connectorImpl) InsertWebInputDuration(session *sessions.Session, msg *m
|
|||
eventTime.Unix(),
|
||||
session.UserUUID,
|
||||
true,
|
||||
session.Platform,
|
||||
session.UserOSVersion,
|
||||
nullableUint16(uint16(msg.InputDuration)),
|
||||
jsonString,
|
||||
); err != nil {
|
||||
|
|
@ -275,12 +283,14 @@ func (c *connectorImpl) InsertMouseThrashing(session *sessions.Session, msg *mes
|
|||
return fmt.Errorf("can't extract url parts: %s", err)
|
||||
}
|
||||
jsonString, err := json.Marshal(map[string]interface{}{
|
||||
"issue_id": issueID,
|
||||
"issue_type": "mouse_thrashing",
|
||||
"url": cropString(msg.Url),
|
||||
"url_host": host,
|
||||
"url_path": path,
|
||||
"url_hostpath": hostpath,
|
||||
"issue_id": issueID,
|
||||
"issue_type": "mouse_thrashing",
|
||||
"url": cropString(msg.Url),
|
||||
"url_host": host,
|
||||
"url_path": path,
|
||||
"url_hostpath": hostpath,
|
||||
"user_device": session.UserDevice,
|
||||
"user_device_type": session.UserDeviceType,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal issue event: %s", err)
|
||||
|
|
@ -295,6 +305,8 @@ func (c *connectorImpl) InsertMouseThrashing(session *sessions.Session, msg *mes
|
|||
eventTime.Unix(),
|
||||
session.UserUUID,
|
||||
true,
|
||||
session.Platform,
|
||||
session.UserOSVersion,
|
||||
"mouse_thrashing",
|
||||
issueID,
|
||||
jsonString,
|
||||
|
|
@ -327,12 +339,14 @@ func (c *connectorImpl) InsertIssue(session *sessions.Session, msg *messages.Iss
|
|||
return fmt.Errorf("can't extract url parts: %s", err)
|
||||
}
|
||||
jsonString, err := json.Marshal(map[string]interface{}{
|
||||
"issue_id": issueID,
|
||||
"issue_type": msg.Type,
|
||||
"url": cropString(msg.Url),
|
||||
"url_host": host,
|
||||
"url_path": path,
|
||||
"url_hostpath": hostpath,
|
||||
"issue_id": issueID,
|
||||
"issue_type": msg.Type,
|
||||
"url": cropString(msg.Url),
|
||||
"url_host": host,
|
||||
"url_path": path,
|
||||
"url_hostpath": hostpath,
|
||||
"user_device": session.UserDevice,
|
||||
"user_device_type": session.UserDeviceType,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal issue event: %s", err)
|
||||
|
|
@ -347,6 +361,8 @@ func (c *connectorImpl) InsertIssue(session *sessions.Session, msg *messages.Iss
|
|||
eventTime.Unix(),
|
||||
session.UserUUID,
|
||||
true,
|
||||
session.Platform,
|
||||
session.UserOSVersion,
|
||||
msg.Type,
|
||||
issueID,
|
||||
jsonString,
|
||||
|
|
@ -418,6 +434,8 @@ func (c *connectorImpl) InsertWebPageEvent(session *sessions.Session, msg *messa
|
|||
"dom_building_time": domBuildingTime,
|
||||
"dom_content_loaded_event_time": domContentLoadedEventTime,
|
||||
"load_event_time": loadEventTime,
|
||||
"user_device": session.UserDevice,
|
||||
"user_device_type": session.UserDeviceType,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal page event: %s", err)
|
||||
|
|
@ -432,6 +450,8 @@ func (c *connectorImpl) InsertWebPageEvent(session *sessions.Session, msg *messa
|
|||
eventTime.Unix(),
|
||||
session.UserUUID,
|
||||
true,
|
||||
session.Platform,
|
||||
session.UserOSVersion,
|
||||
cropString(msg.URL),
|
||||
jsonString,
|
||||
); err != nil {
|
||||
|
|
@ -465,15 +485,17 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
|
|||
return fmt.Errorf("can't extract url parts: %s", err)
|
||||
}
|
||||
jsonString, err := json.Marshal(map[string]interface{}{
|
||||
"label": msg.Label,
|
||||
"hesitation_time": nullableUint32(uint32(msg.HesitationTime)),
|
||||
"selector": msg.Selector,
|
||||
"normalized_x": nX,
|
||||
"normalized_y": nY,
|
||||
"url": cropString(msg.Url),
|
||||
"url_host": host,
|
||||
"url_path": path,
|
||||
"url_hostpath": hostpath,
|
||||
"label": msg.Label,
|
||||
"hesitation_time": nullableUint32(uint32(msg.HesitationTime)),
|
||||
"selector": msg.Selector,
|
||||
"normalized_x": nX,
|
||||
"normalized_y": nY,
|
||||
"url": cropString(msg.Url),
|
||||
"url_host": host,
|
||||
"url_path": path,
|
||||
"url_hostpath": hostpath,
|
||||
"user_device": session.UserDevice,
|
||||
"user_device_type": session.UserDeviceType,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal click event: %s", err)
|
||||
|
|
@ -488,6 +510,8 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
|
|||
eventTime.Unix(),
|
||||
session.UserUUID,
|
||||
true,
|
||||
session.Platform,
|
||||
session.UserOSVersion,
|
||||
cropString(msg.Url),
|
||||
jsonString,
|
||||
); err != nil {
|
||||
|
|
@ -498,11 +522,6 @@ func (c *connectorImpl) InsertWebClickEvent(session *sessions.Session, msg *mess
|
|||
}
|
||||
|
||||
func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *types.ErrorEvent) error {
|
||||
keys, values := make([]string, 0, len(msg.Tags)), make([]*string, 0, len(msg.Tags))
|
||||
for k, v := range msg.Tags {
|
||||
keys = append(keys, k)
|
||||
values = append(values, v)
|
||||
}
|
||||
// Check error source before insert to avoid panic from clickhouse lib
|
||||
switch msg.Source {
|
||||
case "js_exception", "bugsnag", "cloudwatch", "datadog", "elasticsearch", "newrelic", "rollbar", "sentry", "stackdriver", "sumologic":
|
||||
|
|
@ -511,12 +530,11 @@ func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *type
|
|||
}
|
||||
msgID, _ := msg.ID(session.ProjectID)
|
||||
jsonString, err := json.Marshal(map[string]interface{}{
|
||||
"source": msg.Source,
|
||||
"name": nullableString(msg.Name),
|
||||
"message": msg.Message,
|
||||
"error_id": msgID,
|
||||
"error_tags_keys": keys,
|
||||
"error_tags_values": values,
|
||||
"source": msg.Source,
|
||||
"name": nullableString(msg.Name),
|
||||
"message": msg.Message,
|
||||
"user_device": session.UserDevice,
|
||||
"user_device_type": session.UserDeviceType,
|
||||
})
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't marshal error event: %s", err)
|
||||
|
|
@@ -531,6 +549,9 @@ func (c *connectorImpl) InsertWebErrorEvent(session *sessions.Session, msg *type
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		msgID,
		jsonString,
	); err != nil {
		c.checkError("errors", err)
@@ -562,6 +583,8 @@ func (c *connectorImpl) InsertWebPerformanceTrackAggr(session *sessions.Session,
		"min_used_js_heap_size": msg.MinUsedJSHeapSize,
		"avg_used_js_heap_size": msg.AvgUsedJSHeapSize,
		"max_used_js_heap_size": msg.MaxUsedJSHeapSize,
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal performance event: %s", err)
@@ -576,6 +599,8 @@ func (c *connectorImpl) InsertWebPerformanceTrackAggr(session *sessions.Session,
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("performance", err)
@@ -599,16 +624,18 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
		return fmt.Errorf("can't extract url parts: %s", err)
	}
	jsonString, err := json.Marshal(map[string]interface{}{
		"request_body": request,
		"response_body": response,
		"status": uint16(msg.Status),
		"method": url.EnsureMethod(msg.Method),
		"success": msg.Status < 400,
		"transfer_size": uint32(msg.TransferredBodySize),
		"url": cropString(msg.URL),
		"url_host": host,
		"url_path": path,
		"url_hostpath": hostpath,
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal request event: %s", err)
@@ -623,6 +650,8 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		nullableUint16(uint16(msg.Duration)),
		jsonString,
	); err != nil {
@@ -634,8 +663,10 @@ func (c *connectorImpl) InsertRequest(session *sessions.Session, msg *messages.N

func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.CustomEvent) error {
	jsonString, err := json.Marshal(map[string]interface{}{
		"name": msg.Name,
		"payload": msg.Payload,
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal custom event: %s", err)
@@ -650,6 +681,8 @@ func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.Cu
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("custom", err)
@@ -660,9 +693,11 @@ func (c *connectorImpl) InsertCustom(session *sessions.Session, msg *messages.Cu

func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.GraphQL) error {
	jsonString, err := json.Marshal(map[string]interface{}{
		"name": msg.OperationName,
		"request_body": nullableString(msg.Variables),
		"response_body": nullableString(msg.Response),
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal graphql event: %s", err)
@@ -677,6 +712,8 @@ func (c *connectorImpl) InsertGraphQL(session *sessions.Session, msg *messages.G
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("graphql", err)
@@ -724,7 +761,7 @@ func (c *connectorImpl) InsertMobileSession(session *sessions.Session) error {
		session.Metadata8,
		session.Metadata9,
		session.Metadata10,
		"ios",
		"mobile",
		session.Timezone,
	); err != nil {
		c.checkError("mobile_sessions", err)
@@ -735,8 +772,10 @@ func (c *connectorImpl) InsertMobileSession(session *sessions.Session) error {

func (c *connectorImpl) InsertMobileCustom(session *sessions.Session, msg *messages.MobileEvent) error {
	jsonString, err := json.Marshal(map[string]interface{}{
		"name": msg.Name,
		"payload": msg.Payload,
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal mobile custom event: %s", err)
@@ -751,6 +790,8 @@ func (c *connectorImpl) InsertMobileCustom(session *sessions.Session, msg *messa
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("mobile_custom", err)
@@ -764,7 +805,9 @@ func (c *connectorImpl) InsertMobileClick(session *sessions.Session, msg *messag
		return nil
	}
	jsonString, err := json.Marshal(map[string]interface{}{
		"label": msg.Label,
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal mobile clicks event: %s", err)
@@ -779,6 +822,8 @@ func (c *connectorImpl) InsertMobileClick(session *sessions.Session, msg *messag
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("mobile_clicks", err)
@@ -792,8 +837,10 @@ func (c *connectorImpl) InsertMobileSwipe(session *sessions.Session, msg *messag
		return nil
	}
	jsonString, err := json.Marshal(map[string]interface{}{
		"label": msg.Label,
		"direction": nullableString(msg.Direction),
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal mobile swipe event: %s", err)
@@ -808,6 +855,8 @@ func (c *connectorImpl) InsertMobileSwipe(session *sessions.Session, msg *messag
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("mobile_swipes", err)
@@ -821,7 +870,9 @@ func (c *connectorImpl) InsertMobileInput(session *sessions.Session, msg *messag
		return nil
	}
	jsonString, err := json.Marshal(map[string]interface{}{
		"label": msg.Label,
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal mobile input event: %s", err)
@@ -836,6 +887,8 @@ func (c *connectorImpl) InsertMobileInput(session *sessions.Session, msg *messag
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("mobile_inputs", err)
@@ -855,13 +908,15 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess
		response = &msg.Response
	}
	jsonString, err := json.Marshal(map[string]interface{}{
		"url": cropString(msg.URL),
		"request_body": request,
		"response_body": response,
		"status": uint16(msg.Status),
		"method": url.EnsureMethod(msg.Method),
		"duration": uint16(msg.Duration),
		"success": msg.Status < 400,
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal mobile request event: %s", err)
@@ -876,6 +931,8 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("mobile_requests", err)
@@ -886,9 +943,11 @@ func (c *connectorImpl) InsertMobileRequest(session *sessions.Session, msg *mess

func (c *connectorImpl) InsertMobileCrash(session *sessions.Session, msg *messages.MobileCrash) error {
	jsonString, err := json.Marshal(map[string]interface{}{
		"name": msg.Name,
		"reason": msg.Reason,
		"stacktrace": msg.Stacktrace,
+		"user_device": session.UserDevice,
+		"user_device_type": session.UserDeviceType,
	})
	if err != nil {
		return fmt.Errorf("can't marshal mobile crash event: %s", err)
@@ -903,6 +962,8 @@ func (c *connectorImpl) InsertMobileCrash(session *sessions.Session, msg *messag
		eventTime.Unix(),
		session.UserUUID,
		true,
		session.Platform,
		session.UserOSVersion,
		jsonString,
	); err != nil {
		c.checkError("mobile_crashes", err)
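Every Insert* method in the ClickHouse connector above receives the same change: the session's user_device and user_device_type are copied into the event's JSON payload before it is marshalled and written. A minimal, self-contained sketch of that enrichment pattern follows; the Session struct and the enrichPayload helper are illustrative stand-ins, not types from this repository.

package main

import (
	"encoding/json"
	"fmt"
)

// Session is a pared-down stand-in for sessions.Session; only the two
// fields that the diff adds to every payload are included.
type Session struct {
	UserDevice     string
	UserDeviceType string
}

// enrichPayload copies the shared device fields into an event payload,
// mirroring the "user_device" / "user_device_type" keys added above.
func enrichPayload(session *Session, payload map[string]interface{}) map[string]interface{} {
	payload["user_device"] = session.UserDevice
	payload["user_device_type"] = session.UserDeviceType
	return payload
}

func main() {
	session := &Session{UserDevice: "iPhone", UserDeviceType: "mobile"}
	jsonString, err := json.Marshal(enrichPayload(session, map[string]interface{}{
		"label": "Buy now",
	}))
	if err != nil {
		fmt.Println("can't marshal click event:", err)
		return
	}
	fmt.Println(string(jsonString))
}

Factoring the two assignments into a helper like this would also remove the repetition across the insert methods; the diff itself keeps the keys inline in each payload.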
@@ -52,6 +52,7 @@ func NewBatchesTask(size int) *batchesTask {
type BatchSet struct {
	log logger.Logger
	c pool.Pool
+	metrics database.Database
	ctx context.Context
	batches map[uint64]*SessionBatch
	workerTask chan *batchesTask
@@ -59,10 +60,11 @@ type BatchSet struct {
	finished chan struct{}
}

-func NewBatchSet(log logger.Logger, c pool.Pool) *BatchSet {
+func NewBatchSet(log logger.Logger, c pool.Pool, metrics database.Database) *BatchSet {
	bs := &BatchSet{
		log: log,
		c: c,
+		metrics: metrics,
		ctx: context.Background(),
		batches: make(map[uint64]*SessionBatch),
		workerTask: make(chan *batchesTask, 1),
@@ -104,7 +106,7 @@ func (conn *BatchSet) Stop() {
func (conn *BatchSet) sendBatches(t *batchesTask) {
	for _, batch := range t.batches {
		// Record batch size
-		database.RecordBatchElements(float64(batch.Len()))
+		conn.metrics.RecordBatchElements(float64(batch.Len()))

		start := time.Now()

@@ -120,7 +122,7 @@ func (conn *BatchSet) sendBatches(t *batchesTask) {
			}
		}
		br.Close() // returns err
-		database.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
+		conn.metrics.RecordBatchInsertDuration(float64(time.Now().Sub(start).Milliseconds()))
	}
}

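The batch writer no longer calls package-level metric functions: a database.Database value is injected through NewBatchSet, stored on the struct, and sendBatches reports through conn.metrics. The sketch below shows the same dependency-injection shape with stand-in types; Metrics, logMetrics and batchSet are illustrative, and the real interface in openreplay/backend/pkg/metrics/database is not shown in this diff.

package main

import (
	"fmt"
	"time"
)

// Metrics is an illustrative stand-in for the injected database.Database
// dependency, reduced to the two calls made by sendBatches above.
type Metrics interface {
	RecordBatchElements(count float64)
	RecordBatchInsertDuration(ms float64)
}

// logMetrics is a trivial implementation used only for this example.
type logMetrics struct{}

func (logMetrics) RecordBatchElements(count float64)    { fmt.Println("batch elements:", count) }
func (logMetrics) RecordBatchInsertDuration(ms float64) { fmt.Println("batch insert ms:", ms) }

// batchSet mirrors the shape of the change: metrics is a struct field,
// so the send path calls conn.metrics.Record... rather than a package func.
type batchSet struct {
	metrics Metrics
}

func (conn *batchSet) sendBatch(elements int, send func()) {
	conn.metrics.RecordBatchElements(float64(elements))
	start := time.Now()
	send()
	conn.metrics.RecordBatchInsertDuration(float64(time.Since(start).Milliseconds()))
}

func main() {
	bs := &batchSet{metrics: logMetrics{}}
	bs.sendBatch(42, func() { time.Sleep(5 * time.Millisecond) })
}

Injecting the metrics sink this way also makes the batch writer testable with a fake recorder, which package-level counters do not allow.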
@@ -24,6 +24,7 @@ type Bulk interface {

type bulkImpl struct {
	conn pool.Pool
+	metrics database.Database
	table string
	columns string
	template string
@@ -75,12 +76,12 @@ func (b *bulkImpl) send() error {
		return fmt.Errorf("send bulk err: %s", err)
	}
	// Save bulk metrics
-	database.RecordBulkElements(float64(size), "pg", b.table)
-	database.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
+	b.metrics.RecordBulkElements(float64(size), "pg", b.table)
+	b.metrics.RecordBulkInsertDuration(float64(time.Now().Sub(start).Milliseconds()), "pg", b.table)
	return nil
}

-func NewBulk(conn pool.Pool, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
+func NewBulk(conn pool.Pool, metrics database.Database, table, columns, template string, setSize, sizeLimit int) (Bulk, error) {
	switch {
	case conn == nil:
		return nil, errors.New("db conn is empty")
@@ -97,6 +98,7 @@ func NewBulk(conn pool.Pool, table, columns, template string, setSize, sizeLimit
	}
	return &bulkImpl{
		conn: conn,
+		metrics: metrics,
		table: table,
		columns: columns,
		template: template,
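bulkImpl.send now records its element count and insert duration through the injected b.metrics value, labelled with the backend name ("pg") and the target table. A self-contained sketch of that measurement pattern, using a stand-in recorder instead of the real database.Database:

package main

import (
	"fmt"
	"time"
)

// BulkMetrics is an illustrative stand-in for the two labelled calls
// made from bulkImpl.send; the labels are the backend name and table.
type BulkMetrics interface {
	RecordBulkElements(size float64, db, table string)
	RecordBulkInsertDuration(ms float64, db, table string)
}

type printMetrics struct{}

func (printMetrics) RecordBulkElements(size float64, db, table string) {
	fmt.Printf("%s/%s bulk size: %.0f\n", db, table, size)
}

func (printMetrics) RecordBulkInsertDuration(ms float64, db, table string) {
	fmt.Printf("%s/%s insert took %.0f ms\n", db, table, ms)
}

// sendBulk times an insert callback and reports both metrics, mirroring
// the b.metrics.RecordBulk* calls in the diff (the error message is the
// same one used there).
func sendBulk(m BulkMetrics, table string, size int, insert func() error) error {
	start := time.Now()
	if err := insert(); err != nil {
		return fmt.Errorf("send bulk err: %s", err)
	}
	m.RecordBulkElements(float64(size), "pg", table)
	m.RecordBulkInsertDuration(float64(time.Since(start).Milliseconds()), "pg", table)
	return nil
}

func main() {
	err := sendBulk(printMetrics{}, "autocomplete", 200, func() error {
		time.Sleep(2 * time.Millisecond)
		return nil
	})
	if err != nil {
		fmt.Println(err)
	}
}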
@@ -2,11 +2,14 @@ package postgres

import (
	"context"
+	"openreplay/backend/pkg/metrics/database"

	"openreplay/backend/pkg/db/postgres/pool"
	"openreplay/backend/pkg/logger"
)

+var BULK_SIZE = 200

type bulksTask struct {
	bulks []Bulk
}
@@ -19,6 +22,7 @@ type BulkSet struct {
	log logger.Logger
	c pool.Pool
	ctx context.Context
+	metrics database.Database
	autocompletes Bulk
	requests Bulk
	customEvents Bulk
@@ -41,10 +45,11 @@ type BulkSet struct {
	finished chan struct{}
}

-func NewBulkSet(log logger.Logger, c pool.Pool) *BulkSet {
+func NewBulkSet(log logger.Logger, c pool.Pool, metrics database.Database) *BulkSet {
	bs := &BulkSet{
		log: log,
		c: c,
+		metrics: metrics,
		ctx: context.Background(),
		workerTask: make(chan *bulksTask, 1),
		done: make(chan struct{}),
@@ -98,31 +103,31 @@ func (conn *BulkSet) Get(name string) Bulk {

func (conn *BulkSet) initBulks() {
	var err error
-	conn.autocompletes, err = NewBulk(conn.c,
+	conn.autocompletes, err = NewBulk(conn.c, conn.metrics,
		"autocomplete",
		"(value, type, project_id)",
		"($%d, $%d, $%d)",
-		3, 200)
+		3, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create autocomplete bulk: %s", err)
	}
-	conn.requests, err = NewBulk(conn.c,
+	conn.requests, err = NewBulk(conn.c, conn.metrics,
		"events_common.requests",
		"(session_id, timestamp, seq_index, url, duration, success)",
		"($%d, $%d, $%d, LEFT($%d, 8000), $%d, $%d)",
-		6, 200)
+		6, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create requests bulk: %s", err)
	}
-	conn.customEvents, err = NewBulk(conn.c,
+	conn.customEvents, err = NewBulk(conn.c, conn.metrics,
		"events_common.customs",
		"(session_id, timestamp, seq_index, name, payload)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d)",
-		5, 200)
+		5, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create customEvents bulk: %s", err)
	}
-	conn.webPageEvents, err = NewBulk(conn.c,
+	conn.webPageEvents, err = NewBulk(conn.c, conn.metrics,
		"events.pages",
		"(session_id, message_id, timestamp, referrer, base_referrer, host, path, query, dom_content_loaded_time, "+
			"load_time, response_end, first_paint_time, first_contentful_paint_time, speed_index, visually_complete, "+
@@ -130,111 +135,111 @@ func (conn *BulkSet) initBulks() {
		"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), "+
			"NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0),"+
			" NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, 0), NULLIF($%d, ''))",
-		19, 200)
+		19, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webPageEvents bulk: %s", err)
	}
-	conn.webInputDurations, err = NewBulk(conn.c,
+	conn.webInputDurations, err = NewBulk(conn.c, conn.metrics,
		"events.inputs",
		"(session_id, message_id, timestamp, label, hesitation, duration)",
		"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000),''), $%d, $%d)",
-		6, 200)
+		6, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webInputDurations bulk: %s", err)
	}
-	conn.webGraphQL, err = NewBulk(conn.c,
+	conn.webGraphQL, err = NewBulk(conn.c, conn.metrics,
		"events.graphql",
		"(session_id, timestamp, message_id, name, request_body, response_body)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
-		6, 200)
+		6, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webGraphQL bulk: %s", err)
	}
-	conn.webErrors, err = NewBulk(conn.c,
+	conn.webErrors, err = NewBulk(conn.c, conn.metrics,
		"errors",
		"(error_id, project_id, source, name, message, payload)",
		"($%d, $%d, $%d, $%d, $%d, $%d::jsonb)",
-		6, 200)
+		6, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webErrors bulk: %s", err)
	}
-	conn.webErrorEvents, err = NewBulk(conn.c,
+	conn.webErrorEvents, err = NewBulk(conn.c, conn.metrics,
		"events.errors",
		"(session_id, message_id, timestamp, error_id)",
		"($%d, $%d, $%d, $%d)",
-		4, 200)
+		4, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webErrorEvents bulk: %s", err)
	}
-	conn.webErrorTags, err = NewBulk(conn.c,
+	conn.webErrorTags, err = NewBulk(conn.c, conn.metrics,
		"public.errors_tags",
		"(session_id, message_id, error_id, key, value)",
		"($%d, $%d, $%d, $%d, $%d)",
-		5, 200)
+		5, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webErrorTags bulk: %s", err)
	}
-	conn.webIssues, err = NewBulk(conn.c,
+	conn.webIssues, err = NewBulk(conn.c, conn.metrics,
		"issues",
		"(project_id, issue_id, type, context_string)",
		"($%d, $%d, $%d, $%d)",
-		4, 200)
+		4, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webIssues bulk: %s", err)
	}
-	conn.webIssueEvents, err = NewBulk(conn.c,
+	conn.webIssueEvents, err = NewBulk(conn.c, conn.metrics,
		"events_common.issues",
		"(session_id, issue_id, timestamp, seq_index, payload)",
		"($%d, $%d, $%d, $%d, CAST($%d AS jsonb))",
-		5, 200)
+		5, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webIssueEvents bulk: %s", err)
	}
-	conn.webCustomEvents, err = NewBulk(conn.c,
+	conn.webCustomEvents, err = NewBulk(conn.c, conn.metrics,
		"events_common.customs",
		"(session_id, seq_index, timestamp, name, payload, level)",
		"($%d, $%d, $%d, LEFT($%d, 2000), $%d, $%d)",
-		6, 200)
+		6, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webCustomEvents bulk: %s", err)
	}
-	conn.webClickEvents, err = NewBulk(conn.c,
+	conn.webClickEvents, err = NewBulk(conn.c, conn.metrics,
		"events.clicks",
		"(session_id, message_id, timestamp, label, selector, url, path, hesitation)",
		"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d)",
-		8, 200)
+		8, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
	}
-	conn.webClickXYEvents, err = NewBulk(conn.c,
+	conn.webClickXYEvents, err = NewBulk(conn.c, conn.metrics,
		"events.clicks",
		"(session_id, message_id, timestamp, label, selector, url, path, hesitation, normalized_x, normalized_y)",
		"($%d, $%d, $%d, NULLIF(LEFT($%d, 2000), ''), LEFT($%d, 8000), LEFT($%d, 2000), LEFT($%d, 2000), $%d, $%d, $%d)",
-		10, 200)
+		10, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webClickEvents bulk: %s", err)
	}
-	conn.webNetworkRequest, err = NewBulk(conn.c,
+	conn.webNetworkRequest, err = NewBulk(conn.c, conn.metrics,
		"events_common.requests",
		"(session_id, timestamp, seq_index, url, host, path, query, request_body, response_body, status_code, method, duration, success, transfer_size)",
		"($%d, $%d, $%d, LEFT($%d, 8000), LEFT($%d, 300), LEFT($%d, 2000), LEFT($%d, 8000), $%d, $%d, $%d::smallint, NULLIF($%d, '')::http_method, $%d, $%d, $%d)",
-		14, 200)
+		14, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webNetworkRequest bulk: %s", err)
	}
-	conn.webCanvasNodes, err = NewBulk(conn.c,
+	conn.webCanvasNodes, err = NewBulk(conn.c, conn.metrics,
		"events.canvas_recordings",
		"(session_id, recording_id, timestamp)",
		"($%d, $%d, $%d)",
-		3, 200)
+		3, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webCanvasNodes bulk: %s", err)
	}
-	conn.webTagTriggers, err = NewBulk(conn.c,
+	conn.webTagTriggers, err = NewBulk(conn.c, conn.metrics,
		"events.tags",
		"(session_id, timestamp, seq_index, tag_id)",
		"($%d, $%d, $%d, $%d)",
-		4, 200)
+		4, BULK_SIZE)
	if err != nil {
		conn.log.Fatal(conn.ctx, "can't create webTagTriggers bulk: %s", err)
	}
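Previously every NewBulk call in initBulks passed the literal 200 as its sizeLimit argument; the new package-level var BULK_SIZE = 200 centralizes that number so all of the bulks stay in sync and the limit can be changed in one place. The toy sketch below illustrates the same pattern with stand-in names; the idea that the variable might be tuned before the bulks are created is an assumption about intent, not something this diff does.

package main

import "fmt"

// BULK_SIZE mirrors the new package-level variable: one shared default
// for the sizeLimit argument instead of a repeated literal 200.
var BULK_SIZE = 200

// bulkSpec is an illustrative stand-in for the per-bulk arguments that
// initBulks passes to NewBulk (table, column list, placeholder template,
// number of arguments per row).
type bulkSpec struct {
	table    string
	columns  string
	template string
	setSize  int
}

// newBulk is a stand-in constructor; it only prints what it would build.
func newBulk(spec bulkSpec, sizeLimit int) {
	fmt.Printf("bulk %s: %d args per row, flush at %d rows\n", spec.table, spec.setSize, sizeLimit)
}

func main() {
	// Because BULK_SIZE is a variable rather than a constant, it could in
	// principle be adjusted once, before any bulks are created.
	BULK_SIZE = 500

	specs := []bulkSpec{
		{"autocomplete", "(value, type, project_id)", "($%d, $%d, $%d)", 3},
		{"events.tags", "(session_id, timestamp, seq_index, tag_id)", "($%d, $%d, $%d, $%d)", 4},
	}
	for _, s := range specs {
		newBulk(s, BULK_SIZE)
	}
}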
@@ -2,6 +2,7 @@ package postgres

import (
	"context"
+	"openreplay/backend/pkg/metrics/database"

	"openreplay/backend/pkg/db/postgres/batch"
	"openreplay/backend/pkg/db/postgres/pool"
@@ -22,7 +23,7 @@ type Conn struct {
	chConn CH
}

-func NewConn(log logger.Logger, pool pool.Pool, ch CH) *Conn {
+func NewConn(log logger.Logger, pool pool.Pool, ch CH, metrics database.Database) *Conn {
	if pool == nil {
		log.Fatal(context.Background(), "pg pool is empty")
	}
@@ -30,8 +31,8 @@ func NewConn(log logger.Logger, pool pool.Pool, ch CH) *Conn {
		log: log,
		Pool: pool,
		chConn: ch,
-		bulks: NewBulkSet(log, pool),
-		batches: batch.NewBatchSet(log, pool),
+		bulks: NewBulkSet(log, pool, metrics),
+		batches: batch.NewBatchSet(log, pool, metrics),
	}
}

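NewConn now accepts a single database.Database value and threads it into both NewBulkSet and batch.NewBatchSet, so every Postgres writer reports to the same metrics sink. A hedged sketch of that wiring with stand-in types follows; the real logger.Logger, pool.Pool, CH and database.Database constructors are not part of this diff.

package main

import "fmt"

// Stand-in types for the dependencies NewConn receives; the real ones
// live elsewhere in the repository and are not shown here.
type Logger struct{}
type Pool struct{}
type CH struct{}
type Metrics struct{}

type bulkSet struct{ metrics *Metrics }
type batchSet struct{ metrics *Metrics }

type Conn struct {
	bulks   *bulkSet
	batches *batchSet
}

// newConn mirrors the new constructor shape: the metrics dependency is
// accepted once and handed to both the bulk and the batch writers.
func newConn(log *Logger, pool *Pool, ch *CH, metrics *Metrics) *Conn {
	return &Conn{
		bulks:   &bulkSet{metrics: metrics},
		batches: &batchSet{metrics: metrics},
	}
}

func main() {
	conn := newConn(&Logger{}, &Pool{}, &CH{}, &Metrics{})
	fmt.Println("bulks and batches share one metrics sink:",
		conn.bulks.metrics == conn.batches.metrics)
}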
Some files were not shown because too many files have changed in this diff.