commit 658b545571
1492 changed files with 30933 additions and 130046 deletions

37  .github/workflows/api-ee.yaml  (vendored)
@@ -1,6 +1,11 @@
# This action will push the chalice changes to aws
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
required: false
default: 'false'
push:
branches:
- dev

@@ -43,11 +48,26 @@ jobs:
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}-ee
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd api
PUSH_IMAGE=1 bash build.sh ee
PUSH_IMAGE=0 bash -x ./build.sh ee
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("chalice" "alerts")
for image in ${images[*]};do
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
PUSH_IMAGE=1 bash -x ./build.sh ee
- name: Creating old image input
run: |
#

@@ -91,9 +111,20 @@ jobs:
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: ee
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'

# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
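The build step above now splits build and push around a Trivy scan: the images are built locally (PUSH_IMAGE=0), scanned, and only pushed (PUSH_IMAGE=1) when the scan passes or when skip_security_checks is set. A minimal sketch of that gate, written in Python purely for illustration (the workflow itself uses bash; the trivy and docker commands are taken from the diff, the wrapper function is assumed):

import os
import subprocess
import sys

def scan_then_push(images, repo, tag, skip_checks=False):
    # Images are assumed to be already built locally with PUSH_IMAGE=0.
    if skip_checks:
        print("Skipping Security Checks")
    else:
        for image in images:
            # --exit-code 1 makes trivy return non-zero on HIGH/CRITICAL findings
            scan = subprocess.run(["./trivy", "image", "--exit-code", "1",
                                   "--vuln-type", "os,library",
                                   "--severity", "HIGH,CRITICAL",
                                   "--ignore-unfixed", f"{repo}/{image}:{tag}"])
            if scan.returncode != 0:
                sys.exit(scan.returncode)   # block the push on findings
    for image in images:
        subprocess.run(["docker", "push", f"{repo}/{image}:{tag}"], check=True)

scan_then_push(images=["chalice", "alerts"],
               repo=os.environ["DOCKER_REPO"],
               tag=os.environ["IMAGE_TAG"],
               skip_checks=os.environ.get("skip_security_checks") == "true")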
37  .github/workflows/api.yaml  (vendored)

@@ -1,6 +1,11 @@
# This action will push the chalice changes to aws
on:
workflow_dispatch:
inputs:
skip_security_checks:
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
required: false
default: 'false'
push:
branches:
- dev

@@ -42,11 +47,26 @@ jobs:
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
cd api
PUSH_IMAGE=1 bash build.sh
PUSH_IMAGE=0 bash -x ./build.sh
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
images=("chalice" "alerts")
for image in ${images[*]};do
./trivy image --exit-code 1 --security-checks vuln --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
done
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
PUSH_IMAGE=1 bash -x ./build.sh
- name: Creating old image input
run: |
#

@@ -90,9 +110,20 @@ jobs:
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'

# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
6  .github/workflows/frontend.yaml  (vendored)

@@ -41,7 +41,7 @@ jobs:
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
set -x

@@ -84,7 +84,7 @@ jobs:
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

@@ -130,7 +130,7 @@ jobs:
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

# - name: Debug Job
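Every hunk in this file makes the same one-line change: the image tag now carries the branch name in front of the commit SHA, with the existing -ee suffix kept for enterprise images. An illustration of the resulting values (the branch name is an example):

def image_tag(ref_name: str, sha: str, ee: bool = False) -> str:
    # convention introduced by this commit: <branch>_<sha>, plus "-ee" for enterprise builds
    return f"{ref_name}_{sha}" + ("-ee" if ee else "")

assert image_tag("dev", "658b545571") == "dev_658b545571"
assert image_tag("dev", "658b545571", ee=True) == "dev_658b545571-ee"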
103  .github/workflows/sourcemaps-reader.yaml  (vendored, new file)

@@ -0,0 +1,103 @@
# This action will push the chalice changes to aws
on:
workflow_dispatch:
push:
branches:
- dev
paths:
- sourcemap-reader/**

name: Build and Deploy Chalice

jobs:
deploy:
name: Deploy
runs-on: ubuntu-latest

steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2

- name: Docker login
run: |
docker login ${{ secrets.OSS_REGISTRY_URL }} -u ${{ secrets.OSS_DOCKER_USERNAME }} -p "${{ secrets.OSS_REGISTRY_TOKEN }}"

- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext

# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true

- name: Building and Pusing sourcemaps-reader image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
cd sourcemap-reader
PUSH_IMAGE=1 bash build.sh
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt

echo > /tmp/image_override.yaml

for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done

- name: Deploy to kubernetes
run: |
cd scripts/helmcharts/

## Update secerts
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml

# Update changed image tag
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml

cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,chalice,quickwit} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice,quickwit} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
# IMAGE_TAG: ${{ github.sha }}
# ENVIRONMENT: staging
#
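The "Creating old image input" step above freezes the tags of everything already running so the later helm template only bumps the service being deployed. A rough Python equivalent of that step, for illustration only (the kubectl jsonpath query and the '/foss/' registry layout come from the workflow; the function wrapper is assumed):

import subprocess

def current_image_overrides(namespace: str = "app") -> str:
    out = subprocess.run(
        ["kubectl", "get", "pods", "-n", namespace, "-o",
         "jsonpath={.items[*].spec.containers[*].image}"],
        check=True, capture_output=True, text=True,
    ).stdout
    lines = []
    for ref in sorted(set(out.split())):
        if "/foss/" not in ref:
            continue
        name_tag = ref.split("/")[-1]        # e.g. "chalice:dev_658b545571"
        name, tag = name_tag.split(":", 1)
        lines += [f"{name}:", "  image:", f"    tag: {tag}"]
    return "\n".join(lines) + "\n"

# write the override consumed later by `helm template ... -f /tmp/image_override.yaml`
open("/tmp/image_override.yaml", "w").write(current_image_overrides())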
4  .github/workflows/utilities.yaml  (vendored)

@@ -36,7 +36,7 @@ jobs:
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
cd utilities

@@ -53,7 +53,7 @@ jobs:
bash kube-install.sh --app utilities
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging

# - name: Debug Job
39  .github/workflows/workers-ee.yaml  (vendored)

@@ -7,6 +7,10 @@ on:
description: 'Name of a single service to build(in small letters). "all" to build everything'
required: false
default: 'false'
skip_security_checks:
description: 'Skip Security checks if there is a unfixable vuln or error. Value: true/false'
required: false
default: 'false'
push:
branches:
- dev

@@ -49,7 +53,7 @@ jobs:
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}-ee
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
#

@@ -61,6 +65,7 @@ jobs:
#
set -x
touch /tmp/images_to_build.txt
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
tmp_param=${{ github.event.inputs.build_service }}
build_param=${tmp_param:-'false'}
case ${build_param} in

@@ -81,7 +86,11 @@ jobs:
;;
esac

[[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 1)
if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
echo "Nothing to build here"
touch /tmp/nothing-to-build-here
exit 0
fi
#
# Pushing image to registry
#

@@ -89,6 +98,17 @@ jobs:
for image in $(cat /tmp/images_to_build.txt);
do
echo "Bulding $image"
PUSH_IMAGE=0 bash -x ./build.sh ee $image
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
PUSH_IMAGE=1 bash -x ./build.sh ee $image
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
done

@@ -96,12 +116,13 @@ jobs:
- name: Deploying to kuberntes
env:
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
run: |
#
# Deploying image to environment.
#
set -x
[[ -f /tmp/nothing-to-build-here ]] && exit 0
cd scripts/helmcharts/

## Update secerts

@@ -140,6 +161,18 @@ jobs:
# Deploy command
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -

#- name: Alert slack
# if: ${{ failure() }}
# uses: rtCamp/action-slack-notify@v2
# env:
# SLACK_CHANNEL: ee
# SLACK_TITLE: "Failed ${{ github.workflow }}"
# SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
# SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
# SLACK_USERNAME: "OR Bot"
# SLACK_MESSAGE: 'Build failed :bomb:'

# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
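The hunks touching the "Nothing to build here" branch change its semantics: instead of failing the job (exit 1) when no worker image needs rebuilding, the build step now succeeds and drops a marker file, and the deploy step exits early when the marker exists. A compact Python rendering of that hand-off (the workflow itself is bash; function names here are illustrative):

from pathlib import Path

SENTINEL = Path("/tmp/nothing-to-build-here")

def build_step(images_to_build: list[str]) -> None:
    if not images_to_build:
        print("Nothing to build here")
        SENTINEL.touch()            # previously this path was a hard `exit 1`
        return                      # job stays green
    for image in images_to_build:
        print(f"Building {image}")  # build + scan + push happen here

def deploy_step() -> None:
    if SENTINEL.exists():
        return                      # nothing was built, so nothing to deploy
    print("helm template ... | kubectl apply -f -")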
35  .github/workflows/workers.yaml  (vendored)

@@ -53,7 +53,7 @@ jobs:
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
ENVIRONMENT: staging
run: |
#

@@ -63,7 +63,7 @@ jobs:
#
# Getting the images to build
#
set -x
set -xe
touch /tmp/images_to_build.txt
skip_security_checks=${{ github.event.inputs.skip_security_checks }}
tmp_param=${{ github.event.inputs.build_service }}

@@ -86,7 +86,11 @@ jobs:
;;
esac

[[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 1)
if [[ $(cat /tmp/images_to_build.txt) == "" ]]; then
echo "Nothing to build here"
touch /tmp/nothing-to-build-here
exit 0
fi
#
# Pushing image to registry
#

@@ -94,17 +98,30 @@ jobs:
for image in $(cat /tmp/images_to_build.txt);
do
echo "Bulding $image"
PUSH_IMAGE=0 bash -x ./build.sh skip $image
[[ "x$skip_security_checks" == "xtrue" ]] || {
curl -L https://github.com/aquasecurity/trivy/releases/download/v0.34.0/trivy_0.34.0_Linux-64bit.tar.gz | tar -xzf - -C ./
./trivy image --exit-code 1 --vuln-type os,library --severity "HIGH,CRITICAL" --ignore-unfixed $DOCKER_REPO/$image:$IMAGE_TAG
err_code=$?
[[ $err_code -ne 0 ]] && {
exit $err_code
}
} && {
echo "Skipping Security Checks"
}
PUSH_IMAGE=1 bash -x ./build.sh skip $image
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
done

- name: Deploying to kuberntes
env:
IMAGE_TAG: ${{ github.sha }}
IMAGE_TAG: ${{ github.ref_name }}_${{ github.sha }}
run: |
#
# Deploying image to environment.
#
set -x
[[ -f /tmp/nothing-to-build-here ]] && exit 0
cd scripts/helmcharts/

## Update secerts

@@ -142,6 +159,16 @@ jobs:
# Deploy command
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -

- name: Alert slack
if: ${{ failure() }}
uses: rtCamp/action-slack-notify@v2
env:
SLACK_CHANNEL: foss
SLACK_TITLE: "Failed ${{ github.workflow }}"
SLACK_COLOR: ${{ job.status }} # or a specific color like 'good' or '#ff00ff'
SLACK_WEBHOOK: ${{ secrets.SLACK_WEB_HOOK }}
SLACK_USERNAME: "OR Bot"
SLACK_MESSAGE: 'Build failed :bomb:'
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
@@ -1,28 +1,22 @@
FROM python:3.10-alpine
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk add --no-cache build-base nodejs npm tini
RUN apk add --no-cache build-base tini
ARG envarg
# Add Tini
# Startup daemon
ENV SOURCE_MAP_VERSION=0.7.4 \
APP_NAME=chalice \
LISTEN_PORT=8000 \
MAPPING_WASM=/work/sourcemap-reader/mappings.wasm \
PRIVATE_ENDPOINTS=false \
ENTERPRISE_BUILD=${envarg}

ADD https://unpkg.com/source-map@${SOURCE_MAP_VERSION}/lib/mappings.wasm /mappings.wasm

WORKDIR /work_tmp
COPY requirements.txt /work_tmp/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /work_tmp/requirements.txt
COPY sourcemap-reader/*.json /work_tmp/
RUN cd /work_tmp && npm install

WORKDIR /work
COPY requirements.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY . .
RUN mv env.default .env && mv /work_tmp/node_modules sourcemap-reader/. \
&& mv /mappings.wasm ${MAPPING_WASM} && chmod 644 ${MAPPING_WASM}
RUN mv env.default .env

RUN adduser -u 1001 openreplay -D
USER 1001

@@ -4,4 +4,8 @@
**/build.sh
**/build_*.sh
**/*deploy.sh
Dockerfile*
Dockerfile*

app_alerts.py
requirements-alerts.txt
entrypoint_alerts.sh

@@ -4,15 +4,16 @@ LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
RUN apk add --no-cache build-base tini
ARG envarg
ENV APP_NAME=alerts \
pg_minconn=1 \
pg_maxconn=10 \
PG_MINCONN=1 \
PG_MAXCONN=10 \
LISTEN_PORT=8000 \
PRIVATE_ENDPOINTS=true \
ENTERPRISE_BUILD=${envarg}

COPY requirements.txt /work_tmp/requirements.txt
RUN pip install --no-cache-dir --upgrade -r /work_tmp/requirements.txt

WORKDIR /work
COPY requirements-alerts.txt ./requirements.txt
RUN pip install --no-cache-dir --upgrade -r requirements.txt

COPY . .
RUN mv env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh

@@ -7,5 +7,5 @@
Dockerfile*

app.py
entrypoint_alerts.sh
entrypoint.sh
requirements.txt
64  api/app.py

@@ -20,22 +20,14 @@ app.add_middleware(GZipMiddleware, minimum_size=1000)

@app.middleware('http')
async def or_middleware(request: Request, call_next):
global OR_SESSION_TOKEN
OR_SESSION_TOKEN = request.headers.get('vnd.openreplay.com.sid', request.headers.get('vnd.asayer.io.sid'))

try:
if helper.TRACK_TIME:
import time
now = int(time.time() * 1000)
response: StreamingResponse = await call_next(request)
if helper.TRACK_TIME:
now = int(time.time() * 1000) - now
if now > 500:
print(f"Execution time: {now} ms")
except Exception as e:
pg_client.close()
raise e
pg_client.close()
if helper.TRACK_TIME:
import time
now = int(time.time() * 1000)
response: StreamingResponse = await call_next(request)
if helper.TRACK_TIME:
now = int(time.time() * 1000) - now
if now > 500:
logging.info(f"Execution time: {now} ms")
return response

@@ -61,14 +53,38 @@ app.include_router(metrics.app)
app.include_router(insights.app)
app.include_router(v1_api.app_apikey)

Schedule = AsyncIOScheduler()
Schedule.start()
loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)
app.schedule = AsyncIOScheduler()

for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
Schedule.add_job(id=job["func"].__name__, **job)

for job in Schedule.get_jobs():
print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})
@app.on_event("startup")
async def startup():
logging.info(">>>>> starting up <<<<<")
await pg_client.init()
app.schedule.start()

logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO))
for job in core_crons.cron_jobs + core_dynamic_crons.cron_jobs:
app.schedule.add_job(id=job["func"].__name__, **job)

ap_logger.info(">Scheduled jobs:")
for job in app.schedule.get_jobs():
ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})

@app.on_event("shutdown")
async def shutdown():
logging.info(">>>>> shutting down <<<<<")
app.schedule.shutdown(wait=False)
await pg_client.terminate()

@app.get('/private/shutdown', tags=["private"])
async def stop_server():
logging.info("Requested shutdown")
await shutdown()
import os, signal
os.kill(1, signal.SIGTERM)
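The second hunk above moves scheduler start-up and database-pool initialisation from import time into FastAPI lifecycle hooks. A self-contained sketch of that pattern (the pg_client calls are left commented because they belong to the project; the heartbeat job is invented):

import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from fastapi import FastAPI

app = FastAPI()
app.schedule = AsyncIOScheduler()

@app.on_event("startup")
async def startup():
    logging.info(">>>>> starting up <<<<<")
    # await pg_client.init()            # open the connection pool first
    app.schedule.start()                # then start the cron-style jobs
    app.schedule.add_job(id="heartbeat", func=lambda: logging.info("tick"),
                         trigger="interval", minutes=5)

@app.on_event("shutdown")
async def shutdown():
    logging.info(">>>>> shutting down <<<<<")
    app.schedule.shutdown(wait=False)   # stop jobs before closing the pool
    # await pg_client.terminate()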
@@ -3,11 +3,12 @@ import logging
from apscheduler.schedulers.asyncio import AsyncIOScheduler
from decouple import config
from fastapi import FastAPI
from chalicelib.utils import pg_client

from chalicelib.core import alerts_processor

app = FastAPI(root_path="/alerts", docs_url=config("docs_url", default=""), redoc_url=config("redoc_url", default=""))
print("============= ALERTS =============")
logging.info("============= ALERTS =============")

@app.get("/")

@@ -16,12 +17,39 @@ async def root():

app.schedule = AsyncIOScheduler()
app.schedule.start()
app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",
"minutes": config("ALERTS_INTERVAL", cast=int, default=5),
"misfire_grace_time": 20})
for job in app.schedule.get_jobs():
print({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})

logging.basicConfig(level=config("LOGLEVEL", default=logging.INFO))
logging.getLogger('apscheduler').setLevel(config("LOGLEVEL", default=logging.INFO))
loglevel = config("LOGLEVEL", default=logging.INFO)
print(f">Loglevel set to: {loglevel}")
logging.basicConfig(level=loglevel)
ap_logger = logging.getLogger('apscheduler')
ap_logger.setLevel(loglevel)
app.schedule = AsyncIOScheduler()

@app.on_event("startup")
async def startup():
logging.info(">>>>> starting up <<<<<")
await pg_client.init()
app.schedule.start()
app.schedule.add_job(id="alerts_processor", **{"func": alerts_processor.process, "trigger": "interval",
"minutes": config("ALERTS_INTERVAL", cast=int, default=5),
"misfire_grace_time": 20})

ap_logger.info(">Scheduled jobs:")
for job in app.schedule.get_jobs():
ap_logger.info({"Name": str(job.id), "Run Frequency": str(job.trigger), "Next Run": str(job.next_run_time)})

@app.on_event("shutdown")
async def shutdown():
logging.info(">>>>> shutting down <<<<<")
app.schedule.shutdown(wait=False)
await pg_client.terminate()

@app.get('/private/shutdown', tags=["private"])
async def stop_server():
logging.info("Requested shutdown")
await shutdown()
import os, signal
os.kill(1, signal.SIGTERM)
@@ -6,14 +6,14 @@ from starlette import status
from starlette.exceptions import HTTPException

from chalicelib.core import authorizers, users
from schemas import CurrentContext
import schemas


class JWTAuth(HTTPBearer):
def __init__(self, auto_error: bool = True):
super(JWTAuth, self).__init__(auto_error=auto_error)

async def __call__(self, request: Request) -> Optional[CurrentContext]:
async def __call__(self, request: Request) -> Optional[schemas.CurrentContext]:
credentials: HTTPAuthorizationCredentials = await super(JWTAuth, self).__call__(request)
if credentials:
if not credentials.scheme == "Bearer":

@@ -49,9 +49,9 @@ class JWTAuth(HTTPBearer):
jwt_payload["authorizer_identity"] = "jwt"
print(jwt_payload)
request.state.authorizer_identity = "jwt"
request.state.currentContext = CurrentContext(tenant_id=jwt_payload.get("tenantId", -1),
user_id=jwt_payload.get("userId", -1),
email=user["email"])
request.state.currentContext = schemas.CurrentContext(tenant_id=jwt_payload.get("tenantId", -1),
user_id=jwt_payload.get("userId", -1),
email=user["email"])
return request.state.currentContext

else:

@@ -17,8 +17,8 @@ class ProjectAuthorizer:
current_user: schemas.CurrentContext = await OR_context(request)
value = request.path_params[self.project_identifier]
if (self.project_identifier == "projectId" \
and not (isinstance(value, int) or isinstance(value, str) and value.isnumeric())
and projects.get_project(project_id=value, tenant_id=current_user.tenant_id) is None) \
and (not (isinstance(value, int) or isinstance(value, str) and value.isnumeric())
or projects.get_project(project_id=value, tenant_id=current_user.tenant_id) is None)) \
or (self.project_identifier == "projectKey" \
and projects.get_internal_project_id(project_key=value) is None):
print("project not found")
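The last hunk above reworks the parenthesisation of the project check: a projectId is now rejected when it is either non-numeric or unknown for the tenant, where the two conditions were previously chained with "and". A simplified, hypothetical predicate showing that reading (the lookup callables stand in for projects.get_project / get_internal_project_id):

def project_is_missing(identifier: str, value, lookup_by_id, lookup_by_key) -> bool:
    if identifier == "projectId":
        numeric = isinstance(value, int) or (isinstance(value, str) and value.isnumeric())
        # reject if not numeric, or numeric but not found for this tenant
        return (not numeric) or lookup_by_id(value) is None
    if identifier == "projectKey":
        return lookup_by_key(value) is None
    return False

# a non-numeric projectId is rejected before the lookup is even consulted
assert project_is_missing("projectId", "abc", lambda v: None, lambda v: None) is True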
30  api/build.sh

@@ -7,6 +7,15 @@

# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>

# Helper function
exit_err() {
err_code=$1
if [[ err_code != 0 ]]; then
exit $err_code
fi
}

environment=$1
git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
envarg="default-foss"
check_prereq() {

@@ -18,10 +27,12 @@ check_prereq() {
}

function build_api(){
cp -R ../api ../_api
cd ../_api
cp -R ../utilities/utils ../sourcemap-reader/.
cp -R ../sourcemap-reader .
destination="_api"
[[ $1 == "ee" ]] && {
destination="_api_ee"
}
cp -R ../api ../${destination}
cd ../${destination}
tag=""
# Copy enterprise code
[[ $1 == "ee" ]] && {

@@ -29,9 +40,10 @@ function build_api(){
envarg="default-ee"
tag="ee-"
}
mv Dockerfile.dockerignore .dockerignore
docker build -f ./Dockerfile --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/chalice:${git_sha1} .
cd ../api
rm -rf ../_api
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/chalice:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest

@@ -41,11 +53,13 @@ function build_api(){
}

check_prereq
build_api $1
build_api $environment
echo buil_complete
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1

[[ $1 == "ee" ]] && {
[[ $environment == "ee" ]] && {
cp ../ee/api/build_crons.sh .
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_crons.sh $1
}
exit_err $?
rm build_crons.sh
} || true

@@ -16,9 +16,13 @@ check_prereq() {
}
}

function build_api(){
cp -R ../api ../_alerts
cd ../_alerts
function build_alerts(){
destination="_alerts"
[[ $1 == "ee" ]] && {
destination="_alerts_ee"
}
cp -R ../api ../${destination}
cd ../${destination}
tag=""
# Copy enterprise code
[[ $1 == "ee" ]] && {

@@ -26,9 +30,10 @@ function build_api(){
envarg="default-ee"
tag="ee-"
}
docker build -f ./Dockerfile.alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
mv Dockerfile_alerts.dockerignore .dockerignore
docker build -f ./Dockerfile_alerts --build-arg envarg=$envarg -t ${DOCKER_REPO:-'local'}/alerts:${git_sha1} .
cd ../api
rm -rf ../_alerts
rm -rf ../${destination}
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/alerts:${git_sha1}
docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest

@@ -38,4 +43,4 @@ function build_api(){
}

check_prereq
build_api $1
build_alerts $1
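Both build scripts above now copy the api tree into a variant-specific staging directory (_api / _api_ee, _alerts / _alerts_ee) before running docker build, so the foss and ee builds no longer share one staging copy. A Python rendering of that idea, for illustration only (paths and the helper are assumed):

import shutil
from pathlib import Path

def staging_dir(base: str, ee: bool) -> Path:
    dest = Path("..") / (f"{base}_ee" if ee else base)
    shutil.copytree("../api", dest)   # fresh copy per variant
    return dest                        # docker build then runs inside this copy

# after the build: shutil.rmtree(dest) mirrors the script's `rm -rf ../${destination}`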
@@ -103,6 +103,7 @@ def Build(a):
params = {"project_id": a["projectId"], "now": now}
full_args = {}
j_s = True
main_table = ""
if a["seriesId"] is not None:
a["filter"]["sort"] = "session_id"
a["filter"]["order"] = schemas.SortOrderType.desc

@@ -120,16 +121,16 @@ def Build(a):
WHERE project_id = %(project_id)s
{"AND " + colDef["condition"] if colDef.get("condition") is not None else ""}"""
j_s = colDef.get("joinSessions", True)

main_table = colDef["table"]
is_ss = main_table == "public.sessions"
q = f"""SELECT coalesce(value,0) AS value, coalesce(value,0) {a["query"]["operator"]} {a["query"]["right"]} AS valid"""

if a["detectionMethod"] == schemas.AlertDetectionMethod.threshold:
if a["seriesId"] is not None:
q += f""" FROM ({subQ}) AS stat"""
else:
q += f""" FROM ({subQ} AND timestamp>=%(startDate)s AND timestamp<=%(now)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
{"AND sessions.start_ts <= %(now)s" if j_s else ""}) AS stat"""
q += f""" FROM ({subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}) AS stat"""
params = {**params, **full_args, "startDate": TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000}
else:
if a["change"] == schemas.AlertDetectionType.change:

@@ -142,13 +143,11 @@ def Build(a):
"timestamp_sub2": TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000}
else:
sub1 = f"""{subQ} AND timestamp>=%(startDate)s
AND datetime<=toDateTime(%(now)s/1000)
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
{"AND sessions.start_ts <= %(now)s" if j_s else ""}"""
AND timestamp<=%(now)s
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} AND timestamp<%(startDate)s
AND timestamp>=%(timestamp_sub2)s
{"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
{"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
params["timestamp_sub2"] = TimeUTC.now() - 2 * a["options"]["currentPeriod"] * 60 * 1000
sub1 = f"SELECT (( {sub1} )-( {sub2} )) AS value"
q += f" FROM ( {sub1} ) AS stat"

@@ -164,13 +163,11 @@ def Build(a):
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) \
* 60 * 1000}
else:
sub1 = f"""{subQ} AND timestamp>=%(startDate)s AND timestamp<=%(now)s
{"AND sessions.start_ts >= %(startDate)s" if j_s else ""}
{"AND sessions.start_ts <= %(now)s" if j_s else ""}"""
sub1 = f"""{subQ} {"AND timestamp >= %(startDate)s AND timestamp <= %(now)s" if not is_ss else ""}
{"AND start_ts >= %(startDate)s AND start_ts <= %(now)s" if j_s else ""}"""
params["startDate"] = TimeUTC.now() - a["options"]["currentPeriod"] * 60 * 1000
sub2 = f"""{subQ} AND timestamp<%(startDate)s
AND timestamp>=%(timestamp_sub2)s
{"AND sessions.start_ts < %(startDate)s AND sessions.start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
sub2 = f"""{subQ} {"AND timestamp < %(startDate)s AND timestamp >= %(timestamp_sub2)s" if not is_ss else ""}
{"AND start_ts < %(startDate)s AND start_ts >= %(timestamp_sub2)s" if j_s else ""}"""
params["timestamp_sub2"] = TimeUTC.now() \
- (a["options"]["currentPeriod"] + a["options"]["currentPeriod"]) * 60 * 1000
sub1 = f"SELECT (({sub1})/NULLIF(({sub2}),0)-1)*100 AS value"

@@ -185,21 +182,28 @@ def process():
with pg_client.PostgresClient() as cur:
for alert in all_alerts:
if can_check(alert):
logging.info(f"Querying alertId:{alert['alertId']} name: {alert['name']}")
query, params = Build(alert)
query = cur.mogrify(query, params)
try:
query = cur.mogrify(query, params)
except Exception as e:
logging.error(
f"!!!Error while building alert query for alertId:{alert['alertId']} name: {alert['name']}")
logging.error(e)
continue
logging.debug(alert)
logging.debug(query)
try:
cur.execute(query)
result = cur.fetchone()
if result["valid"]:
logging.info("Valid alert, notifying users")
logging.info(f"Valid alert, notifying users, alertId:{alert['alertId']} name: {alert['name']}")
notifications.append(generate_notification(alert, result))
except Exception as e:
logging.error(f"!!!Error while running alert query for alertId:{alert['alertId']}")
logging.error(str(e))
logging.error(
f"!!!Error while running alert query for alertId:{alert['alertId']} name: {alert['name']}")
logging.error(query)
logging.error(e)
cur = cur.recreate(rollback=True)
if len(notifications) > 0:
cur.execute(
cur.mogrify(f"""UPDATE public.Alerts

@@ -209,12 +213,22 @@ def process():
alerts.process_notifications(notifications)


def __format_value(x):
if x % 1 == 0:
x = int(x)
else:
x = round(x, 2)
return f"{x:,}"


def generate_notification(alert, result):
left = __format_value(result['value'])
right = __format_value(alert['query']['right'])
return {
"alertId": alert["alertId"],
"tenantId": alert["tenantId"],
"title": alert["name"],
"description": f"has been triggered, {alert['query']['left']} = {round(result['value'], 2)} ({alert['query']['operator']} {alert['query']['right']}).",
"description": f"has been triggered, {alert['query']['left']} = {left} ({alert['query']['operator']} {right}).",
"buttonText": "Check metrics for more details",
"buttonUrl": f"/{alert['projectId']}/metrics",
"imageUrl": None,
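The __format_value helper added above renders alert numbers in a human-friendly way: integral values lose the trailing ".0", everything else is rounded to two decimals, and both get thousands separators. A few illustrative values (the function body is copied from the diff, the asserts are mine):

def __format_value(x):
    if x % 1 == 0:
        x = int(x)
    else:
        x = round(x, 2)
    return f"{x:,}"

assert __format_value(1500.0) == "1,500"
assert __format_value(0.4567) == "0.46"
assert __format_value(1234567.891) == "1,234,567.89"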
@@ -1,10 +1,14 @@
from os import access, R_OK
from os.path import exists as path_exists, getsize

import jwt
import requests
from decouple import config
from os.path import exists
from starlette.exceptions import HTTPException

import schemas
from chalicelib.core import projects
from starlette.exceptions import HTTPException
from os import access, R_OK
from chalicelib.utils.TimeUTC import TimeUTC

ASSIST_KEY = config("ASSIST_KEY")
ASSIST_URL = config("ASSIST_URL") % ASSIST_KEY

@@ -49,22 +53,22 @@ def get_live_sessions_ws(project_id, body: schemas.LiveSessionsSearchPayloadSche
def __get_live_sessions_ws(project_id, data):
project_key = projects.get_project_key(project_id)
try:
connected_peers = requests.post(ASSIST_URL + config("assist") + f"/{project_key}",
json=data, timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
print(connected_peers.text)
results = requests.post(ASSIST_URL + config("assist") + f"/{project_key}",
json=data, timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
print(f"!! issue with the peer-server code:{results.status_code}")
print(results.text)
return {"total": 0, "sessions": []}
live_peers = connected_peers.json().get("data", [])
live_peers = results.json().get("data", [])
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
print("!! Timeout getting Assist response")
live_peers = {"total": 0, "sessions": []}
except Exception as e:
print("issue getting Live-Assist response")
print("!! Issue getting Live-Assist response")
print(str(e))
print("expected JSON, received:")
try:
print(connected_peers.text)
print(results.text)
except:
print("couldn't get response")
live_peers = {"total": 0, "sessions": []}

@@ -77,58 +81,76 @@ def __get_live_sessions_ws(project_id, data):
return live_peers


def __get_agent_token(project_id, project_key, session_id):
iat = TimeUTC.now()
return jwt.encode(
payload={
"projectKey": project_key,
"projectId": project_id,
"sessionId": session_id,
"iat": iat // 1000,
"exp": iat // 1000 + config("ASSIST_JWT_EXPIRATION", cast=int) + TimeUTC.get_utc_offset() // 1000,
"iss": config("JWT_ISSUER"),
"aud": f"openreplay:agent"
},
key=config("ASSIST_JWT_SECRET"),
algorithm=config("jwt_algorithm")
)


def get_live_session_by_id(project_id, session_id):
project_key = projects.get_project_key(project_id)
try:
connected_peers = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
print(connected_peers.text)
return False
connected_peers = connected_peers.json().get("data")
if connected_peers is None:
results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
print(f"!! issue with the peer-server code:{results.status_code}")
print(results.text)
return None
connected_peers["live"] = True
results = results.json().get("data")
if results is None:
return None
results["live"] = True
results["agentToken"] = __get_agent_token(project_id=project_id, project_key=project_key, session_id=session_id)
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
print("!! Timeout getting Assist response")
return None
except Exception as e:
print("issue getting Assist response")
print("!! Issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
print(connected_peers.text)
print(results.text)
except:
print("couldn't get response")
return None
return connected_peers
return results


def is_live(project_id, session_id, project_key=None):
if project_key is None:
project_key = projects.get_project_key(project_id)
try:
connected_peers = requests.get(ASSIST_URL + config("assistList") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
print(connected_peers.text)
results = requests.get(ASSIST_URL + config("assistList") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
print(f"!! issue with the peer-server code:{results.status_code}")
print(results.text)
return False
connected_peers = connected_peers.json().get("data")
results = results.json().get("data")
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
print("!! Timeout getting Assist response")
return False
except Exception as e:
print("issue getting Assist response")
print("!! Issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
print(connected_peers.text)
print(results.text)
except:
print("couldn't get response")
return False
return str(session_id) == connected_peers
return str(session_id) == results


def autocomplete(project_id, q: str, key: str = None):

@@ -141,15 +163,15 @@ def autocomplete(project_id, q: str, key: str = None):
ASSIST_URL + config("assistList") + f"/{project_key}/autocomplete",
params=params, timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
print("!! issue with the peer-server")
print(f"!! issue with the peer-server code:{results.status_code}")
print(results.text)
return {"errors": [f"Something went wrong wile calling assist:{results.text}"]}
results = results.json().get("data", [])
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
print("!! Timeout getting Assist response")
return {"errors": ["Assist request timeout"]}
except Exception as e:
print("issue getting Assist response")
print("!! Issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:

@@ -165,21 +187,76 @@ def get_ice_servers():
and len(config("iceServers")) > 0 else None


def get_raw_mob_by_id(project_id, session_id):
def __get_efs_path():
efs_path = config("FS_DIR")
if not exists(efs_path):
if not path_exists(efs_path):
raise HTTPException(400, f"EFS not found in path: {efs_path}")

if not access(efs_path, R_OK):
raise HTTPException(400, f"EFS found under: {efs_path}; but it is not readable, please check permissions")
return efs_path

path_to_file = efs_path + "/" + str(session_id)

if exists(path_to_file):
def __get_mob_path(project_id, session_id):
params = {"projectId": project_id, "sessionId": session_id}
return config("EFS_SESSION_MOB_PATTERN", default="%(sessionId)s") % params


def get_raw_mob_by_id(project_id, session_id):
efs_path = __get_efs_path()
path_to_file = efs_path + "/" + __get_mob_path(project_id=project_id, session_id=session_id)
if path_exists(path_to_file):
if not access(path_to_file, R_OK):
raise HTTPException(400, f"Replay file found under: {efs_path};"
raise HTTPException(400, f"Replay file found under: {efs_path};" +
f" but it is not readable, please check permissions")
# getsize return size in bytes, UNPROCESSED_MAX_SIZE is in Kb
if (getsize(path_to_file) / 1000) >= config("UNPROCESSED_MAX_SIZE", cast=int, default=200 * 1000):
raise HTTPException(413, "Replay file too large")
return path_to_file

return None


def __get_devtools_path(project_id, session_id):
params = {"projectId": project_id, "sessionId": session_id}
return config("EFS_DEVTOOLS_MOB_PATTERN", default="%(sessionId)s") % params


def get_raw_devtools_by_id(project_id, session_id):
efs_path = __get_efs_path()
path_to_file = efs_path + "/" + __get_devtools_path(project_id=project_id, session_id=session_id)
if path_exists(path_to_file):
if not access(path_to_file, R_OK):
raise HTTPException(400, f"Devtools file found under: {efs_path};"
f" but it is not readable, please check permissions")

return path_to_file

return None


def session_exists(project_id, session_id):
project_key = projects.get_project_key(project_id)
try:
results = requests.get(ASSIST_URL + config("assist") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
print(f"!! issue with the peer-server code:{results.status_code}")
print(results.text)
return None
results = results.json().get("data")
if results is None:
return False
return True
except requests.exceptions.Timeout:
print("!! Timeout getting Assist response")
return False
except Exception as e:
print("!! Issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
print(results.text)
except:
print("couldn't get response")
return False
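__get_mob_path and __get_devtools_path (added above) build the replay-file location from a configurable pattern via %-style substitution. A quick illustration of how such a pattern expands (the custom pattern value is an example, not the project's default):

params = {"projectId": 42, "sessionId": 7581337}

default_pattern = "%(sessionId)s"
custom_pattern = "%(projectId)s/%(sessionId)s/session.mob"

assert default_pattern % params == "7581337"
assert custom_pattern % params == "42/7581337/session.mob"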
@@ -15,7 +15,7 @@ def jwt_authorizer(token):
token[1],
config("jwt_secret"),
algorithms=config("jwt_algorithm"),
audience=[f"plugin:{helper.get_stage_name()}", f"front:{helper.get_stage_name()}"]
audience=[ f"front:{helper.get_stage_name()}"]
)
except jwt.ExpiredSignatureError:
print("! JWT Expired signature")

@@ -42,8 +42,8 @@ def generate_jwt(id, tenant_id, iat, aud):
payload={
"userId": id,
"tenantId": tenant_id,
"exp": iat // 1000 + config("jwt_exp_delta_seconds", cast=int) + TimeUTC.get_utc_offset() // 1000,
"iss": config("jwt_issuer"),
"exp": iat // 1000 + config("JWT_EXPIRATION", cast=int) + TimeUTC.get_utc_offset() // 1000,
"iss": config("JWT_ISSUER"),
"iat": iat // 1000,
"aud": aud
},
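Both the agent token in assist and generate_jwt above follow the same PyJWT shape: a payload with iat/exp/iss/aud, signed with a configured secret and algorithm. A self-contained sketch, assuming PyJWT and placeholder secret, issuer and lifetime (none of these values come from the project):

import time
import jwt  # PyJWT

SECRET, ISSUER, LIFETIME = "change-me", "openreplay", 3600

def make_token(payload: dict, audience: str) -> str:
    now = int(time.time())
    return jwt.encode({**payload, "iat": now, "exp": now + LIFETIME,
                       "iss": ISSUER, "aud": audience},
                      key=SECRET, algorithm="HS256")

def read_token(token: str, audience: str) -> dict:
    # jwt.decode verifies signature, expiry and audience in one call
    return jwt.decode(token, SECRET, algorithms=["HS256"], audience=[audience])

claims = read_token(make_token({"userId": 1}, "front:staging"), "front:staging")
assert claims["userId"] == 1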
@@ -35,24 +35,57 @@ class Slack:
return True

@classmethod
def send_text(cls, tenant_id, webhook_id, text, **args):
def send_text_attachments(cls, tenant_id, webhook_id, text, **args):
integration = cls.__get(tenant_id=tenant_id, integration_id=webhook_id)
if integration is None:
return {"errors": ["slack integration not found"]}
print("====> sending slack notification")
r = requests.post(
url=integration["endpoint"],
json={
"attachments": [
{
"text": text,
"ts": datetime.now().timestamp(),
**args
}
]
})
print(r)
print(r.text)
try:
r = requests.post(
url=integration["endpoint"],
json={
"attachments": [
{
"text": text,
"ts": datetime.now().timestamp(),
**args
}
]
},
timeout=5)
if r.status_code != 200:
print(f"!! issue sending slack text attachments; webhookId:{webhook_id} code:{r.status_code}")
print(r.text)
return None
except requests.exceptions.Timeout:
print(f"!! Timeout sending slack text attachments webhookId:{webhook_id}")
return None
except Exception as e:
print(f"!! Issue sending slack text attachments webhookId:{webhook_id}")
print(str(e))
return None
return {"data": r.text}

@classmethod
def send_raw(cls, tenant_id, webhook_id, body):
integration = cls.__get(tenant_id=tenant_id, integration_id=webhook_id)
if integration is None:
return {"errors": ["slack integration not found"]}
try:
r = requests.post(
url=integration["endpoint"],
json=body,
timeout=5)
if r.status_code != 200:
print(f"!! issue sending slack raw; webhookId:{webhook_id} code:{r.status_code}")
print(r.text)
return None
except requests.exceptions.Timeout:
print(f"!! Timeout sending slack raw webhookId:{webhook_id}")
return None
except Exception as e:
print(f"!! Issue sending slack raw webhookId:{webhook_id}")
print(str(e))
return None
return {"data": r.text}

@classmethod

@@ -66,7 +99,8 @@ class Slack:
url=integration["endpoint"],
json={"attachments": attachments[i:i + 100]})
if r.status_code != 200:
print("!!!! something went wrong")
print("!!!! something went wrong while sending to:")
print(integration)
print(r)
print(r.text)
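The reworked Slack helpers above all converge on one defensive shape: a bounded timeout, an explicit status check, and None on any failure. The generic form of that call, extracted here as a standalone sketch (the wrapper function itself is assumed):

import requests

def post_webhook(url: str, payload: dict, timeout: int = 5):
    try:
        r = requests.post(url=url, json=payload, timeout=timeout)
        if r.status_code != 200:
            print(f"!! webhook returned code:{r.status_code}")
            return None
    except requests.exceptions.Timeout:
        print("!! webhook timeout")
        return None
    except Exception as e:
        print(f"!! webhook error: {e}")
        return None
    return {"data": r.text}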
@@ -266,7 +266,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
params = {"metric_id": metric_id, "is_public": data.is_public, "name": data.name,
"user_id": user_id, "project_id": project_id, "view_type": data.view_type,
"metric_type": data.metric_type, "metric_of": data.metric_of,
"metric_value": data.metric_value, "metric_format": data.metric_format}
"metric_value": data.metric_value, "metric_format": data.metric_format,
"config": json.dumps(data.config.dict())}
for i, s in enumerate(data.series):
prefix = "u_"
if s.index is None:

@@ -316,7 +317,8 @@ def update(metric_id, user_id, project_id, data: schemas.UpdateCustomMetricsSche
view_type= %(view_type)s, metric_type= %(metric_type)s,
metric_of= %(metric_of)s, metric_value= %(metric_value)s,
metric_format= %(metric_format)s,
edited_at = timezone('utc'::text, now())
edited_at = timezone('utc'::text, now()),
default_config = %(config)s
WHERE metric_id = %(metric_id)s
AND project_id = %(project_id)s
AND (user_id = %(user_id)s OR is_public)
@@ -111,6 +111,8 @@ def get_dashboard(project_id, user_id, dashboard_id):
for w in row["widgets"]:
w["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
w["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
w["config"]["col"] = w["default_config"]["col"]
w["config"]["row"] = w["default_config"]["row"]
for s in w["series"]:
s["created_at"] = TimeUTC.datetime_to_timestamp(s["created_at"])
return helper.dict_to_camel_case(row)
@ -83,6 +83,19 @@ def __process_tags(row):
|
|||
def get_details(project_id, error_id, user_id, **data):
|
||||
pg_sub_query24 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size24")
|
||||
pg_sub_query24.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_session = __get_basic_constraints(time_constraint=True, chart=False,
|
||||
startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30", project_key="sessions.project_id")
|
||||
pg_sub_query30_session.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_session.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_session.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err = __get_basic_constraints(time_constraint=True, chart=False, startTime_arg_name="startDate30",
|
||||
endTime_arg_name="endDate30", project_key="errors.project_id")
|
||||
pg_sub_query30_err.append("sessions.project_id = %(project_id)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts >= %(startDate30)s")
|
||||
pg_sub_query30_err.append("sessions.start_ts <= %(endDate30)s")
|
||||
pg_sub_query30_err.append("error_id = %(error_id)s")
|
||||
pg_sub_query30_err.append("source ='js_exception'")
|
||||
pg_sub_query30 = __get_basic_constraints(time_constraint=False, chart=True, step_size_name="step_size30")
|
||||
pg_sub_query30.append("error_id = %(error_id)s")
|
||||
pg_basic_query = __get_basic_constraints(time_constraint=False)
|
||||
|
|
@ -121,50 +134,49 @@ def get_details(project_id, error_id, user_id, **data):
|
|||
device_partition,
|
||||
country_partition,
|
||||
chart24,
|
||||
chart30
|
||||
chart30,
|
||||
custom_tags
|
||||
FROM (SELECT error_id,
|
||||
name,
|
||||
message,
|
||||
COUNT(DISTINCT user_uuid) AS users,
|
||||
COUNT(DISTINCT user_id) AS users,
|
||||
COUNT(DISTINCT session_id) AS sessions
|
||||
FROM public.errors
|
||||
INNER JOIN events.errors AS s_errors USING (error_id)
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE error_id = %(error_id)s
|
||||
WHERE {" AND ".join(pg_sub_query30_err)}
|
||||
GROUP BY error_id, name, message) AS details
|
||||
INNER JOIN (SELECT error_id,
|
||||
MAX(timestamp) AS last_occurrence,
|
||||
INNER JOIN (SELECT MAX(timestamp) AS last_occurrence,
|
||||
MIN(timestamp) AS first_occurrence
|
||||
FROM events.errors
|
||||
WHERE error_id = %(error_id)s
|
||||
GROUP BY error_id) AS time_details USING (error_id)
|
||||
INNER JOIN (SELECT error_id,
|
||||
session_id AS last_session_id,
|
||||
user_os,
|
||||
user_os_version,
|
||||
user_browser,
|
||||
user_browser_version,
|
||||
user_device,
|
||||
user_device_type,
|
||||
user_uuid
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
GROUP BY error_id) AS time_details ON (TRUE)
|
||||
INNER JOIN (SELECT session_id AS last_session_id,
|
||||
coalesce(custom_tags, '[]')::jsonb AS custom_tags
|
||||
FROM events.errors
|
||||
LEFT JOIN LATERAL (
|
||||
SELECT jsonb_agg(jsonb_build_object(errors_tags.key, errors_tags.value)) AS custom_tags
|
||||
FROM errors_tags
|
||||
WHERE errors_tags.error_id = %(error_id)s
|
||||
AND errors_tags.session_id = errors.session_id
|
||||
AND errors_tags.message_id = errors.message_id) AS errors_tags ON (TRUE)
|
||||
WHERE error_id = %(error_id)s
|
||||
ORDER BY errors.timestamp DESC
|
||||
LIMIT 1) AS last_session_details USING (error_id)
|
||||
LIMIT 1) AS last_session_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(browser_details) AS browsers_partition
|
||||
FROM (SELECT *
|
||||
FROM (SELECT user_browser AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors
|
||||
INNER JOIN sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_basic_query)}
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_browser
|
||||
ORDER BY count DESC) AS count_per_browser_query
|
||||
INNER JOIN LATERAL (SELECT JSONB_AGG(version_details) AS partition
|
||||
FROM (SELECT user_browser_version AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_basic_query)}
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_browser = count_per_browser_query.name
|
||||
GROUP BY user_browser_version
|
||||
ORDER BY count DESC) AS version_details
|
||||
|
|
@ -174,13 +186,13 @@ def get_details(project_id, error_id, user_id, **data):
|
|||
FROM (SELECT user_os AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_basic_query)}
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_os
|
||||
ORDER BY count DESC) AS count_per_os_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_version_details) AS partition
|
||||
FROM (SELECT COALESCE(user_os_version,'unknown') AS version, COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_basic_query)}
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_os = count_per_os_details.name
|
||||
GROUP BY user_os_version
|
||||
ORDER BY count DESC) AS count_per_version_details
|
||||
|
|
@ -191,7 +203,7 @@ def get_details(project_id, error_id, user_id, **data):
|
|||
FROM (SELECT user_device_type AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_basic_query)}
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_device_type
|
||||
ORDER BY count DESC) AS count_per_device_details
|
||||
INNER JOIN LATERAL (SELECT jsonb_agg(count_per_device_v_details) AS partition
|
||||
|
|
@ -201,7 +213,7 @@ def get_details(project_id, error_id, user_id, **data):
|
|||
ELSE user_device END AS version,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_basic_query)}
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
AND sessions.user_device_type = count_per_device_details.name
|
||||
GROUP BY user_device
|
||||
ORDER BY count DESC) AS count_per_device_v_details
|
||||
|
|
@ -211,7 +223,7 @@ def get_details(project_id, error_id, user_id, **data):
|
|||
FROM (SELECT user_country AS name,
|
||||
COUNT(session_id) AS count
|
||||
FROM events.errors INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_basic_query)}
|
||||
WHERE {" AND ".join(pg_sub_query30_session)}
|
||||
GROUP BY user_country
|
||||
ORDER BY count DESC) AS count_per_country_details) AS country_details ON (TRUE)
|
||||
INNER JOIN (SELECT jsonb_agg(chart_details) AS chart24
|
||||
|
|
@@ -709,36 +721,6 @@ def __status_rank(status):
    }.get(status)


def merge(error_ids):
    error_ids = list(set(error_ids))
    errors = get_batch(error_ids)
    if len(error_ids) <= 1 or len(error_ids) > len(errors):
        return {"errors": ["invalid list of ids"]}
    error_ids = [e["errorId"] for e in errors]
    parent_error_id = error_ids[0]
    status = "unresolved"
    for e in errors:
        if __status_rank(status) < __status_rank(e["status"]):
            status = e["status"]
            if __status_rank(status) == MAX_RANK:
                break
    params = {
        "error_ids": tuple(error_ids),
        "parent_error_id": parent_error_id,
        "status": status
    }
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify(
            """UPDATE public.errors
               SET parent_error_id = %(parent_error_id)s, status = %(status)s
               WHERE error_id IN %(error_ids)s OR parent_error_id IN %(error_ids)s;""",
            params)
        cur.execute(query=query)
        # row = cur.fetchone()

    return {"data": "success"}


def format_first_stack_frame(error):
    error["stack"] = sourcemaps.format_payload(error.pop("payload"), truncate_to_first=True)
    for s in error["stack"]:
@@ -144,7 +144,7 @@ def execute_jobs():
            )

            sessions.delete_sessions_by_session_ids(session_ids)
            sessions_mobs.delete_mobs(session_ids)
            sessions_mobs.delete_mobs(session_ids=session_ids, project_id=job["projectId"])
        else:
            raise Exception(f"The action {job['action']} not supported.")
@@ -1,7 +1,6 @@
# from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch import Elasticsearch
from chalicelib.core import log_tools
import base64
import logging

logging.getLogger('elasticsearch').level = logging.ERROR
@@ -1,7 +1,7 @@
import re

from chalicelib.core import projects
from chalicelib.utils import pg_client, dev
from chalicelib.utils import pg_client

MAX_INDEXES = 10
@ -419,7 +419,7 @@ def get_slowest_images(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|||
pg_sub_query_chart = __get_constraints(project_id=project_id, time_constraint=True,
|
||||
chart=True, data=args)
|
||||
pg_sub_query_chart.append("resources.type = 'img'")
|
||||
pg_sub_query_chart.append("resources.url = top_img.url")
|
||||
pg_sub_query_chart.append("resources.url_hostpath = top_img.url_hostpath")
|
||||
|
||||
pg_sub_query_subset = __get_constraints(project_id=project_id, time_constraint=True,
|
||||
chart=False, data=args)
|
||||
|
|
@ -431,13 +431,13 @@ def get_slowest_images(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT *
|
||||
FROM (SELECT resources.url,
|
||||
FROM (SELECT resources.url_hostpath,
|
||||
COALESCE(AVG(resources.duration), 0) AS avg_duration,
|
||||
COUNT(resources.session_id) AS sessions_count
|
||||
FROM events.resources
|
||||
INNER JOIN sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query_subset)}
|
||||
GROUP BY resources.url
|
||||
GROUP BY resources.url_hostpath
|
||||
ORDER BY avg_duration DESC
|
||||
LIMIT 10) AS top_img
|
||||
LEFT JOIN LATERAL (
|
||||
|
|
@ -485,13 +485,13 @@ def get_performance(project_id, startTimestamp=TimeUTC.now(delta_days=-1), endTi
|
|||
if resources and len(resources) > 0:
|
||||
for r in resources:
|
||||
if r["type"] == "IMG":
|
||||
img_constraints.append(f"resources.url = %(val_{len(img_constraints)})s")
|
||||
img_constraints.append(f"resources.url_hostpath = %(val_{len(img_constraints)})s")
|
||||
img_constraints_vals["val_" + str(len(img_constraints) - 1)] = r['value']
|
||||
elif r["type"] == "LOCATION":
|
||||
location_constraints.append(f"pages.path = %(val_{len(location_constraints)})s")
|
||||
location_constraints_vals["val_" + str(len(location_constraints) - 1)] = r['value']
|
||||
else:
|
||||
request_constraints.append(f"resources.url = %(val_{len(request_constraints)})s")
|
||||
request_constraints.append(f"resources.url_hostpath = %(val_{len(request_constraints)})s")
|
||||
request_constraints_vals["val_" + str(len(request_constraints) - 1)] = r['value']
|
||||
params = {"step_size": step_size, "project_id": project_id, "startTimestamp": startTimestamp,
|
||||
"endTimestamp": endTimestamp}
|
||||
|
|
@ -627,12 +627,12 @@ def search(text, resource_type, project_id, performance=False, pages_only=False,
|
|||
pg_sub_query.append("url_hostpath ILIKE %(value)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""SELECT key, value
|
||||
FROM ( SELECT DISTINCT ON (url) ROW_NUMBER() OVER (PARTITION BY type ORDER BY url) AS r,
|
||||
url AS value,
|
||||
FROM ( SELECT DISTINCT ON (url_hostpath) ROW_NUMBER() OVER (PARTITION BY type ORDER BY url_hostpath) AS r,
|
||||
url_hostpath AS value,
|
||||
type AS key
|
||||
FROM events.resources INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
ORDER BY url, type ASC) AS ranked_values
|
||||
ORDER BY url_hostpath, type ASC) AS ranked_values
|
||||
WHERE ranked_values.r<=5;"""
|
||||
cur.execute(cur.mogrify(pg_query, {"project_id": project_id, "value": helper.string_to_sql_like(text)}))
|
||||
rows = cur.fetchall()
|
||||
|
|
@ -893,7 +893,7 @@ def get_resources_loading_time(project_id, startTimestamp=TimeUTC.now(delta_days
|
|||
if type is not None:
|
||||
pg_sub_query_subset.append(f"resources.type = '{__get_resource_db_type_from_type(type)}'")
|
||||
if url is not None:
|
||||
pg_sub_query_subset.append(f"resources.url = %(value)s")
|
||||
pg_sub_query_subset.append(f"resources.url_hostpath = %(value)s")
|
||||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
pg_query = f"""WITH resources AS (SELECT resources.duration, timestamp
|
||||
|
|
@ -1009,7 +1009,7 @@ def get_slowest_resources(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
|
|||
ORDER BY avg DESC
|
||||
LIMIT 10) AS main_list
|
||||
INNER JOIN LATERAL (
|
||||
SELECT url, type
|
||||
SELECT url_hostpath AS url, type
|
||||
FROM events.resources
|
||||
INNER JOIN public.sessions USING (session_id)
|
||||
WHERE {" AND ".join(pg_sub_query)}
|
||||
|
|
|
|||
|
|
@@ -2,7 +2,7 @@ import json

import schemas
from chalicelib.core import users
from chalicelib.utils import pg_client, helper, dev
from chalicelib.utils import pg_client, helper
from chalicelib.utils.TimeUTC import TimeUTC


@@ -48,7 +48,7 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
    if gdpr:
        extra_projection += ',s.gdpr'
    if recorded:
        extra_projection += """, COALESCE(nullif(EXTRACT(EPOCH FROM s.first_recorded_session_at) * 1000, NULL)::BIGINT,
        extra_projection += """,COALESCE(nullif(EXTRACT(EPOCH FROM s.first_recorded_session_at) * 1000, NULL)::BIGINT,
                                         (SELECT MIN(sessions.start_ts)
                                          FROM public.sessions
                                          WHERE sessions.project_id = s.project_id
@@ -76,19 +76,21 @@ def get_projects(tenant_id, recording_state=False, gdpr=None, recorded=False, st
        rows = cur.fetchall()
        # if recorded is requested, check if it was saved or computed
        if recorded:
            for r in rows:
            u_values = []
            params = {}
            for i, r in enumerate(rows):
                if r["first_recorded_session_at"] is None:
                    extra_update = ""
                    if r["recorded"]:
                        extra_update = ", first_recorded_session_at=to_timestamp(%(first_recorded)s/1000)"
                    query = cur.mogrify(f"""UPDATE public.projects
                                            SET sessions_last_check_at=(now() at time zone 'utc')
                                            {extra_update}
                                            WHERE project_id=%(project_id)s""",
                                        {"project_id": r["project_id"], "first_recorded": r["first_recorded"]})
                    cur.execute(query)
                    u_values.append(f"(%(project_id_{i})s,to_timestamp(%(first_recorded_{i})s/1000))")
                    params[f"project_id_{i}"] = r["project_id"]
                    params[f"first_recorded_{i}"] = r["first_recorded"] if r["recorded"] else None
                r.pop("first_recorded_session_at")
                r.pop("first_recorded")
            if len(u_values) > 0:
                query = cur.mogrify(f"""UPDATE public.projects
                                        SET sessions_last_check_at=(now() at time zone 'utc'), first_recorded_session_at=u.first_recorded
                                        FROM (VALUES {",".join(u_values)}) AS u(project_id,first_recorded)
                                        WHERE projects.project_id=u.project_id;""", params)
                cur.execute(query)

        if recording_state and len(rows) > 0:
            project_ids = [f'({r["project_id"]})' for r in rows]
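For context, the get_projects change above drops the per-row UPDATE in favour of a single UPDATE ... FROM (VALUES ...) statement. A minimal sketch of the pattern, assuming the same pg_client/mogrify helpers used throughout this codebase (the row values are made up):

# Illustrative only: collapse several per-project updates into one statement.
from chalicelib.utils import pg_client

rows = [{"project_id": 1, "recorded": True, "first_recorded": 1670000000000},
        {"project_id": 2, "recorded": False, "first_recorded": None}]
u_values, params = [], {}
for i, r in enumerate(rows):
    u_values.append(f"(%(project_id_{i})s, to_timestamp(%(first_recorded_{i})s/1000))")
    params[f"project_id_{i}"] = r["project_id"]
    params[f"first_recorded_{i}"] = r["first_recorded"] if r["recorded"] else None

with pg_client.PostgresClient() as cur:
    # One round trip instead of N; NULL first_recorded values simply keep the column NULL.
    query = cur.mogrify(f"""UPDATE public.projects
                            SET sessions_last_check_at=(now() at time zone 'utc'),
                                first_recorded_session_at=u.first_recorded
                            FROM (VALUES {",".join(u_values)}) AS u(project_id, first_recorded)
                            WHERE projects.project_id = u.project_id;""", params)
    cur.execute(query)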
@@ -11,11 +11,7 @@ def reset(data: schemas.ForgetPasswordPayloadSchema):
    if not helper.has_smtp():
        return {"errors": ["no SMTP configuration found, you can ask your admin to reset your password"]}
    a_users = users.get_by_email_only(data.email)
    if len(a_users) > 1:
        print(f"multiple users found for [{data.email}] please contact our support")
        return {"errors": ["multiple users, please contact our support"]}
    elif len(a_users) == 1:
        a_users = a_users[0]
    if a_users:
        invitation_link = users.generate_new_invitation(user_id=a_users["id"])
        email_helper.send_forgot_password(recipient=data.email, invitation_link=invitation_link)
    else:
@ -2,7 +2,8 @@ from typing import List
|
|||
|
||||
import schemas
|
||||
from chalicelib.core import events, metadata, events_ios, \
|
||||
sessions_mobs, issues, projects, errors, resources, assist, performance_event, sessions_viewed, sessions_favorite
|
||||
sessions_mobs, issues, projects, errors, resources, assist, performance_event, sessions_viewed, sessions_favorite, \
|
||||
sessions_devtool, sessions_notes
|
||||
from chalicelib.utils import pg_client, helper, metrics_helper
|
||||
|
||||
SESSION_PROJECTION_COLS = """s.project_id,
|
||||
|
|
@ -39,8 +40,8 @@ def __group_metadata(session, project_metadata):
|
|||
return meta
|
||||
|
||||
|
||||
def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_viewed=False, group_metadata=False,
|
||||
live=True):
|
||||
def get_by_id2_pg(project_id, session_id, context: schemas.CurrentContext, full_data=False, include_fav_viewed=False,
|
||||
group_metadata=False, live=True):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
extra_query = []
|
||||
if include_fav_viewed:
|
||||
|
|
@ -63,7 +64,7 @@ def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_
|
|||
FROM public.sessions AS s {"INNER JOIN public.projects AS p USING (project_id)" if group_metadata else ""}
|
||||
WHERE s.project_id = %(project_id)s
|
||||
AND s.session_id = %(session_id)s;""",
|
||||
{"project_id": project_id, "session_id": session_id, "userId": user_id}
|
||||
{"project_id": project_id, "session_id": session_id, "userId": context.user_id}
|
||||
)
|
||||
# print("===============")
|
||||
# print(query)
|
||||
|
|
@ -81,27 +82,29 @@ def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_
|
|||
data['crashes'] = events_ios.get_crashes_by_session_id(session_id=session_id)
|
||||
data['userEvents'] = events_ios.get_customs_by_sessionId(project_id=project_id,
|
||||
session_id=session_id)
|
||||
data['mobsUrl'] = sessions_mobs.get_ios(sessionId=session_id)
|
||||
data['mobsUrl'] = sessions_mobs.get_ios(session_id=session_id)
|
||||
else:
|
||||
data['events'] = events.get_by_sessionId2_pg(project_id=project_id, session_id=session_id,
|
||||
group_clickrage=True)
|
||||
all_errors = events.get_errors_by_session_id(session_id=session_id, project_id=project_id)
|
||||
data['stackEvents'] = [e for e in all_errors if e['source'] != "js_exception"]
|
||||
# to keep only the first stack
|
||||
data['errors'] = [errors.format_first_stack_frame(e) for e in all_errors if
|
||||
e['source'] == "js_exception"][
|
||||
:500] # limit the number of errors to reduce the response-body size
|
||||
# limit the number of errors to reduce the response-body size
|
||||
data['errors'] = [errors.format_first_stack_frame(e) for e in all_errors
|
||||
if e['source'] == "js_exception"][:500]
|
||||
data['userEvents'] = events.get_customs_by_sessionId2_pg(project_id=project_id,
|
||||
session_id=session_id)
|
||||
data['mobsUrl'] = sessions_mobs.get_web(sessionId=session_id)
|
||||
data['domURL'] = sessions_mobs.get_urls(session_id=session_id, project_id=project_id)
|
||||
data['mobsUrl'] = sessions_mobs.get_urls_depercated(session_id=session_id)
|
||||
data['devtoolsURL'] = sessions_devtool.get_urls(session_id=session_id, project_id=project_id)
|
||||
data['resources'] = resources.get_by_session_id(session_id=session_id, project_id=project_id,
|
||||
start_ts=data["startTs"],
|
||||
duration=data["duration"])
|
||||
start_ts=data["startTs"], duration=data["duration"])
|
||||
|
||||
data['notes'] = sessions_notes.get_session_notes(tenant_id=context.tenant_id, project_id=project_id,
|
||||
session_id=session_id, user_id=context.user_id)
|
||||
data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
|
||||
data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
|
||||
data['live'] = live and assist.is_live(project_id=project_id,
|
||||
session_id=session_id,
|
||||
data['live'] = live and assist.is_live(project_id=project_id, session_id=session_id,
|
||||
project_key=data["projectKey"])
|
||||
data["inDB"] = True
|
||||
return data
|
||||
|
|
@ -174,7 +177,7 @@ def _isUndefined_operator(op: schemas.SearchEventOperator):
|
|||
|
||||
# This function executes the query and return result
|
||||
def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, errors_only=False,
|
||||
error_status=schemas.ErrorStatus.all, count_only=False, issue=None):
|
||||
error_status=schemas.ErrorStatus.all, count_only=False, issue=None, ids_only=False):
|
||||
if data.bookmarked:
|
||||
data.startDate, data.endDate = sessions_favorite.get_start_end_timestamp(project_id, user_id)
|
||||
|
||||
|
|
@ -182,9 +185,11 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
|
|||
favorite_only=data.bookmarked, issue=issue, project_id=project_id,
|
||||
user_id=user_id)
|
||||
if data.limit is not None and data.page is not None:
|
||||
full_args["sessions_limit"] = data.limit
|
||||
full_args["sessions_limit_s"] = (data.page - 1) * data.limit
|
||||
full_args["sessions_limit_e"] = data.page * data.limit
|
||||
else:
|
||||
full_args["sessions_limit"] = 200
|
||||
full_args["sessions_limit_s"] = 1
|
||||
full_args["sessions_limit_e"] = 200
|
||||
|
||||
|
|
@ -232,6 +237,12 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
|
|||
GROUP BY user_id
|
||||
) AS users_sessions;""",
|
||||
full_args)
|
||||
elif ids_only:
|
||||
main_query = cur.mogrify(f"""SELECT DISTINCT ON(s.session_id) s.session_id
|
||||
{query_part}
|
||||
ORDER BY s.session_id desc
|
||||
LIMIT %(sessions_limit)s OFFSET %(sessions_limit_s)s;""",
|
||||
full_args)
|
||||
else:
|
||||
if data.order is None:
|
||||
data.order = schemas.SortOrderType.desc
|
||||
|
|
@ -239,7 +250,6 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
|
|||
if data.sort is not None and data.sort != "session_id":
|
||||
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
|
||||
sort = helper.key_to_snake_case(data.sort)
|
||||
|
||||
meta_keys = metadata.get(project_id=project_id)
|
||||
main_query = cur.mogrify(f"""SELECT COUNT(full_sessions) AS count,
|
||||
COALESCE(JSONB_AGG(full_sessions)
|
||||
|
|
@ -263,7 +273,7 @@ def search_sessions(data: schemas.SessionsSearchPayloadSchema, project_id, user_
|
|||
print(data.json())
|
||||
print("--------------------")
|
||||
raise err
|
||||
if errors_only:
|
||||
if errors_only or ids_only:
|
||||
return helper.list_to_camel_case(cur.fetchall())
|
||||
|
||||
sessions = cur.fetchone()
|
||||
|
|
@ -329,7 +339,15 @@ def search2_series(data: schemas.SessionsSearchPayloadSchema, project_id: int, d
|
|||
# print("--------------------")
|
||||
# print(main_query)
|
||||
# print("--------------------")
|
||||
cur.execute(main_query)
|
||||
try:
|
||||
cur.execute(main_query)
|
||||
except Exception as err:
|
||||
print("--------- SESSIONS-SERIES QUERY EXCEPTION -----------")
|
||||
print(main_query.decode('UTF-8'))
|
||||
print("--------- PAYLOAD -----------")
|
||||
print(data.json())
|
||||
print("--------------------")
|
||||
raise err
|
||||
if view_type == schemas.MetricTimeseriesViewType.line_chart:
|
||||
sessions = cur.fetchall()
|
||||
else:
|
||||
|
|
@ -1237,3 +1255,15 @@ def count_all():
|
|||
with pg_client.PostgresClient(unlimited_query=True) as cur:
|
||||
row = cur.execute(query="SELECT COUNT(session_id) AS count FROM public.sessions")
|
||||
return row.get("count", 0)
|
||||
|
||||
|
||||
def session_exists(project_id, session_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify("""SELECT 1
|
||||
FROM public.sessions
|
||||
WHERE session_id=%(session_id)s
|
||||
AND project_id=%(project_id)s""",
|
||||
{"project_id": project_id, "session_id": session_id})
|
||||
cur.execute(query)
|
||||
row = cur.fetchone()
|
||||
return row is not None
|
||||
|
|
|
|||
|
|
@@ -23,7 +23,8 @@ def __get_saved_data(project_id, session_id, issue_id, tool):
        return helper.dict_to_camel_case(cur.fetchone())


def create_new_assignment(tenant_id, project_id, session_id, creator_id, assignee, description, title, issue_type, integration_project_id):
def create_new_assignment(tenant_id, project_id, session_id, creator_id, assignee, description, title, issue_type,
                          integration_project_id):
    error, integration = integrations_manager.get_integration(tenant_id=tenant_id, user_id=creator_id)
    if error is not None:
        return error
@@ -40,7 +41,7 @@ def create_new_assignment(tenant_id, project_id, session_id, creator_id, assigne
                                                   integration_project_id=integration_project_id)
    except integration_base_issue.RequestException as e:
        return integration_base_issue.proxy_issues_handler(e)
    if issue is not None and "id" not in issue:
    if issue is None or "id" not in issue:
        return {"errors": ["something went wrong while creating the issue"]}
    with pg_client.PostgresClient() as cur:
        query = cur.mogrify("""\
api/chalicelib/core/sessions_devtool.py (new file, 24 lines)
@@ -0,0 +1,24 @@
from decouple import config

from chalicelib.utils import s3


def __get_devtools_keys(project_id, session_id):
    params = {
        "sessionId": session_id,
        "projectId": project_id
    }
    return [
        config("DEVTOOLS_MOB_PATTERN", default="%(sessionId)sdevtools") % params
    ]


def get_urls(session_id, project_id):
    results = []
    for k in __get_devtools_keys(project_id=project_id, session_id=session_id):
        results.append(s3.client.generate_presigned_url(
            'get_object',
            Params={'Bucket': config("sessions_bucket"), 'Key': k},
            ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
        ))
    return results
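The new sessions_devtool module mirrors the mob-file helpers: it derives the S3 key from a configurable pattern and returns short-lived presigned GET URLs. A minimal usage sketch (the ids are hypothetical; the function signature and config names are taken from the file above):

# Hypothetical usage of the helper added above; session/project ids are made up.
from chalicelib.core import sessions_devtool

urls = sessions_devtool.get_urls(session_id=123456, project_id=1)
for url in urls:
    # Each entry is a presigned S3 URL for the devtools recording,
    # valid for PRESIGNED_URL_EXPIRATION seconds (default 900).
    print(url)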
@@ -1,37 +1,39 @@
import schemas
from chalicelib.core import sessions
from chalicelib.utils import pg_client


def add_favorite_session(project_id, user_id, session_id):
def add_favorite_session(context: schemas.CurrentContext, project_id, session_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""\
                        INSERT INTO public.user_favorite_sessions(user_id, session_id)
                        VALUES (%(userId)s,%(sessionId)s);""",
                        {"userId": user_id, "sessionId": session_id})
                        VALUES (%(userId)s,%(session_id)s);""",
                        {"userId": context.user_id, "session_id": session_id})
        )
    return sessions.get_by_id2_pg(project_id=project_id, session_id=session_id, user_id=user_id, full_data=False,
                                  include_fav_viewed=True)
    return sessions.get_by_id2_pg(context=context, project_id=project_id, session_id=session_id,
                                  full_data=False, include_fav_viewed=True)


def remove_favorite_session(project_id, user_id, session_id):
def remove_favorite_session(context: schemas.CurrentContext, project_id, session_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify(f"""\
                        DELETE FROM public.user_favorite_sessions
                        WHERE user_id = %(userId)s
                          AND session_id = %(sessionId)s;""",
                        {"userId": user_id, "sessionId": session_id})
                          AND session_id = %(session_id)s;""",
                        {"userId": context.user_id, "session_id": session_id})
        )
    return sessions.get_by_id2_pg(project_id=project_id, session_id=session_id, user_id=user_id, full_data=False,
                                  include_fav_viewed=True)
    return sessions.get_by_id2_pg(context=context, project_id=project_id, session_id=session_id,
                                  full_data=False, include_fav_viewed=True)


def favorite_session(project_id, user_id, session_id):
    if favorite_session_exists(user_id=user_id, session_id=session_id):
        return remove_favorite_session(project_id=project_id, user_id=user_id, session_id=session_id)
def favorite_session(context: schemas.CurrentContext, project_id, session_id):
    if favorite_session_exists(user_id=context.user_id, session_id=session_id):
        return remove_favorite_session(context=context, project_id=project_id,
                                       session_id=session_id)

    return add_favorite_session(project_id=project_id, user_id=user_id, session_id=session_id)
    return add_favorite_session(context=context, project_id=project_id, session_id=session_id)


def favorite_session_exists(user_id, session_id):
@@ -42,8 +44,8 @@ def favorite_session_exists(user_id, session_id):
                        FROM public.user_favorite_sessions
                        WHERE
                            user_id = %(userId)s
                            AND session_id = %(sessionId)s""",
                        {"userId": user_id, "sessionId": session_id})
                            AND session_id = %(session_id)s""",
                        {"userId": user_id, "session_id": session_id})
        )
        r = cur.fetchone()
        return r is not None
@@ -61,4 +63,4 @@ def get_start_end_timestamp(project_id, user_id):
                        {"userId": user_id, "project_id": project_id})
        )
        r = cur.fetchone()
        return (0, 0) if r is None else (r["max_start_ts"], r["min_start_ts"])
        return (0, 0) if r is None else (r["min_start_ts"], r["max_start_ts"])
@ -1,40 +1,57 @@
|
|||
from decouple import config
|
||||
|
||||
from chalicelib.utils import s3
|
||||
from chalicelib.utils.s3 import client
|
||||
|
||||
|
||||
def get_web(sessionId):
|
||||
def __get_mob_keys(project_id, session_id):
|
||||
params = {
|
||||
"sessionId": session_id,
|
||||
"projectId": project_id
|
||||
}
|
||||
return [
|
||||
client.generate_presigned_url(
|
||||
'get_object',
|
||||
Params={
|
||||
'Bucket': config("sessions_bucket"),
|
||||
'Key': str(sessionId)
|
||||
},
|
||||
ExpiresIn=100000
|
||||
),
|
||||
client.generate_presigned_url(
|
||||
'get_object',
|
||||
Params={
|
||||
'Bucket': config("sessions_bucket"),
|
||||
'Key': str(sessionId) + "e"
|
||||
},
|
||||
ExpiresIn=100000
|
||||
)]
|
||||
config("SESSION_MOB_PATTERN_S", default="%(sessionId)s") % params,
|
||||
config("SESSION_MOB_PATTERN_E", default="%(sessionId)se") % params
|
||||
]
|
||||
|
||||
|
||||
def get_ios(sessionId):
|
||||
return client.generate_presigned_url(
|
||||
def __get_mob_keys_deprecated(session_id):
|
||||
return [str(session_id), str(session_id) + "e"]
|
||||
|
||||
|
||||
def get_urls(project_id, session_id):
|
||||
results = []
|
||||
for k in __get_mob_keys(project_id=project_id, session_id=session_id):
|
||||
results.append(s3.client.generate_presigned_url(
|
||||
'get_object',
|
||||
Params={'Bucket': config("sessions_bucket"), 'Key': k},
|
||||
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
|
||||
))
|
||||
return results
|
||||
|
||||
|
||||
def get_urls_depercated(session_id):
|
||||
results = []
|
||||
for k in __get_mob_keys_deprecated(session_id=session_id):
|
||||
results.append(s3.client.generate_presigned_url(
|
||||
'get_object',
|
||||
Params={'Bucket': config("sessions_bucket"), 'Key': k},
|
||||
ExpiresIn=100000
|
||||
))
|
||||
return results
|
||||
|
||||
|
||||
def get_ios(session_id):
|
||||
return s3.client.generate_presigned_url(
|
||||
'get_object',
|
||||
Params={
|
||||
'Bucket': config("ios_bucket"),
|
||||
'Key': str(sessionId)
|
||||
'Key': str(session_id)
|
||||
},
|
||||
ExpiresIn=100000
|
||||
ExpiresIn=config("PRESIGNED_URL_EXPIRATION", cast=int, default=900)
|
||||
)
|
||||
|
||||
|
||||
def delete_mobs(session_ids):
|
||||
def delete_mobs(project_id, session_ids):
|
||||
for session_id in session_ids:
|
||||
s3.schedule_for_deletion(config("sessions_bucket"), session_id)
|
||||
for k in __get_mob_keys(project_id=project_id, session_id=session_id):
|
||||
s3.schedule_for_deletion(config("sessions_bucket"), k)
|
||||
|
|
|
|||
api/chalicelib/core/sessions_notes.py (new file, 166 lines)
@@ -0,0 +1,166 @@
|
|||
from urllib.parse import urljoin
|
||||
|
||||
from decouple import config
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import sessions
|
||||
from chalicelib.core.collaboration_slack import Slack
|
||||
from chalicelib.utils import pg_client, helper
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
|
||||
def get_note(tenant_id, project_id, user_id, note_id, share=None):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""SELECT sessions_notes.*, users.name AS creator_name
|
||||
{",(SELECT name FROM users WHERE user_id=%(share)s AND deleted_at ISNULL) AS share_name" if share else ""}
|
||||
FROM sessions_notes INNER JOIN users USING (user_id)
|
||||
WHERE sessions_notes.project_id = %(project_id)s
|
||||
AND sessions_notes.note_id = %(note_id)s
|
||||
AND sessions_notes.deleted_at IS NULL
|
||||
AND (sessions_notes.user_id = %(user_id)s OR sessions_notes.is_public);""",
|
||||
{"project_id": project_id, "user_id": user_id, "tenant_id": tenant_id,
|
||||
"note_id": note_id, "share": share})
|
||||
|
||||
cur.execute(query=query)
|
||||
row = cur.fetchone()
|
||||
row = helper.dict_to_camel_case(row)
|
||||
if row:
|
||||
row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
|
||||
return row
|
||||
|
||||
|
||||
def get_session_notes(tenant_id, project_id, session_id, user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""SELECT sessions_notes.*
|
||||
FROM sessions_notes
|
||||
WHERE sessions_notes.project_id = %(project_id)s
|
||||
AND sessions_notes.deleted_at IS NULL
|
||||
AND sessions_notes.session_id = %(session_id)s
|
||||
AND (sessions_notes.user_id = %(user_id)s
|
||||
OR sessions_notes.is_public)
|
||||
ORDER BY created_at DESC;""",
|
||||
{"project_id": project_id, "user_id": user_id,
|
||||
"tenant_id": tenant_id, "session_id": session_id})
|
||||
|
||||
cur.execute(query=query)
|
||||
rows = cur.fetchall()
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
for row in rows:
|
||||
row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
|
||||
return rows
|
||||
|
||||
|
||||
def get_all_notes_by_project_id(tenant_id, project_id, user_id, data: schemas.SearchNoteSchema):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
conditions = ["sessions_notes.project_id = %(project_id)s", "sessions_notes.deleted_at IS NULL"]
|
||||
extra_params = {}
|
||||
if data.tags and len(data.tags) > 0:
|
||||
k = "tag_value"
|
||||
conditions.append(
|
||||
sessions._multiple_conditions(f"%({k})s = sessions_notes.tag", data.tags, value_key=k))
|
||||
extra_params = sessions._multiple_values(data.tags, value_key=k)
|
||||
if data.shared_only:
|
||||
conditions.append("sessions_notes.is_public")
|
||||
elif data.mine_only:
|
||||
conditions.append("sessions_notes.user_id = %(user_id)s")
|
||||
else:
|
||||
conditions.append("(sessions_notes.user_id = %(user_id)s OR sessions_notes.is_public)")
|
||||
query = cur.mogrify(f"""SELECT sessions_notes.*
|
||||
FROM sessions_notes
|
||||
WHERE {" AND ".join(conditions)}
|
||||
ORDER BY created_at {data.order}
|
||||
LIMIT {data.limit} OFFSET {data.limit * (data.page - 1)};""",
|
||||
{"project_id": project_id, "user_id": user_id, "tenant_id": tenant_id, **extra_params})
|
||||
|
||||
cur.execute(query=query)
|
||||
rows = cur.fetchall()
|
||||
rows = helper.list_to_camel_case(rows)
|
||||
for row in rows:
|
||||
row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
|
||||
return rows
|
||||
|
||||
|
||||
def create(tenant_id, user_id, project_id, session_id, data: schemas.SessionNoteSchema):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(f"""INSERT INTO public.sessions_notes (message, user_id, tag, session_id, project_id, timestamp, is_public)
|
||||
VALUES (%(message)s, %(user_id)s, %(tag)s, %(session_id)s, %(project_id)s, %(timestamp)s, %(is_public)s)
|
||||
RETURNING *;""",
|
||||
{"user_id": user_id, "project_id": project_id, "session_id": session_id, **data.dict()})
|
||||
cur.execute(query)
|
||||
result = helper.dict_to_camel_case(cur.fetchone())
|
||||
if result:
|
||||
result["createdAt"] = TimeUTC.datetime_to_timestamp(result["createdAt"])
|
||||
return result
|
||||
|
||||
|
||||
def edit(tenant_id, user_id, project_id, note_id, data: schemas.SessionUpdateNoteSchema):
|
||||
sub_query = []
|
||||
if data.message is not None:
|
||||
sub_query.append("message = %(message)s")
|
||||
if data.tag is not None and len(data.tag) > 0:
|
||||
sub_query.append("tag = %(tag)s")
|
||||
if data.is_public is not None:
|
||||
sub_query.append("is_public = %(is_public)s")
|
||||
if data.timestamp is not None:
|
||||
sub_query.append("timestamp = %(timestamp)s")
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(f"""UPDATE public.sessions_notes
|
||||
SET
|
||||
{" ,".join(sub_query)}
|
||||
WHERE
|
||||
project_id = %(project_id)s
|
||||
AND user_id = %(user_id)s
|
||||
AND note_id = %(note_id)s
|
||||
AND deleted_at ISNULL
|
||||
RETURNING *;""",
|
||||
{"project_id": project_id, "user_id": user_id, "note_id": note_id, **data.dict()})
|
||||
)
|
||||
row = helper.dict_to_camel_case(cur.fetchone())
|
||||
if row:
|
||||
row["createdAt"] = TimeUTC.datetime_to_timestamp(row["createdAt"])
|
||||
return row
|
||||
|
||||
|
||||
def delete(tenant_id, user_id, project_id, note_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(""" UPDATE public.sessions_notes
|
||||
SET deleted_at = timezone('utc'::text, now())
|
||||
WHERE note_id = %(note_id)s
|
||||
AND project_id = %(project_id)s
|
||||
AND user_id = %(user_id)s
|
||||
AND deleted_at ISNULL;""",
|
||||
{"project_id": project_id, "user_id": user_id, "note_id": note_id})
|
||||
)
|
||||
return {"data": {"state": "success"}}
|
||||
|
||||
|
||||
def share_to_slack(tenant_id, user_id, project_id, note_id, webhook_id):
|
||||
note = get_note(tenant_id=tenant_id, project_id=project_id, user_id=user_id, note_id=note_id, share=user_id)
|
||||
if note is None:
|
||||
return {"errors": ["Note not found"]}
|
||||
session_url = urljoin(config('SITE_URL'), f"{note['projectId']}/session/{note['sessionId']}")
|
||||
title = f"<{session_url}|Note for session {note['sessionId']}>"
|
||||
|
||||
blocks = [{"type": "section",
|
||||
"fields": [{"type": "mrkdwn",
|
||||
"text": title}]},
|
||||
{"type": "section",
|
||||
"fields": [{"type": "plain_text",
|
||||
"text": note["message"]}]}]
|
||||
if note["tag"]:
|
||||
blocks.append({"type": "context",
|
||||
"elements": [{"type": "plain_text",
|
||||
"text": f"Tag: *{note['tag']}*"}]})
|
||||
bottom = f"Created by {note['creatorName'].capitalize()}"
|
||||
if user_id != note["userId"]:
|
||||
bottom += f"\nSent by {note['shareName']}: "
|
||||
blocks.append({"type": "context",
|
||||
"elements": [{"type": "plain_text",
|
||||
"text": bottom}]})
|
||||
return Slack.send_raw(
|
||||
tenant_id=tenant_id,
|
||||
webhook_id=webhook_id,
|
||||
body={"blocks": blocks}
|
||||
)
|
||||
|
|
@@ -5,7 +5,7 @@ def view_session(project_id, user_id, session_id):
    with pg_client.PostgresClient() as cur:
        cur.execute(
            cur.mogrify("""INSERT INTO public.user_viewed_sessions(user_id, session_id)
                           VALUES (%(userId)s,%(sessionId)s)
                           VALUES (%(userId)s,%(session_id)s)
                           ON CONFLICT DO NOTHING;""",
                        {"userId": user_id, "sessionId": session_id})
                        {"userId": user_id, "session_id": session_id})
        )
@ -3,7 +3,6 @@ __maintainer__ = "KRAIEM Taha Yassine"
|
|||
|
||||
import schemas
|
||||
from chalicelib.core import events, metadata, sessions
|
||||
from chalicelib.utils import dev
|
||||
|
||||
"""
|
||||
todo: remove LIMIT from the query
|
||||
|
|
@ -182,9 +181,7 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
|
|||
values=s["value"], value_key=f"value{i + 1}")
|
||||
n_stages_query.append(f"""
|
||||
(SELECT main.session_id,
|
||||
{"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp,
|
||||
'{event_type}' AS type,
|
||||
'{s["operator"]}' AS operator
|
||||
{"MIN(main.timestamp)" if i + 1 < len(stages) else "MAX(main.timestamp)"} AS stage{i + 1}_timestamp
|
||||
FROM {next_table} AS main {" ".join(extra_from)}
|
||||
WHERE main.timestamp >= {f"T{i}.stage{i}_timestamp" if i > 0 else "%(startTimestamp)s"}
|
||||
{f"AND main.session_id=T1.session_id" if i > 0 else ""}
|
||||
|
|
@ -192,45 +189,55 @@ def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
|
|||
{(" AND " + " AND ".join(stage_constraints)) if len(stage_constraints) > 0 else ""}
|
||||
{(" AND " + " AND ".join(first_stage_extra_constraints)) if len(first_stage_extra_constraints) > 0 and i == 0 else ""}
|
||||
GROUP BY main.session_id)
|
||||
AS T{i + 1} {"USING (session_id)" if i > 0 else ""}
|
||||
AS T{i + 1} {"ON (TRUE)" if i > 0 else ""}
|
||||
""")
|
||||
if len(n_stages_query) == 0:
|
||||
n_stages = len(n_stages_query)
|
||||
if n_stages == 0:
|
||||
return []
|
||||
n_stages_query = " LEFT JOIN LATERAL ".join(n_stages_query)
|
||||
n_stages_query += ") AS stages_t"
|
||||
|
||||
n_stages_query = f"""
|
||||
SELECT stages_and_issues_t.*,sessions.session_id, sessions.user_uuid FROM (
|
||||
SELECT stages_and_issues_t.*, sessions.user_uuid
|
||||
FROM (
|
||||
SELECT * FROM (
|
||||
SELECT * FROM
|
||||
{n_stages_query}
|
||||
SELECT T1.session_id, {",".join([f"stage{i + 1}_timestamp" for i in range(n_stages)])}
|
||||
FROM {n_stages_query}
|
||||
LEFT JOIN LATERAL
|
||||
(
|
||||
SELECT * FROM
|
||||
(SELECT ISE.session_id,
|
||||
ISS.type as issue_type,
|
||||
( SELECT ISS.type as issue_type,
|
||||
ISE.timestamp AS issue_timestamp,
|
||||
ISS.context_string as issue_context,
|
||||
COALESCE(ISS.context_string,'') as issue_context,
|
||||
ISS.issue_id as issue_id
|
||||
FROM events_common.issues AS ISE INNER JOIN issues AS ISS USING (issue_id)
|
||||
WHERE ISE.timestamp >= stages_t.stage1_timestamp
|
||||
AND ISE.timestamp <= stages_t.stage{i + 1}_timestamp
|
||||
AND ISS.project_id=%(project_id)s
|
||||
{"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}) AS base_t
|
||||
) AS issues_t
|
||||
USING (session_id)) AS stages_and_issues_t
|
||||
inner join sessions USING(session_id);
|
||||
AND ISE.session_id = stages_t.session_id
|
||||
AND ISS.type!='custom' -- ignore custom issues because they are massive
|
||||
{"AND ISS.type IN %(issueTypes)s" if len(filter_issues) > 0 else ""}
|
||||
LIMIT 10 -- remove the limit to get exact stats
|
||||
) AS issues_t ON (TRUE)
|
||||
) AS stages_and_issues_t INNER JOIN sessions USING(session_id);
|
||||
"""
|
||||
|
||||
# LIMIT 10000
|
||||
params = {"project_id": project_id, "startTimestamp": filter_d["startDate"], "endTimestamp": filter_d["endDate"],
|
||||
"issueTypes": tuple(filter_issues), **values}
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(n_stages_query, params)
|
||||
# print("---------------------------------------------------")
|
||||
# print(cur.mogrify(n_stages_query, params))
|
||||
# print(query)
|
||||
# print("---------------------------------------------------")
|
||||
cur.execute(cur.mogrify(n_stages_query, params))
|
||||
rows = cur.fetchall()
|
||||
try:
|
||||
cur.execute(query)
|
||||
rows = cur.fetchall()
|
||||
except Exception as err:
|
||||
print("--------- FUNNEL SEARCH QUERY EXCEPTION -----------")
|
||||
print(query.decode('UTF-8'))
|
||||
print("--------- PAYLOAD -----------")
|
||||
print(filter_d)
|
||||
print("--------------------")
|
||||
raise err
|
||||
return rows
|
||||
|
||||
|
||||
|
|
@ -292,7 +299,21 @@ def pearson_corr(x: list, y: list):
|
|||
return r, confidence, False
|
||||
|
||||
|
||||
def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_with_context, first_stage, last_stage):
|
||||
# def tuple_or(t: tuple):
|
||||
# x = 0
|
||||
# for el in t:
|
||||
# x |= el # | is for bitwise OR
|
||||
# return x
|
||||
#
|
||||
# The following function is correct optimization of the previous function because t is a list of 0,1
|
||||
def tuple_or(t: tuple):
|
||||
for el in t:
|
||||
if el > 0:
|
||||
return 1
|
||||
return 0
|
||||
|
||||
|
||||
def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues, first_stage, last_stage):
|
||||
"""
|
||||
Returns two lists with binary values 0/1:
|
||||
|
||||
|
|
@ -311,12 +332,6 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_
|
|||
transitions = []
|
||||
n_sess_affected = 0
|
||||
errors = {}
|
||||
for issue in all_issues_with_context:
|
||||
split = issue.split('__^__')
|
||||
errors[issue] = {
|
||||
"errors": [],
|
||||
"issue_type": split[0],
|
||||
"context": split[1]}
|
||||
|
||||
for row in rows:
|
||||
t = 0
|
||||
|
|
@ -324,38 +339,26 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_
|
|||
last_ts = row[f'stage{last_stage}_timestamp']
|
||||
if first_ts is None:
|
||||
continue
|
||||
elif first_ts is not None and last_ts is not None:
|
||||
elif last_ts is not None:
|
||||
t = 1
|
||||
transitions.append(t)
|
||||
|
||||
ic_present = False
|
||||
for issue_type_with_context in errors:
|
||||
for error_id in all_issues:
|
||||
if error_id not in errors:
|
||||
errors[error_id] = []
|
||||
ic = 0
|
||||
issue_type = errors[issue_type_with_context]["issue_type"]
|
||||
context = errors[issue_type_with_context]["context"]
|
||||
if row['issue_type'] is not None:
|
||||
row_issue_id = row['issue_id']
|
||||
if row_issue_id is not None:
|
||||
if last_ts is None or (first_ts < row['issue_timestamp'] < last_ts):
|
||||
context_in_row = row['issue_context'] if row['issue_context'] is not None else ''
|
||||
if issue_type == row['issue_type'] and context == context_in_row:
|
||||
if error_id == row_issue_id:
|
||||
ic = 1
|
||||
ic_present = True
|
||||
errors[issue_type_with_context]["errors"].append(ic)
|
||||
errors[error_id].append(ic)
|
||||
|
||||
if ic_present and t:
|
||||
n_sess_affected += 1
|
||||
|
||||
# def tuple_or(t: tuple):
|
||||
# x = 0
|
||||
# for el in t:
|
||||
# x |= el
|
||||
# return x
|
||||
def tuple_or(t: tuple):
|
||||
for el in t:
|
||||
if el > 0:
|
||||
return 1
|
||||
return 0
|
||||
|
||||
errors = {key: errors[key]["errors"] for key in errors}
|
||||
all_errors = [tuple_or(t) for t in zip(*errors.values())]
|
||||
|
||||
return transitions, errors, all_errors, n_sess_affected
|
||||
|
|
@ -371,10 +374,9 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
|
|||
"""
|
||||
affected_users = defaultdict(lambda: set())
|
||||
affected_sessions = defaultdict(lambda: set())
|
||||
contexts = defaultdict(lambda: None)
|
||||
all_issues = {}
|
||||
n_affected_users_dict = defaultdict(lambda: None)
|
||||
n_affected_sessions_dict = defaultdict(lambda: None)
|
||||
all_issues_with_context = set()
|
||||
n_issues_dict = defaultdict(lambda: 0)
|
||||
issues_by_session = defaultdict(lambda: 0)
|
||||
|
||||
|
|
@ -390,15 +392,13 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
|
|||
# check that the issue exists and belongs to subfunnel:
|
||||
if iss is not None and (row[f'stage{last_stage}_timestamp'] is None or
|
||||
(row[f'stage{first_stage}_timestamp'] < iss_ts < row[f'stage{last_stage}_timestamp'])):
|
||||
context_string = row['issue_context'] if row['issue_context'] is not None else ''
|
||||
issue_with_context = iss + '__^__' + context_string
|
||||
contexts[issue_with_context] = {"context": context_string, "id": row["issue_id"]}
|
||||
all_issues_with_context.add(issue_with_context)
|
||||
n_issues_dict[issue_with_context] += 1
|
||||
if row["issue_id"] not in all_issues:
|
||||
all_issues[row["issue_id"]] = {"context": row['issue_context'], "issue_type": row["issue_type"]}
|
||||
n_issues_dict[row["issue_id"]] += 1
|
||||
if row['user_uuid'] is not None:
|
||||
affected_users[issue_with_context].add(row['user_uuid'])
|
||||
affected_users[row["issue_id"]].add(row['user_uuid'])
|
||||
|
||||
affected_sessions[issue_with_context].add(row['session_id'])
|
||||
affected_sessions[row["issue_id"]].add(row['session_id'])
|
||||
issues_by_session[row[f'session_id']] += 1
|
||||
|
||||
if len(affected_users) > 0:
|
||||
|
|
@ -409,29 +409,28 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
|
|||
n_affected_sessions_dict.update({
|
||||
iss: len(affected_sessions[iss]) for iss in affected_sessions
|
||||
})
|
||||
return all_issues_with_context, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict, contexts
|
||||
return all_issues, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict
|
||||
|
||||
|
||||
def count_sessions(rows, n_stages):
|
||||
session_counts = {i: set() for i in range(1, n_stages + 1)}
|
||||
for ind, row in enumerate(rows):
|
||||
for row in rows:
|
||||
for i in range(1, n_stages + 1):
|
||||
if row[f"stage{i}_timestamp"] is not None:
|
||||
session_counts[i].add(row[f"session_id"])
|
||||
|
||||
session_counts = {i: len(session_counts[i]) for i in session_counts}
|
||||
return session_counts
|
||||
|
||||
|
||||
def count_users(rows, n_stages):
|
||||
users_in_stages = defaultdict(lambda: set())
|
||||
|
||||
for ind, row in enumerate(rows):
|
||||
users_in_stages = {i: set() for i in range(1, n_stages + 1)}
|
||||
for row in rows:
|
||||
for i in range(1, n_stages + 1):
|
||||
if row[f"stage{i}_timestamp"] is not None:
|
||||
users_in_stages[i].add(row["user_uuid"])
|
||||
|
||||
users_count = {i: len(users_in_stages[i]) for i in range(1, n_stages + 1)}
|
||||
|
||||
return users_count
|
||||
|
||||
|
||||
|
|
@ -484,18 +483,18 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
|
|||
last_stage = n_stages
|
||||
|
||||
n_critical_issues = 0
|
||||
issues_dict = dict({"significant": [],
|
||||
"insignificant": []})
|
||||
issues_dict = {"significant": [],
|
||||
"insignificant": []}
|
||||
session_counts = count_sessions(rows, n_stages)
|
||||
drop = session_counts[first_stage] - session_counts[last_stage]
|
||||
|
||||
all_issues_with_context, n_issues_dict, affected_users_dict, affected_sessions, contexts = get_affected_users_for_all_issues(
|
||||
all_issues, n_issues_dict, affected_users_dict, affected_sessions = get_affected_users_for_all_issues(
|
||||
rows, first_stage, last_stage)
|
||||
transitions, errors, all_errors, n_sess_affected = get_transitions_and_issues_of_each_type(rows,
|
||||
all_issues_with_context,
|
||||
all_issues,
|
||||
first_stage, last_stage)
|
||||
|
||||
# print("len(transitions) =", len(transitions))
|
||||
del rows
|
||||
|
||||
if any(all_errors):
|
||||
total_drop_corr, conf, is_sign = pearson_corr(transitions, all_errors)
|
||||
|
|
@ -508,33 +507,35 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
|
|||
|
||||
if drop_only:
|
||||
return total_drop_due_to_issues
|
||||
for issue in all_issues_with_context:
|
||||
for issue_id in all_issues:
|
||||
|
||||
if not any(errors[issue]):
|
||||
if not any(errors[issue_id]):
|
||||
continue
|
||||
r, confidence, is_sign = pearson_corr(transitions, errors[issue])
|
||||
r, confidence, is_sign = pearson_corr(transitions, errors[issue_id])
|
||||
|
||||
if r is not None and drop is not None and is_sign:
|
||||
lost_conversions = int(r * affected_sessions[issue])
|
||||
lost_conversions = int(r * affected_sessions[issue_id])
|
||||
else:
|
||||
lost_conversions = None
|
||||
if r is None:
|
||||
r = 0
|
||||
split = issue.split('__^__')
|
||||
issues_dict['significant' if is_sign else 'insignificant'].append({
|
||||
"type": split[0],
|
||||
"title": helper.get_issue_title(split[0]),
|
||||
"affected_sessions": affected_sessions[issue],
|
||||
"unaffected_sessions": session_counts[1] - affected_sessions[issue],
|
||||
"type": all_issues[issue_id]["issue_type"],
|
||||
"title": helper.get_issue_title(all_issues[issue_id]["issue_type"]),
|
||||
"affected_sessions": affected_sessions[issue_id],
|
||||
"unaffected_sessions": session_counts[1] - affected_sessions[issue_id],
|
||||
"lost_conversions": lost_conversions,
|
||||
"affected_users": affected_users_dict[issue],
|
||||
"affected_users": affected_users_dict[issue_id],
|
||||
"conversion_impact": round(r * 100),
|
||||
"context_string": contexts[issue]["context"],
|
||||
"issue_id": contexts[issue]["id"]
|
||||
"context_string": all_issues[issue_id]["context"],
|
||||
"issue_id": issue_id
|
||||
})
|
||||
|
||||
if is_sign:
|
||||
n_critical_issues += n_issues_dict[issue]
|
||||
n_critical_issues += n_issues_dict[issue_id]
|
||||
# To limit the number of returned issues to the frontend
|
||||
issues_dict["significant"] = issues_dict["significant"][:20]
|
||||
issues_dict["insignificant"] = issues_dict["insignificant"][:20]
|
||||
|
||||
return n_critical_issues, issues_dict, total_drop_due_to_issues
|
||||
|
||||
|
|
@ -559,8 +560,8 @@ def get_top_insights(filter_d, project_id):
|
|||
"dropDueToIssues": 0
|
||||
|
||||
}]
|
||||
counts = sessions.search_sessions(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d), project_id=project_id,
|
||||
user_id=None, count_only=True)
|
||||
counts = sessions.search_sessions(data=schemas.SessionsSearchCountSchema.parse_obj(filter_d),
|
||||
project_id=project_id, user_id=None, count_only=True)
|
||||
output[0]["sessionsCount"] = counts["countSessions"]
|
||||
output[0]["usersCount"] = counts["countUsers"]
|
||||
return output, 0
|
||||
|
|
|
|||
|
|
@ -1,7 +1,5 @@
|
|||
import json
|
||||
|
||||
from decouple import config
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import users, telemetry, tenants
|
||||
from chalicelib.utils import captcha
|
||||
|
|
@ -20,55 +18,41 @@ def create_step1(data: schemas.UserSignupSchema):
|
|||
print(f"=====================> {email}")
|
||||
password = data.password
|
||||
|
||||
print("Verifying email validity")
|
||||
if email is None or len(email) < 5 or not helper.is_valid_email(email):
|
||||
if email is None or len(email) < 5:
|
||||
errors.append("Invalid email address.")
|
||||
else:
|
||||
print("Verifying email existance")
|
||||
if users.email_exists(email):
|
||||
errors.append("Email address already in use.")
|
||||
if users.get_deleted_user_by_email(email) is not None:
|
||||
errors.append("Email address previously deleted.")
|
||||
|
||||
print("Verifying captcha")
|
||||
if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
|
||||
errors.append("Invalid captcha.")
|
||||
|
||||
print("Verifying password validity")
|
||||
if len(password) < 6:
|
||||
errors.append("Password is too short, it must be at least 6 characters long.")
|
||||
|
||||
print("Verifying fullname validity")
|
||||
fullname = data.fullname
|
||||
if fullname is None or len(fullname) < 1 or not helper.is_alphabet_space_dash(fullname):
|
||||
errors.append("Invalid full name.")
|
||||
|
||||
print("Verifying company's name validity")
|
||||
company_name = data.organizationName
|
||||
if company_name is None or len(company_name) < 1:
|
||||
errors.append("invalid organization's name")
|
||||
|
||||
print("Verifying project's name validity")
|
||||
project_name = data.projectName
|
||||
if project_name is None or len(project_name) < 1:
|
||||
project_name = "my first project"
|
||||
organization_name = data.organizationName
|
||||
if organization_name is None or len(organization_name) < 1:
|
||||
errors.append("Invalid organization name.")
|
||||
|
||||
if len(errors) > 0:
|
||||
print("==> error")
|
||||
print(f"==> error for email:{data.email}, fullname:{data.fullname}, organizationName:{data.organizationName}")
|
||||
print(errors)
|
||||
return {"errors": errors}
|
||||
print("No errors detected")
|
||||
|
||||
project_name = "my first project"
|
||||
params = {
|
||||
"email": email, "password": password,
|
||||
"fullname": fullname,
|
||||
"projectName": project_name,
|
||||
"data": json.dumps({"lastAnnouncementView": TimeUTC.now()}),
|
||||
"organizationName": company_name
|
||||
"email": email, "password": password, "fullname": fullname, "projectName": project_name,
|
||||
"data": json.dumps({"lastAnnouncementView": TimeUTC.now()}), "organizationName": organization_name
|
||||
}
|
||||
query = f"""\
|
||||
WITH t AS (
|
||||
INSERT INTO public.tenants (name, version_number)
|
||||
VALUES (%(organizationName)s, (SELECT openreplay_version()))
|
||||
query = f"""WITH t AS (
|
||||
INSERT INTO public.tenants (name)
|
||||
VALUES (%(organizationName)s)
|
||||
RETURNING api_key
|
||||
),
|
||||
u AS (
|
||||
|
|
@ -76,8 +60,8 @@ def create_step1(data: schemas.UserSignupSchema):
|
|||
VALUES (%(email)s, 'owner', %(fullname)s,%(data)s)
|
||||
RETURNING user_id,email,role,name
|
||||
),
|
||||
au AS (INSERT
|
||||
INTO public.basic_authentication (user_id, password)
|
||||
au AS (
|
||||
INSERT INTO public.basic_authentication (user_id, password)
|
||||
VALUES ((SELECT user_id FROM u), crypt(%(password)s, gen_salt('bf', 12)))
|
||||
)
|
||||
INSERT INTO public.projects (name, active)
|
||||
|
|
@ -86,9 +70,9 @@ def create_step1(data: schemas.UserSignupSchema):
|
|||
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(cur.mogrify(query, params))
|
||||
cur = cur.fetchone()
|
||||
project_id = cur["project_id"]
|
||||
api_key = cur["api_key"]
|
||||
data = cur.fetchone()
|
||||
project_id = data["project_id"]
|
||||
api_key = data["api_key"]
|
||||
telemetry.new_client()
|
||||
created_at = TimeUTC.now()
|
||||
r = users.authenticate(email, password)
|
||||
|
|
@ -106,7 +90,7 @@ def create_step1(data: schemas.UserSignupSchema):
|
|||
}
|
||||
c = {
|
||||
"tenantId": 1,
|
||||
"name": company_name,
|
||||
"name": organization_name,
|
||||
"apiKey": api_key,
|
||||
"remainingTrial": 14,
|
||||
"trialEnded": False,
|
||||
|
|
|
|||
|
|
@@ -4,17 +4,6 @@ from decouple import config
from chalicelib.core.collaboration_slack import Slack


def send(notification, destination):
    if notification is None:
        return
    return Slack.send_text(tenant_id=notification["tenantId"],
                           webhook_id=destination,
                           text=notification["description"] \
                                + f"\n<{config('SITE_URL')}{notification['buttonUrl']}|{notification['buttonText']}>",
                           title=notification["title"],
                           title_link=notification["buttonUrl"], )


def send_batch(notifications_list):
    if notifications_list is None or len(notifications_list) == 0:
        return
@@ -7,7 +7,7 @@ def start_replay(project_id, session_id, device, os_version, mob_url):
    r = requests.post(config("IOS_MIDDLEWARE") + "/replay", json={
        "projectId": project_id,
        "projectKey": projects.get_project_key(project_id),
        "sessionId": session_id,
        "session_id": session_id,
        "device": device,
        "osVersion": os_version,
        "mobUrl": mob_url
@ -1,11 +1,11 @@
|
|||
from decouple import config
|
||||
from chalicelib.utils import helper
|
||||
|
||||
from chalicelib.utils import s3
|
||||
import hashlib
|
||||
from urllib.parse import urlparse
|
||||
|
||||
import requests
|
||||
from decouple import config
|
||||
|
||||
from chalicelib.core import sourcemaps_parser
|
||||
from chalicelib.utils import s3
|
||||
|
||||
|
||||
def __get_key(project_id, url):
|
||||
|
|
@ -54,7 +54,8 @@ def __frame_is_valid(f):
|
|||
|
||||
def __format_frame(f):
|
||||
f["context"] = [] # no context by default
|
||||
if "source" in f: f.pop("source")
|
||||
if "source" in f:
|
||||
f.pop("source")
|
||||
url = f.pop("fileName")
|
||||
f["absPath"] = url
|
||||
f["filename"] = urlparse(url).path
|
||||
|
|
@ -73,6 +74,16 @@ def format_payload(p, truncate_to_first=False):
|
|||
return []
|
||||
|
||||
|
||||
def url_exists(url):
|
||||
try:
|
||||
r = requests.head(url, allow_redirects=False)
|
||||
return r.status_code == 200 and "text/html" not in r.headers.get("Content-Type", "")
|
||||
except Exception as e:
|
||||
print(f"!! Issue checking if URL exists: {url}")
|
||||
print(e)
|
||||
return False
|
||||
|
||||
|
||||
def get_traces_group(project_id, payload):
|
||||
frames = format_payload(payload)
|
||||
|
||||
|
|
@ -80,25 +91,44 @@ def get_traces_group(project_id, payload):
    payloads = {}
    all_exists = True
    for i, u in enumerate(frames):
        key = __get_key(project_id, u["absPath"])  # use filename instead?
        file_exists_in_bucket = False
        file_exists_in_server = False
        file_url = u["absPath"]
        key = __get_key(project_id, file_url)  # use filename instead?
        params_idx = file_url.find("?")
        if file_url and len(file_url) > 0 \
                and not (file_url[:params_idx] if params_idx > -1 else file_url).endswith(".js"):
            print(f"{u['absPath']} sourcemap is not a JS file")
            payloads[key] = None

        if key not in payloads:
            file_exists = s3.exists(config('sourcemaps_bucket'), key)
            all_exists = all_exists and file_exists
            if not file_exists:
                print(f"{u['absPath']} sourcemap (key '{key}') doesn't exist in S3")
            file_exists_in_bucket = len(file_url) > 0 and s3.exists(config('sourcemaps_bucket'), key)
            if len(file_url) > 0 and not file_exists_in_bucket:
                print(f"{u['absPath']} sourcemap (key '{key}') doesn't exist in S3 looking in server")
                if not file_url.endswith(".map"):
                    file_url += '.map'
                file_exists_in_server = url_exists(file_url)
                file_exists_in_bucket = file_exists_in_server
            all_exists = all_exists and file_exists_in_bucket
            if not file_exists_in_bucket and not file_exists_in_server:
                print(f"{u['absPath']} sourcemap (key '{key}') doesn't exist in S3 nor server")
                payloads[key] = None
            else:
                payloads[key] = []
        results[i] = dict(u)
        results[i]["frame"] = dict(u)
        if payloads[key] is not None:
            payloads[key].append({"resultIndex": i,
            payloads[key].append({"resultIndex": i, "frame": dict(u), "URL": file_url,
                                  "position": {"line": u["lineNo"], "column": u["colNo"]},
                                  "frame": dict(u)})
                                  "isURL": file_exists_in_server})

    for key in payloads.keys():
        if payloads[key] is None:
            continue
        key_results = sourcemaps_parser.get_original_trace(key=key, positions=[o["position"] for o in payloads[key]])
        key_results = sourcemaps_parser.get_original_trace(
            key=payloads[key][0]["URL"] if payloads[key][0]["isURL"] else key,
            positions=[o["position"] for o in payloads[key]],
            is_url=payloads[key][0]["isURL"])
        if key_results is None:
            all_exists = False
            continue
@ -123,16 +153,17 @@ MAX_COLUMN_OFFSET = 60
def fetch_missed_contexts(frames):
    source_cache = {}
    for i in range(len(frames)):
        if len(frames[i]["context"]) != 0:
        if frames[i] and frames[i].get("context") and len(frames[i]["context"]) > 0:
            continue
        if frames[i]["frame"]["absPath"] in source_cache:
            file = source_cache[frames[i]["frame"]["absPath"]]
        file_abs_path = frames[i]["frame"]["absPath"]
        if file_abs_path in source_cache:
            file = source_cache[file_abs_path]
        else:
            file = s3.get_file(config('js_cache_bucket'), get_js_cache_path(frames[i]["frame"]["absPath"]))
            file_path = get_js_cache_path(file_abs_path)
            file = s3.get_file(config('js_cache_bucket'), file_path)
            if file is None:
                print(
                    f"File {get_js_cache_path(frames[i]['frame']['absPath'])} not found in {config('js_cache_bucket')}")
            source_cache[frames[i]["frame"]["absPath"]] = file
                print(f"Missing abs_path: {file_abs_path}, file {file_path} not found in {config('js_cache_bucket')}")
            source_cache[file_abs_path] = file
        if file is None:
            continue
        lines = file.split("\n")
@ -11,14 +11,14 @@ if '%s' in SMR_URL:
    SMR_URL = SMR_URL % "smr"


def get_original_trace(key, positions):
def get_original_trace(key, positions, is_url=False):
    payload = {
        "key": key,
        "positions": positions,
        "padding": 5,
        "bucket": config('sourcemaps_bucket')
        "bucket": config('sourcemaps_bucket'),
        "isURL": is_url
    }

    try:
        r = requests.post(SMR_URL, json=payload, timeout=config("sourcemapTimeout", cast=int, default=5))
        if r.status_code != 200:
@ -20,15 +20,19 @@ def process_data(data):


def compute():
    with pg_client.PostgresClient() as cur:
    with pg_client.PostgresClient(long_query=True) as cur:
        cur.execute(
            f"""UPDATE public.tenants
                SET t_integrations = COALESCE((SELECT COUNT(DISTINCT provider) FROM public.integrations) +
                                              (SELECT COUNT(*) FROM public.webhooks WHERE type = 'slack') +
                                              (SELECT COUNT(*) FROM public.jira_cloud), 0),
                    t_projects=COALESCE((SELECT COUNT(*) FROM public.projects WHERE deleted_at ISNULL), 0),
                    t_sessions=COALESCE((SELECT COUNT(*) FROM public.sessions), 0),
                    t_users=COALESCE((SELECT COUNT(*) FROM public.users WHERE deleted_at ISNULL), 0)
                    t_sessions=t_sessions + COALESCE((SELECT COUNT(*)
                                                      FROM public.sessions
                                                      WHERE start_ts >= (SELECT last_telemetry FROM tenants)
                                                        AND start_ts <=CAST(EXTRACT(epoch FROM date_trunc('day', now())) * 1000 AS BIGINT)), 0),
                    t_users=COALESCE((SELECT COUNT(*) FROM public.users WHERE deleted_at ISNULL), 0),
                    last_telemetry=CAST(EXTRACT(epoch FROM date_trunc('day', now())) * 1000 AS BIGINT)
                RETURNING name,t_integrations,t_projects,t_sessions,t_users,tenant_key,opt_out,
                    (SELECT openreplay_version()) AS version_number,(SELECT email FROM public.users WHERE role = 'owner' LIMIT 1);"""
        )
@ -39,8 +43,8 @@ def compute():
def new_client():
    with pg_client.PostgresClient() as cur:
        cur.execute(
            f"""SELECT *,
                       (SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email
            f"""SELECT *, openreplay_version() AS version_number,
                       (SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email
                FROM public.tenants
                LIMIT 1;""")
        data = cur.fetchone()
@ -14,7 +14,7 @@ def get_by_tenant_id(tenant_id):
                       api_key,
                       created_at,
                       '{license.EDITION}' AS edition,
                       version_number,
                       openreplay_version() AS version_number,
                       opt_out
                FROM public.tenants
                LIMIT 1;""",
@ -7,7 +7,7 @@ from fastapi import BackgroundTasks
import schemas
from chalicelib.core import authorizers, metadata, projects
from chalicelib.core import tenants, assist
from chalicelib.utils import dev, email_helper
from chalicelib.utils import email_helper
from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
@ -194,14 +194,6 @@ def create_member(tenant_id, user_id, data, background_tasks: BackgroundTasks):
    new_member = create_new_member(email=data["email"], invitation_token=invitation_token,
                                   admin=data.get("admin", False), name=name)
    new_member["invitationLink"] = __get_invitation_link(new_member.pop("invitationToken"))

    # helper.async_post(config('email_basic') % 'member_invitation',
    #                   {
    #                       "email": data["email"],
    #                       "invitationLink": new_member["invitationLink"],
    #                       "clientId": tenants.get_by_tenant_id(tenant_id)["name"],
    #                       "senderName": admin["name"]
    #                   })
    background_tasks.add_task(email_helper.send_team_invitation, **{
        "recipient": data["email"],
        "invitation_link": new_member["invitationLink"],
@ -259,9 +251,8 @@ def generate_new_api_key(user_id):
|
|||
cur.mogrify(
|
||||
f"""UPDATE public.users
|
||||
SET api_key=generate_api_key(20)
|
||||
WHERE
|
||||
users.user_id = %(userId)s
|
||||
AND deleted_at IS NULL
|
||||
WHERE users.user_id = %(userId)s
|
||||
AND deleted_at IS NULL
|
||||
RETURNING api_key;""",
|
||||
{"userId": user_id})
|
||||
)
|
||||
|
|
@ -302,6 +293,39 @@ def edit(user_id_to_update, tenant_id, changes: schemas.EditUserSchema, editor_i
|
|||
return {"data": user}
|
||||
|
||||
|
||||
def edit_member(user_id_to_update, tenant_id, changes: schemas.EditUserSchema, editor_id):
|
||||
user = get_member(user_id=user_id_to_update, tenant_id=tenant_id)
|
||||
if editor_id != user_id_to_update or changes.admin is not None and changes.admin != user["admin"]:
|
||||
admin = get(tenant_id=tenant_id, user_id=editor_id)
|
||||
if not admin["superAdmin"] and not admin["admin"]:
|
||||
return {"errors": ["unauthorized"]}
|
||||
_changes = {}
|
||||
if editor_id == user_id_to_update:
|
||||
if changes.admin is not None:
|
||||
if user["superAdmin"]:
|
||||
changes.admin = None
|
||||
elif changes.admin != user["admin"]:
|
||||
return {"errors": ["cannot change your own role"]}
|
||||
|
||||
if changes.email is not None and changes.email != user["email"]:
|
||||
if email_exists(changes.email):
|
||||
return {"errors": ["email already exists."]}
|
||||
if get_deleted_user_by_email(changes.email) is not None:
|
||||
return {"errors": ["email previously deleted."]}
|
||||
_changes["email"] = changes.email
|
||||
|
||||
if changes.name is not None and len(changes.name) > 0:
|
||||
_changes["name"] = changes.name
|
||||
|
||||
if changes.admin is not None:
|
||||
_changes["role"] = "admin" if changes.admin else "member"
|
||||
|
||||
if len(_changes.keys()) > 0:
|
||||
update(tenant_id=tenant_id, user_id=user_id_to_update, changes=_changes)
|
||||
return {"data": get_member(user_id=user_id_to_update, tenant_id=tenant_id)}
|
||||
return {"data": user}
|
||||
|
||||
|
||||
def get_by_email_only(email):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
|
|
@ -314,15 +338,16 @@ def get_by_email_only(email):
|
|||
users.name,
|
||||
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
|
||||
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
|
||||
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member
|
||||
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
|
||||
TRUE AS has_password
|
||||
FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id
|
||||
WHERE
|
||||
users.email = %(email)s
|
||||
AND users.deleted_at IS NULL;""",
|
||||
WHERE users.email = %(email)s
|
||||
AND users.deleted_at IS NULL
|
||||
LIMIT 1;""",
|
||||
{"email": email})
|
||||
)
|
||||
r = cur.fetchall()
|
||||
return helper.list_to_camel_case(r)
|
||||
r = cur.fetchone()
|
||||
return helper.dict_to_camel_case(r)
|
||||
|
||||
|
||||
def get_by_email_reset(email, reset_token):
|
||||
|
|
@ -349,11 +374,44 @@ def get_by_email_reset(email, reset_token):
|
|||
return helper.dict_to_camel_case(r)
|
||||
|
||||
|
||||
def get_member(tenant_id, user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify(
|
||||
f"""SELECT
|
||||
users.user_id,
|
||||
users.email,
|
||||
users.role,
|
||||
users.name,
|
||||
users.created_at,
|
||||
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
|
||||
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
|
||||
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
|
||||
DATE_PART('day',timezone('utc'::text, now()) \
|
||||
- COALESCE(basic_authentication.invited_at,'2000-01-01'::timestamp ))>=1 AS expired_invitation,
|
||||
basic_authentication.password IS NOT NULL AS joined,
|
||||
invitation_token
|
||||
FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id
|
||||
WHERE users.deleted_at IS NULL AND users.user_id=%(user_id)s
|
||||
ORDER BY name, user_id""",
|
||||
{"user_id": user_id})
|
||||
)
|
||||
u = helper.dict_to_camel_case(cur.fetchone())
|
||||
if u:
|
||||
u["createdAt"] = TimeUTC.datetime_to_timestamp(u["createdAt"])
|
||||
if u["invitationToken"]:
|
||||
u["invitationLink"] = __get_invitation_link(u.pop("invitationToken"))
|
||||
else:
|
||||
u["invitationLink"] = None
|
||||
|
||||
return u
|
||||
|
||||
|
||||
def get_members(tenant_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
f"""SELECT
|
||||
users.user_id AS id,
|
||||
users.user_id,
|
||||
users.email,
|
||||
users.role,
|
||||
users.name,
|
||||
|
|
@ -367,7 +425,7 @@ def get_members(tenant_id):
|
|||
invitation_token
|
||||
FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id
|
||||
WHERE users.deleted_at IS NULL
|
||||
ORDER BY name, id"""
|
||||
ORDER BY name, user_id"""
|
||||
)
|
||||
r = cur.fetchall()
|
||||
if len(r):
|
||||
|
|
@ -424,8 +482,8 @@ def change_password(tenant_id, user_id, email, old_password, new_password):
|
|||
changes = {"password": new_password}
|
||||
user = update(tenant_id=tenant_id, user_id=user_id, changes=changes)
|
||||
r = authenticate(user['email'], new_password)
|
||||
tenant_id = r.pop("tenantId")
|
||||
|
||||
tenant_id = r.pop("tenantId")
|
||||
r["limits"] = {
|
||||
"teamMember": -1,
|
||||
"projects": -1,
|
||||
|
|
@ -452,8 +510,8 @@ def set_password_invitation(user_id, new_password):
|
|||
"changePwdExpireAt": None, "changePwdToken": None}
|
||||
user = update(tenant_id=-1, user_id=user_id, changes=changes)
|
||||
r = authenticate(user['email'], new_password)
|
||||
tenant_id = r.pop("tenantId")
|
||||
|
||||
tenant_id = r.pop("tenantId")
|
||||
r["limits"] = {
|
||||
"teamMember": -1,
|
||||
"projects": -1,
|
||||
|
|
@ -552,7 +610,19 @@ def auth_exists(user_id, tenant_id, jwt_iat, jwt_aud):
|
|||
)
|
||||
|
||||
|
||||
def authenticate(email, password, for_change_password=False, for_plugin=False):
|
||||
def change_jwt_iat(user_id):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""UPDATE public.users
|
||||
SET jwt_iat = timezone('utc'::text, now())
|
||||
WHERE user_id = %(user_id)s
|
||||
RETURNING jwt_iat;""",
|
||||
{"user_id": user_id})
|
||||
cur.execute(query)
|
||||
return cur.fetchone().get("jwt_iat")
|
||||
|
||||
|
||||
def authenticate(email, password, for_change_password=False):
|
||||
with pg_client.PostgresClient() as cur:
|
||||
query = cur.mogrify(
|
||||
f"""SELECT
|
||||
|
|
@ -573,22 +643,16 @@ def authenticate(email, password, for_change_password=False, for_plugin=False):
|
|||
cur.execute(query)
|
||||
r = cur.fetchone()
|
||||
|
||||
if r is not None:
|
||||
if for_change_password:
|
||||
return True
|
||||
r = helper.dict_to_camel_case(r)
|
||||
query = cur.mogrify(
|
||||
f"""UPDATE public.users
|
||||
SET jwt_iat = timezone('utc'::text, now())
|
||||
WHERE user_id = %(user_id)s
|
||||
RETURNING jwt_iat;""",
|
||||
{"user_id": r["userId"]})
|
||||
cur.execute(query)
|
||||
return {
|
||||
"jwt": authorizers.generate_jwt(r['userId'], r['tenantId'],
|
||||
TimeUTC.datetime_to_timestamp(cur.fetchone()["jwt_iat"]),
|
||||
aud=f"plugin:{helper.get_stage_name()}" if for_plugin else f"front:{helper.get_stage_name()}"),
|
||||
"email": email,
|
||||
**r
|
||||
}
|
||||
if r is not None:
|
||||
if for_change_password:
|
||||
return True
|
||||
r = helper.dict_to_camel_case(r)
|
||||
jwt_iat = change_jwt_iat(r['userId'])
|
||||
return {
|
||||
"jwt": authorizers.generate_jwt(r['userId'], r['tenantId'],
|
||||
TimeUTC.datetime_to_timestamp(jwt_iat),
|
||||
aud=f"front:{helper.get_stage_name()}"),
|
||||
"email": email,
|
||||
**r
|
||||
}
|
||||
return None
|
||||
|
|
|
|||
|
|
@ -10,8 +10,7 @@ def get_by_id(webhook_id):
|
|||
with pg_client.PostgresClient() as cur:
|
||||
cur.execute(
|
||||
cur.mogrify("""\
|
||||
SELECT
|
||||
w.*
|
||||
SELECT w.*
|
||||
FROM public.webhooks AS w
|
||||
where w.webhook_id =%(webhook_id)s AND deleted_at ISNULL;""",
|
||||
{"webhook_id": webhook_id})
|
||||
|
|
@ -161,8 +160,8 @@ def __trigger(hook, data):
|
|||
|
||||
r = requests.post(url=hook["endpoint"], json=data, headers=headers)
|
||||
if r.status_code != 200:
|
||||
logging.error("=======> webhook: something went wrong")
|
||||
logging.error(r)
|
||||
logging.error("=======> webhook: something went wrong for:")
|
||||
logging.error(hook)
|
||||
logging.error(r.status_code)
|
||||
logging.error(r.text)
|
||||
return
|
||||
|
|
|
|||
|
|
@ -1,6 +1,9 @@
|
|||
import requests
|
||||
from datetime import datetime
|
||||
|
||||
from fastapi import HTTPException
|
||||
from starlette import status
|
||||
|
||||
|
||||
class github_formatters:
|
||||
|
||||
|
|
@ -120,9 +123,9 @@ class githubV3Request:
|
|||
pages = get_response_links(response)
|
||||
result = response.json()
|
||||
if response.status_code != 200:
|
||||
print("!-------- error")
|
||||
print(f"=>GITHUB Exception")
|
||||
print(result)
|
||||
raise Exception(result["message"])
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"GITHUB: {result['message']}")
|
||||
if isinstance(result, dict):
|
||||
return result
|
||||
results += result
|
||||
|
|
|
|||
|
|
@ -4,52 +4,18 @@ import re
|
|||
import string
|
||||
from typing import Union
|
||||
|
||||
import requests
|
||||
from decouple import config
|
||||
|
||||
import schemas
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
|
||||
local_prefix = 'local-'
|
||||
from decouple import config
|
||||
|
||||
|
||||
def get_version_number():
|
||||
return config("version")
|
||||
|
||||
|
||||
def get_stage_name():
|
||||
stage = config("stage")
|
||||
return stage[len(local_prefix):] if stage.startswith(local_prefix) else stage
|
||||
return "OpenReplay"
|
||||
|
||||
|
||||
def is_production():
|
||||
return get_stage_name() == "production"
|
||||
|
||||
|
||||
def is_staging():
|
||||
return get_stage_name() == "staging"
|
||||
|
||||
|
||||
def is_onprem():
|
||||
return not is_production() and not is_staging()
|
||||
|
||||
|
||||
def is_local():
|
||||
return config("stage").startswith(local_prefix)
|
||||
|
||||
|
||||
def generate_salt():
|
||||
return "".join(random.choices(string.hexdigits, k=36))
|
||||
|
||||
|
||||
def unique_ordered_list(array):
|
||||
uniq = []
|
||||
[uniq.append(x) for x in array if x not in uniq]
|
||||
return uniq
|
||||
|
||||
|
||||
def unique_unordered_list(array):
|
||||
return list(set(array))
|
||||
def random_string(length=36):
|
||||
return "".join(random.choices(string.hexdigits, k=length))
|
||||
|
||||
|
||||
def list_to_camel_case(items, flatten=False):
|
||||
|
|
@ -130,29 +96,11 @@ def key_to_snake_case(name, delimiter='_', split_number=False):
|
|||
TRACK_TIME = True
|
||||
|
||||
|
||||
def __sbool_to_bool(value):
|
||||
if value is None or not isinstance(value, str):
|
||||
return False
|
||||
return value.lower() in ["true", "yes", "1"]
|
||||
|
||||
|
||||
def allow_captcha():
|
||||
return config("captcha_server", default=None) is not None and config("captcha_key", default=None) is not None \
|
||||
and len(config("captcha_server")) > 0 and len(config("captcha_key")) > 0
|
||||
|
||||
|
||||
def allow_sentry():
|
||||
return config("sentryURL", default=None) is not None and len(config("sentryURL")) > 0
|
||||
|
||||
|
||||
def async_post(endpoint, data):
|
||||
data["auth"] = config("async_Token")
|
||||
try:
|
||||
requests.post(endpoint, timeout=1, json=data)
|
||||
except requests.exceptions.ReadTimeout:
|
||||
pass
|
||||
|
||||
|
||||
def string_to_sql_like(value):
|
||||
value = re.sub(' +', ' ', value)
|
||||
value = value.replace("*", "%")
|
||||
|
|
@ -222,54 +170,11 @@ def values_for_operator(value: Union[str, list], op: schemas.SearchEventOperator
|
|||
return value
|
||||
|
||||
|
||||
def is_valid_email(email):
|
||||
return re.match(r"[^@]+@[^@]+\.[^@]+", email) is not None
|
||||
|
||||
|
||||
def is_valid_http_url(url):
|
||||
regex = re.compile(
|
||||
r'^(?:http|ftp)s?://' # http:// or https://
|
||||
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
|
||||
r'localhost|' # localhost...
|
||||
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
|
||||
r'(?::\d+)?' # optional port
|
||||
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
|
||||
|
||||
return re.match(regex, url) is not None
|
||||
|
||||
|
||||
def is_valid_url(url):
|
||||
regex = re.compile(
|
||||
# r'^(?:http|ftp)s?://' # http:// or https://
|
||||
r'(?:(?:[A-Z0-9](?:[A-Z0-9-]{0,61}[A-Z0-9])?\.)+(?:[A-Z]{2,6}\.?|[A-Z0-9-]{2,}\.?)|' # domain...
|
||||
r'localhost|' # localhost...
|
||||
r'\d{1,3}\.\d{1,3}\.\d{1,3}\.\d{1,3})' # ...or ip
|
||||
r'(?::\d+)?' # optional port
|
||||
r'(?:/?|[/?]\S+)$', re.IGNORECASE)
|
||||
|
||||
return re.match(regex, url) is not None
|
||||
|
||||
|
||||
def is_alphabet_space(word):
|
||||
r = re.compile("^[a-zA-Z ]*$")
|
||||
return r.match(word) is not None
|
||||
|
||||
|
||||
def is_alphabet_latin_space(word):
|
||||
r = re.compile("^[a-zA-Z\u00C0-\u00D6\u00D8-\u00f6\u00f8-\u00ff\s ]*$")
|
||||
return r.match(word) is not None
|
||||
|
||||
|
||||
def is_alphabet_space_dash(word):
|
||||
r = re.compile("^[a-zA-Z -]*$")
|
||||
return r.match(word) is not None
|
||||
|
||||
|
||||
def is_alphanumeric_space(word):
|
||||
r = re.compile("^[a-zA-Z0-9._\- ]*$")
|
||||
return r.match(word) is not None
|
||||
|
||||
|
||||
def merge_lists_by_key(l1, l2, key):
|
||||
merged = {}
|
||||
for item in l1 + l2:
|
||||
|
|
@ -322,9 +227,6 @@ def explode_widget(data, key=None):
|
|||
return result
|
||||
|
||||
|
||||
TEMP_PATH = "./" if is_local() else "/tmp/"
|
||||
|
||||
|
||||
def get_issue_title(issue_type):
|
||||
return {'click_rage': "Click Rage",
|
||||
'dead_click': "Dead Click",
|
||||
|
|
|
|||
|
|
@ -35,7 +35,7 @@ class JiraManager:
|
|||
if (e.status_code // 100) == 4 and self.retries > 0:
|
||||
time.sleep(1)
|
||||
return self.get_projects()
|
||||
print(f"=>Exception {e.text}")
|
||||
print(f"=>JIRA Exception {e.text}")
|
||||
raise HTTPException(status_code=status.HTTP_400_BAD_REQUEST, detail=f"JIRA: {e.text}")
|
||||
projects_dict_list = []
|
||||
for project in projects:
|
||||
|
|
|
|||
|
|
@ -17,10 +17,8 @@ _PG_CONFIG = {"host": config("pg_host"),
              "port": config("pg_port", cast=int),
              "application_name": config("APP_NAME", default="PY")}
PG_CONFIG = dict(_PG_CONFIG)
if config("pg_timeout", cast=int, default=0) > 0:
    PG_CONFIG["options"] = f"-c statement_timeout={config('pg_timeout', cast=int) * 1000}"

logging.info(f">PG_POOL:{config('PG_POOL', default=None)}")
if config("PG_TIMEOUT", cast=int, default=0) > 0:
    PG_CONFIG["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int) * 1000}"


class ORThreadedConnectionPool(psycopg2.pool.ThreadedConnectionPool):
@ -67,8 +65,8 @@ def make_pool():
        except (Exception, psycopg2.DatabaseError) as error:
            logging.error("Error while closing all connexions to PostgreSQL", error)
    try:
        postgreSQL_pool = ORThreadedConnectionPool(config("pg_minconn", cast=int, default=20),
                                                   config("pg_maxconn", cast=int, default=80),
        postgreSQL_pool = ORThreadedConnectionPool(config("PG_MINCONN", cast=int, default=20),
                                                   config("PG_MAXCONN", cast=int, default=80),
                                                   **PG_CONFIG)
        if (postgreSQL_pool):
            logging.info("Connection pool created successfully")
@ -83,10 +81,6 @@ def make_pool():
        raise error


if config('PG_POOL', cast=bool, default=True):
    make_pool()


class PostgresClient:
    connection = None
    cursor = None
@ -109,7 +103,7 @@ class PostgresClient:
        elif not config('PG_POOL', cast=bool, default=True):
            single_config = dict(_PG_CONFIG)
            single_config["application_name"] += "-NOPOOL"
            single_config["options"] = f"-c statement_timeout={config('pg_timeout', cast=int, default=3 * 60) * 1000}"
            single_config["options"] = f"-c statement_timeout={config('PG_TIMEOUT', cast=int, default=30) * 1000}"
            self.connection = psycopg2.connect(**single_config)
        else:
            self.connection = postgreSQL_pool.getconn()
@ -117,6 +111,7 @@ class PostgresClient:
    def __enter__(self):
        if self.cursor is None:
            self.cursor = self.connection.cursor(cursor_factory=psycopg2.extras.RealDictCursor)
        self.cursor.recreate = self.recreate_cursor
        return self.cursor

    def __exit__(self, *args):
@ -141,6 +136,31 @@ class PostgresClient:
                and not self.unlimited_query:
            postgreSQL_pool.putconn(self.connection)

    def recreate_cursor(self, rollback=False):
        if rollback:
            try:
                self.connection.rollback()
            except Exception as error:
                logging.error("Error while rollbacking connection for recreation", error)
        try:
            self.cursor.close()
        except Exception as error:
            logging.error("Error while closing cursor for recreation", error)
        self.cursor = None
        return self.__enter__()


def close():
    pass


async def init():
    logging.info(f">PG_POOL:{config('PG_POOL', default=None)}")
    if config('PG_POOL', cast=bool, default=True):
        make_pool()


async def terminate():
    global postgreSQL_pool
    if postgreSQL_pool is not None:
        try:
            postgreSQL_pool.closeall()
            logging.info("Closed all connexions to PostgreSQL")
        except (Exception, psycopg2.DatabaseError) as error:
            logging.error("Error while closing all connexions to PostgreSQL", error)
@ -70,7 +70,6 @@ def get_file(source_bucket, source_key):
        )
    except ClientError as ex:
        if ex.response['Error']['Code'] == 'NoSuchKey':
            print(f'======> No object found - returning None for \nbucket:{source_bucket}\nkey:{source_key}')
            return None
        else:
            raise ex
@ -1,3 +0,0 @@
BEGIN;
CREATE INDEX pages_ttfb_idx ON events.pages (ttfb) WHERE ttfb > 0;
COMMIT;
@ -1,5 +1,3 @@
#!/bin/sh
cd sourcemap-reader
nohup npm start &
cd ..

uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload --proxy-headers
@ -1,3 +1,3 @@
#!/bin/sh

export ASSIST_KEY=ignore
uvicorn app:app --host 0.0.0.0 --port $LISTEN_PORT --reload
@ -12,19 +12,14 @@ S3_KEY=
S3_SECRET=
SITE_URL=
announcement_url=
async_Token=
captcha_key=
captcha_server=
change_password_link=/reset-password?invitation=%s&&pass=%s
email_basic=http://127.0.0.1:8000/async/basic/%s
email_signup=http://127.0.0.1:8000/async/email_signup/%s
invitation_link=/api/users/invitation?token=%s
isEE=false
isFOS=true
js_cache_bucket=sessions-assets
jwt_algorithm=HS512
jwt_exp_delta_seconds=2592000
jwt_issuer=openreplay-default-foss
JWT_EXPIRATION=2592000
JWT_ISSUER=openreplay-oss
jwt_secret="SET A RANDOM STRING HERE"
ASSIST_URL=http://assist-openreplay.app.svc.cluster.local:9001/assist/%s
assist=/sockets-live
@ -34,18 +29,25 @@ pg_host=postgresql.db.svc.cluster.local
pg_password=asayerPostgres
pg_port=5432
pg_user=postgres
pg_timeout=30
pg_minconn=20
pg_maxconn=50
PG_TIMEOUT=30
PG_MINCONN=20
PG_MAXCONN=50
PG_RETRY_MAX=50
PG_RETRY_INTERVAL=2
PG_POOL=true
put_S3_TTL=20
sentryURL=
sessions_bucket=mobs
sessions_region=us-east-1
sourcemaps_bucket=sourcemaps
sourcemaps_reader=http://127.0.0.1:9000/sourcemaps/%s/sourcemaps
stage=default-foss
sourcemaps_reader=http://sourcemaps-reader-openreplay.app.svc.cluster.local:9000/sourcemaps/%s/sourcemaps
STAGE=default-foss
version_number=1.4.0
FS_DIR=/mnt/efs
FS_DIR=/mnt/efs
EFS_SESSION_MOB_PATTERN=%(sessionId)s
EFS_DEVTOOLS_MOB_PATTERN=%(sessionId)sdevtools
SESSION_MOB_PATTERN_S=%(sessionId)s/dom.mobs
SESSION_MOB_PATTERN_E=%(sessionId)s/dom.mobe
DEVTOOLS_MOB_PATTERN=%(sessionId)s/devtools.mob
PRESIGNED_URL_EXPIRATION=3600
ASSIST_JWT_EXPIRATION=144000
ASSIST_JWT_SECRET=
PYTHONUNBUFFERED=1
@ -1,15 +1,15 @@
requests==2.28.1
urllib3==1.26.10
boto3==1.24.53
pyjwt==2.4.0
psycopg2-binary==2.9.3
elasticsearch==8.3.3
jira==3.3.1
urllib3==1.26.12
boto3==1.26.14
pyjwt==2.6.0
psycopg2-binary==2.9.5
elasticsearch==8.5.1
jira==3.4.1


fastapi==0.80.0
uvicorn[standard]==0.18.2
fastapi==0.87.0
uvicorn[standard]==0.20.0
python-decouple==3.6
pydantic[email]==1.9.2
apscheduler==3.9.1
pydantic[email]==1.10.2
apscheduler==3.9.1.post1
@ -1,15 +1,15 @@
requests==2.28.1
urllib3==1.26.10
boto3==1.24.53
pyjwt==2.4.0
psycopg2-binary==2.9.3
elasticsearch==8.3.3
jira==3.3.1
urllib3==1.26.12
boto3==1.26.14
pyjwt==2.6.0
psycopg2-binary==2.9.5
elasticsearch==8.5.1
jira==3.4.1


fastapi==0.80.0
uvicorn[standard]==0.18.2
fastapi==0.87.0
uvicorn[standard]==0.20.0
python-decouple==3.6
pydantic[email]==1.9.2
apscheduler==3.9.1
pydantic[email]==1.10.2
apscheduler==3.9.1.post1
@ -6,9 +6,11 @@ from auth.auth_project import ProjectAuthorizer
from or_dependencies import ORRoute


def get_routers() -> (APIRouter, APIRouter, APIRouter):
def get_routers(extra_dependencies=[]) -> (APIRouter, APIRouter, APIRouter):
    public_app = APIRouter(route_class=ORRoute)
    app = APIRouter(dependencies=[Depends(JWTAuth()), Depends(ProjectAuthorizer("projectId"))], route_class=ORRoute)
    app_apikey = APIRouter(dependencies=[Depends(APIKeyAuth()), Depends(ProjectAuthorizer("projectKey"))],
                           route_class=ORRoute)
    app = APIRouter(dependencies=[Depends(JWTAuth()), Depends(ProjectAuthorizer("projectId"))] + extra_dependencies,
                    route_class=ORRoute)
    app_apikey = APIRouter(
        dependencies=[Depends(APIKeyAuth()), Depends(ProjectAuthorizer("projectKey"))] + extra_dependencies,
        route_class=ORRoute)
    return public_app, app, app_apikey
@ -1,22 +1,19 @@
|
|||
from typing import Union
|
||||
|
||||
from decouple import config
|
||||
from fastapi import Depends, Body, BackgroundTasks, HTTPException
|
||||
from fastapi.responses import FileResponse
|
||||
from fastapi import Depends, Body, HTTPException
|
||||
from starlette import status
|
||||
|
||||
import schemas
|
||||
from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, \
|
||||
alerts, funnels, issues, integrations_manager, metadata, \
|
||||
log_tool_elasticsearch, log_tool_datadog, \
|
||||
log_tool_stackdriver, reset_password, sessions_favorite, \
|
||||
log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, errors, sessions, \
|
||||
log_tool_stackdriver, reset_password, log_tool_cloudwatch, log_tool_sentry, log_tool_sumologic, log_tools, sessions, \
|
||||
log_tool_newrelic, announcements, log_tool_bugsnag, weekly_report, integration_jira_cloud, integration_github, \
|
||||
assist, heatmaps, mobile, signup, tenants, errors_viewed, boarding, notifications, webhook, users, \
|
||||
custom_metrics, saved_search, integrations_global, sessions_viewed, errors_favorite
|
||||
assist, mobile, signup, tenants, boarding, notifications, webhook, users, \
|
||||
custom_metrics, saved_search, integrations_global
|
||||
from chalicelib.core.collaboration_slack import Slack
|
||||
from chalicelib.utils import email_helper, helper, captcha
|
||||
from chalicelib.utils.TimeUTC import TimeUTC
|
||||
from chalicelib.utils import helper, captcha
|
||||
from or_dependencies import OR_context
|
||||
from routers.base import get_routers
|
||||
|
||||
|
|
@ -31,7 +28,7 @@ def login(data: schemas.UserLoginSchema = Body(...)):
|
|||
detail="Invalid captcha."
|
||||
)
|
||||
|
||||
r = users.authenticate(data.email, data.password, for_plugin=False)
|
||||
r = users.authenticate(data.email, data.password)
|
||||
if r is None:
|
||||
raise HTTPException(
|
||||
status_code=status.HTTP_401_UNAUTHORIZED,
|
||||
|
|
@ -59,85 +56,12 @@ def sessions_search(projectId: int, data: schemas.FlatSessionsSearchPayloadSchem
|
|||
return {'data': data}
|
||||
|
||||
|
||||
@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
|
||||
@app.get('/{projectId}/sessions2/{sessionId}', tags=["sessions"])
|
||||
def get_session2(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
if isinstance(sessionId, str):
|
||||
return {"errors": ["session not found"]}
|
||||
data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True, user_id=context.user_id,
|
||||
include_fav_viewed=True, group_metadata=True)
|
||||
if data is None:
|
||||
return {"errors": ["session not found"]}
|
||||
if data.get("inDB"):
|
||||
background_tasks.add_task(sessions_viewed.view_session, project_id=projectId, user_id=context.user_id,
|
||||
session_id=sessionId)
|
||||
return {
|
||||
'data': data
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/sessions/{sessionId}/favorite', tags=["sessions"])
|
||||
@app.get('/{projectId}/sessions2/{sessionId}/favorite', tags=["sessions"])
|
||||
def add_remove_favorite_session2(projectId: int, sessionId: int,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {
|
||||
"data": sessions_favorite.favorite_session(project_id=projectId, user_id=context.user_id,
|
||||
session_id=sessionId)}
|
||||
|
||||
|
||||
@app.get('/{projectId}/sessions/{sessionId}/assign', tags=["sessions"])
|
||||
@app.get('/{projectId}/sessions2/{sessionId}/assign', tags=["sessions"])
|
||||
def assign_session(projectId: int, sessionId, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = sessions_assignments.get_by_session(project_id=projectId, session_id=sessionId,
|
||||
tenant_id=context.tenant_id,
|
||||
user_id=context.user_id)
|
||||
if "errors" in data:
|
||||
return data
|
||||
return {
|
||||
'data': data
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/sessions/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
|
||||
@app.get('/{projectId}/sessions2/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
|
||||
def get_error_trace(projectId: int, sessionId: int, errorId: str,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = errors.get_trace(project_id=projectId, error_id=errorId)
|
||||
if "errors" in data:
|
||||
return data
|
||||
return {
|
||||
'data': data
|
||||
}
|
||||
|
||||
|
||||
@app.get('/{projectId}/sessions/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"])
|
||||
@app.get('/{projectId}/sessions2/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"])
|
||||
def assign_session(projectId: int, sessionId: int, issueId: str,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = sessions_assignments.get(project_id=projectId, session_id=sessionId, assignment_id=issueId,
|
||||
tenant_id=context.tenant_id, user_id=context.user_id)
|
||||
if "errors" in data:
|
||||
return data
|
||||
return {
|
||||
'data': data
|
||||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/sessions/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
|
||||
@app.put('/{projectId}/sessions/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
|
||||
@app.post('/{projectId}/sessions2/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
|
||||
@app.put('/{projectId}/sessions2/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
|
||||
def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schemas.CommentAssignmentSchema = Body(...),
|
||||
@app.post('/{projectId}/sessions/search/ids', tags=["sessions"])
|
||||
@app.post('/{projectId}/sessions/search2/ids', tags=["sessions"])
|
||||
def session_ids_search(projectId: int, data: schemas.FlatSessionsSearchPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = sessions_assignments.comment(tenant_id=context.tenant_id, project_id=projectId,
|
||||
session_id=sessionId, assignment_id=issueId,
|
||||
user_id=context.user_id, message=data.message)
|
||||
if "errors" in data.keys():
|
||||
return data
|
||||
return {
|
||||
'data': data
|
||||
}
|
||||
data = sessions.search_sessions(data=data, project_id=projectId, user_id=context.user_id, ids_only=True)
|
||||
return {'data': data}
|
||||
|
||||
|
||||
@app.get('/{projectId}/events/search', tags=["events"])
|
||||
|
|
@ -181,7 +105,6 @@ def get_integrations_status(projectId: int, context: schemas.CurrentContext = De
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/{integration}/notify/{integrationId}/{source}/{sourceId}', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/{integration}/notify/{integrationId}/{source}/{sourceId}', tags=["integrations"])
|
||||
def integration_notify(projectId: int, integration: str, integrationId: int, source: str, sourceId: str,
|
||||
data: schemas.IntegrationNotificationSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
|
|
@ -210,7 +133,6 @@ def get_sentry(projectId: int, context: schemas.CurrentContext = Depends(OR_cont
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/sentry', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/sentry', tags=["integrations"])
|
||||
def add_edit_sentry(projectId: int, data: schemas.SentrySchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": log_tool_sentry.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}
|
||||
|
|
@ -237,7 +159,6 @@ def get_datadog(projectId: int, context: schemas.CurrentContext = Depends(OR_con
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/datadog', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/datadog', tags=["integrations"])
|
||||
def add_edit_datadog(projectId: int, data: schemas.DatadogSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": log_tool_datadog.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}
|
||||
|
|
@ -259,7 +180,6 @@ def get_stackdriver(projectId: int, context: schemas.CurrentContext = Depends(OR
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/stackdriver', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/stackdriver', tags=["integrations"])
|
||||
def add_edit_stackdriver(projectId: int, data: schemas.StackdriverSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": log_tool_stackdriver.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}
|
||||
|
|
@ -281,7 +201,6 @@ def get_newrelic(projectId: int, context: schemas.CurrentContext = Depends(OR_co
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/newrelic', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/newrelic', tags=["integrations"])
|
||||
def add_edit_newrelic(projectId: int, data: schemas.NewrelicSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": log_tool_newrelic.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}
|
||||
|
|
@ -303,7 +222,6 @@ def get_rollbar(projectId: int, context: schemas.CurrentContext = Depends(OR_con
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/rollbar', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/rollbar', tags=["integrations"])
|
||||
def add_edit_rollbar(projectId: int, data: schemas.RollbarSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": log_tool_rollbar.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}
|
||||
|
|
@ -331,7 +249,6 @@ def get_bugsnag(projectId: int, context: schemas.CurrentContext = Depends(OR_con
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/bugsnag', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/bugsnag', tags=["integrations"])
|
||||
def add_edit_bugsnag(projectId: int, data: schemas.BugsnagSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": log_tool_bugsnag.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}
|
||||
|
|
@ -361,7 +278,6 @@ def get_cloudwatch(projectId: int, context: schemas.CurrentContext = Depends(OR_
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/cloudwatch', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/cloudwatch', tags=["integrations"])
|
||||
def add_edit_cloudwatch(projectId: int, data: schemas.CloudwatchSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": log_tool_cloudwatch.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}
|
||||
|
|
@ -389,7 +305,6 @@ def test_elasticsearch_connection(data: schemas.ElasticsearchBasicSchema = Body(
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/elasticsearch', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/elasticsearch', tags=["integrations"])
|
||||
def add_edit_elasticsearch(projectId: int, data: schemas.ElasticsearchSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {
|
||||
|
|
@ -412,7 +327,6 @@ def get_sumologic(projectId: int, context: schemas.CurrentContext = Depends(OR_c
|
|||
|
||||
|
||||
@app.post('/{projectId}/integrations/sumologic', tags=["integrations"])
|
||||
@app.put('/{projectId}/integrations/sumologic', tags=["integrations"])
|
||||
def add_edit_sumologic(projectId: int, data: schemas.SumologicSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": log_tool_sumologic.add_edit(tenant_id=context.tenant_id, project_id=projectId, data=data.dict())}
|
||||
|
|
@ -453,7 +367,6 @@ def get_integration_status_github(context: schemas.CurrentContext = Depends(OR_c
|
|||
|
||||
|
||||
@app.post('/integrations/jira', tags=["integrations"])
|
||||
@app.put('/integrations/jira', tags=["integrations"])
|
||||
def add_edit_jira_cloud(data: schemas.JiraSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
if not data.url.endswith('atlassian.net'):
|
||||
|
|
@ -467,7 +380,6 @@ def add_edit_jira_cloud(data: schemas.JiraSchema = Body(...),
|
|||
|
||||
|
||||
@app.post('/integrations/github', tags=["integrations"])
|
||||
@app.put('/integrations/github', tags=["integrations"])
|
||||
def add_edit_github(data: schemas.GithubSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
error, integration = integrations_manager.get_integration(tool=integration_github.PROVIDER,
|
||||
|
|
@ -541,8 +453,7 @@ def get_all_assignments(projectId: int, context: schemas.CurrentContext = Depend
|
|||
}
|
||||
|
||||
|
||||
@app.post('/{projectId}/sessions2/{sessionId}/assign/projects/{integrationProjectId}', tags=["assignment"])
|
||||
@app.put('/{projectId}/sessions2/{sessionId}/assign/projects/{integrationProjectId}', tags=["assignment"])
|
||||
@app.post('/{projectId}/sessions/{sessionId}/assign/projects/{integrationProjectId}', tags=["assignment"])
|
||||
def create_issue_assignment(projectId: int, sessionId: int, integrationProjectId,
|
||||
data: schemas.AssignmentSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
|
|
@ -565,14 +476,12 @@ def get_gdpr(projectId: int, context: schemas.CurrentContext = Depends(OR_contex
|
|||
|
||||
|
||||
@app.post('/{projectId}/gdpr', tags=["projects", "gdpr"])
|
||||
@app.put('/{projectId}/gdpr', tags=["projects", "gdpr"])
|
||||
def edit_gdpr(projectId: int, data: schemas.GdprSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": projects.edit_gdpr(project_id=projectId, gdpr=data.dict())}
|
||||
|
||||
|
||||
@public_app.post('/password/reset-link', tags=["reset password"])
|
||||
@public_app.put('/password/reset-link', tags=["reset password"])
|
||||
def reset_password_handler(data: schemas.ForgetPasswordPayloadSchema = Body(...)):
|
||||
if len(data.email) < 5:
|
||||
return {"errors": ["please provide a valid email address"]}
|
||||
|
|
@ -585,21 +494,18 @@ def get_metadata(projectId: int, context: schemas.CurrentContext = Depends(OR_co
|
|||
|
||||
|
||||
@app.post('/{projectId}/metadata/list', tags=["metadata"])
|
||||
@app.put('/{projectId}/metadata/list', tags=["metadata"])
|
||||
def add_edit_delete_metadata(projectId: int, data: schemas.MetadataListSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return metadata.add_edit_delete(tenant_id=context.tenant_id, project_id=projectId, new_metas=data.list)
|
||||
|
||||
|
||||
@app.post('/{projectId}/metadata', tags=["metadata"])
|
||||
@app.put('/{projectId}/metadata', tags=["metadata"])
|
||||
def add_metadata(projectId: int, data: schemas.MetadataBasicSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return metadata.add(tenant_id=context.tenant_id, project_id=projectId, new_name=data.key)
|
||||
|
||||
|
||||
@app.post('/{projectId}/metadata/{index}', tags=["metadata"])
|
||||
@app.put('/{projectId}/metadata/{index}', tags=["metadata"])
|
||||
def edit_metadata(projectId: int, index: int, data: schemas.MetadataBasicSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return metadata.edit(tenant_id=context.tenant_id, project_id=projectId, index=index,
|
||||
|
|
@ -627,28 +533,12 @@ def search_integrations(projectId: int, context: schemas.CurrentContext = Depend
|
|||
return log_tools.search(project_id=projectId)
|
||||
|
||||
|
||||
@public_app.post('/async/email_assignment', tags=["async mail"])
|
||||
def async_send_signup_emails(data: schemas.EmailPayloadSchema = Body(...)):
|
||||
if data.auth != config("async_Token"):
|
||||
return {}
|
||||
email_helper.send_assign_session(recipient=data.email, link=data.link, message=data.message)
|
||||
|
||||
|
||||
# @public_app.post('/async/basic/member_invitation', tags=["async mail"])
|
||||
# def async_basic_emails(data: schemas.MemberInvitationPayloadSchema = Body(...)):
|
||||
# if data.auth != config("async_Token"):
|
||||
# return {}
|
||||
# email_helper.send_team_invitation(recipient=data.email, invitation_link=data.invitation_link,
|
||||
# client_id=data.client_id, sender_name=data.sender_name)
|
||||
|
||||
|
||||
@app.get('/{projectId}/sample_rate', tags=["projects"])
|
||||
def get_capture_status(projectId: int, context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": projects.get_capture_status(project_id=projectId)}
|
||||
|
||||
|
||||
@app.post('/{projectId}/sample_rate', tags=["projects"])
|
||||
@app.put('/{projectId}/sample_rate', tags=["projects"])
|
||||
def update_capture_status(projectId: int, data: schemas.SampleRateSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": projects.update_capture_status(project_id=projectId, changes=data.dict())}
|
||||
|
|
@ -664,20 +554,12 @@ def get_all_announcements(context: schemas.CurrentContext = Depends(OR_context))
|
|||
return {"data": announcements.view(user_id=context.user_id)}
|
||||
|
||||
|
||||
@app.post('/{projectId}/errors/merge', tags=["errors"])
|
||||
def errors_merge(projectId: int, data: schemas.ErrorIdsPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = errors.merge(error_ids=data.errors)
|
||||
return data
|
||||
|
||||
|
||||
@app.get('/show_banner', tags=["banner"])
|
||||
def errors_merge(context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": False}
|
||||
|
||||
|
||||
@app.post('/{projectId}/alerts', tags=["alerts"])
|
||||
@app.put('/{projectId}/alerts', tags=["alerts"])
|
||||
def create_alert(projectId: int, data: schemas.AlertSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return alerts.create(projectId, data)
|
||||
|
|
@ -700,7 +582,6 @@ def get_alert(projectId: int, alertId: int, context: schemas.CurrentContext = De
|
|||
|
||||
|
||||
@app.post('/{projectId}/alerts/{alertId}', tags=["alerts"])
|
||||
@app.put('/{projectId}/alerts/{alertId}', tags=["alerts"])
|
||||
def update_alert(projectId: int, alertId: int, data: schemas.AlertSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return alerts.update(alertId, data)
|
||||
|
|
@ -712,7 +593,6 @@ def delete_alert(projectId: int, alertId: int, context: schemas.CurrentContext =
|
|||
|
||||
|
||||
@app.post('/{projectId}/funnels', tags=["funnels"])
|
||||
@app.put('/{projectId}/funnels', tags=["funnels"])
|
||||
def add_funnel(projectId: int, data: schemas.FunnelSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return funnels.create(project_id=projectId,
|
||||
|
|
@ -756,7 +636,6 @@ def get_funnel_insights(projectId: int, funnelId: int, rangeValue: str = None, s
|
|||
|
||||
|
||||
@app.post('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"])
|
||||
@app.put('/{projectId}/funnels/{funnelId}/insights', tags=["funnels"])
|
||||
def get_funnel_insights_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelInsightsPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return funnels.get_top_insights_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId,
|
||||
|
|
@ -771,7 +650,6 @@ def get_funnel_issues(projectId: int, funnelId, rangeValue: str = None, startDat
|
|||
|
||||
|
||||
@app.post('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"])
|
||||
@app.put('/{projectId}/funnels/{funnelId}/issues', tags=["funnels"])
|
||||
def get_funnel_issues_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelSearchPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": funnels.get_issues_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId,
|
||||
|
|
@ -788,7 +666,6 @@ def get_funnel_sessions(projectId: int, funnelId: int, rangeValue: str = None, s
|
|||
|
||||
|
||||
@app.post('/{projectId}/funnels/{funnelId}/sessions', tags=["funnels"])
|
||||
@app.put('/{projectId}/funnels/{funnelId}/sessions', tags=["funnels"])
|
||||
def get_funnel_sessions_on_the_fly(projectId: int, funnelId: int, data: schemas.FunnelSearchPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": funnels.get_sessions_on_the_fly(funnel_id=funnelId, user_id=context.user_id, project_id=projectId,
|
||||
|
|
@ -808,7 +685,6 @@ def get_funnel_issue_sessions(projectId: int, issueId: str, startDate: int = Non
|
|||
|
||||
|
||||
@app.post('/{projectId}/funnels/{funnelId}/issues/{issueId}/sessions', tags=["funnels"])
|
||||
@app.put('/{projectId}/funnels/{funnelId}/issues/{issueId}/sessions', tags=["funnels"])
|
||||
def get_funnel_issue_sessions(projectId: int, funnelId: int, issueId: str,
|
||||
data: schemas.FunnelSearchPayloadSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
|
|
@ -832,7 +708,6 @@ def get_funnel(projectId: int, funnelId: int, context: schemas.CurrentContext =
|
|||
|
||||
|
||||
@app.post('/{projectId}/funnels/{funnelId}', tags=["funnels"])
|
||||
@app.put('/{projectId}/funnels/{funnelId}', tags=["funnels"])
|
||||
def edit_funnel(projectId: int, funnelId: int, data: schemas.UpdateFunnelSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return funnels.update(funnel_id=funnelId,
|
||||
|
|
@ -865,7 +740,6 @@ def get_weekly_report_config(context: schemas.CurrentContext = Depends(OR_contex
|
|||
|
||||
|
||||
@app.post('/config/weekly_report', tags=["weekly report config"])
|
||||
@app.put('/config/weekly_report', tags=["weekly report config"])
|
||||
def edit_weekly_report_config(data: schemas.WeeklyReportConfigSchema = Body(...),
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
return {"data": weekly_report.edit_config(user_id=context.user_id, weekly_report=data.weekly_report)}
|
||||
|
|
@ -894,58 +768,24 @@ def sessions_live(projectId: int, data: schemas.LiveSessionsSearchPayloadSchema
|
|||
return {'data': data}
|
||||
|
||||
|
||||
@app.get('/{projectId}/assist/sessions/{sessionId}', tags=["assist"])
|
||||
def get_live_session(projectId: int, sessionId: str, background_tasks: BackgroundTasks,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
data = assist.get_live_session_by_id(project_id=projectId, session_id=sessionId)
|
||||
if data is None:
|
||||
data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True,
|
||||
user_id=context.user_id, include_fav_viewed=True, group_metadata=True, live=False)
|
||||
if data is None:
|
||||
return {"errors": ["session not found"]}
|
||||
if data.get("inDB"):
|
||||
background_tasks.add_task(sessions_viewed.view_session, project_id=projectId,
|
||||
user_id=context.user_id, session_id=sessionId)
|
||||
return {'data': data}
|
||||
|
||||
|
||||
@app.get('/{projectId}/unprocessed/{sessionId}', tags=["assist"])
|
||||
@app.get('/{projectId}/assist/sessions/{sessionId}/replay', tags=["assist"])
|
||||
def get_live_session_replay_file(projectId: int, sessionId: str,
|
||||
context: schemas.CurrentContext = Depends(OR_context)):
|
||||
path = assist.get_raw_mob_by_id(project_id=projectId, session_id=sessionId)
|
||||
if path is None:
|
||||
return {"errors": ["Replay file not found"]}
|
||||
|
||||
return FileResponse(path=path, media_type="application/octet-stream")
|
||||
|
||||
|
||||
@app.post('/{projectId}/heatmaps/url', tags=["heatmaps"])
|
||||
def get_heatmaps_by_url(projectId: int, data: schemas.GetHeatmapPayloadSchema = Body(...),
                        context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": heatmaps.get_by_url(project_id=projectId, data=data.dict())}


@app.post('/{projectId}/mobile/{sessionId}/urls', tags=['mobile'])
def mobile_signe(projectId: int, sessionId: int, data: schemas.MobileSignPayloadSchema = Body(...),
                 context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": mobile.sign_keys(project_id=projectId, session_id=sessionId, keys=data.keys)}


@public_app.put('/signup', tags=['signup'])
@public_app.post('/signup', tags=['signup'])
@public_app.put('/signup', tags=['signup'])
def signup_handler(data: schemas.UserSignupSchema = Body(...)):
    return signup.create_step1(data)


@app.post('/projects', tags=['projects'])
@app.put('/projects', tags=['projects'])
def create_project(data: schemas.CreateProjectSchema = Body(...),
                   context: schemas.CurrentContext = Depends(OR_context)):
    return projects.create(tenant_id=context.tenant_id, user_id=context.user_id, data=data)


@app.post('/projects/{projectId}', tags=['projects'])
@app.put('/projects/{projectId}', tags=['projects'])
def edit_project(projectId: int, data: schemas.CreateProjectSchema = Body(...),
                 context: schemas.CurrentContext = Depends(OR_context)):

@@ -964,74 +804,13 @@ def generate_new_tenant_token(context: schemas.CurrentContext = Depends(OR_conte
    }


@app.put('/client', tags=['client'])
@app.post('/client', tags=['client'])
@app.put('/client', tags=['client'])
def edit_client(data: schemas.UpdateTenantSchema = Body(...),
                context: schemas.CurrentContext = Depends(OR_context)):
    return tenants.update(tenant_id=context.tenant_id, user_id=context.user_id, data=data)


@app.post('/{projectId}/errors/search', tags=['errors'])
def errors_search(projectId: int, data: schemas.SearchErrorsSchema = Body(...),
                  context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": errors.search(data, projectId, user_id=context.user_id)}


@app.get('/{projectId}/errors/stats', tags=['errors'])
def errors_stats(projectId: int, startTimestamp: int, endTimestamp: int,
                 context: schemas.CurrentContext = Depends(OR_context)):
    return errors.stats(projectId, user_id=context.user_id, startTimestamp=startTimestamp, endTimestamp=endTimestamp)


@app.get('/{projectId}/errors/{errorId}', tags=['errors'])
def errors_get_details(projectId: int, errorId: str, background_tasks: BackgroundTasks, density24: int = 24,
                       density30: int = 30,
                       context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
                              **{"density24": density24, "density30": density30})
    if data.get("data") is not None:
        background_tasks.add_task(errors_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
                                  error_id=errorId)
    return data


@app.get('/{projectId}/errors/{errorId}/stats', tags=['errors'])
def errors_get_details_right_column(projectId: int, errorId: str, startDate: int = TimeUTC.now(-7),
                                    endDate: int = TimeUTC.now(), density: int = 7,
                                    context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_details_chart(project_id=projectId, user_id=context.user_id, error_id=errorId,
                                    **{"startDate": startDate, "endDate": endDate, "density": density})
    return data


@app.get('/{projectId}/errors/{errorId}/sourcemaps', tags=['errors'])
def errors_get_details_sourcemaps(projectId: int, errorId: str,
                                  context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_trace(project_id=projectId, error_id=errorId)
    if "errors" in data:
        return data
    return {
        'data': data
    }


@app.get('/{projectId}/errors/{errorId}/{action}', tags=["errors"])
def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
                              endDate: int = TimeUTC.now(), context: schemas.CurrentContext = Depends(OR_context)):
    if action == "favorite":
        return errors_favorite.favorite_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
    elif action == "sessions":
        start_date = startDate
        end_date = endDate
        return {
            "data": errors.get_sessions(project_id=projectId, user_id=context.user_id, error_id=errorId,
                                        start_date=start_date, end_date=end_date)}
    elif action in list(errors.ACTION_STATE.keys()):
        return errors.change_state(project_id=projectId, user_id=context.user_id, error_id=errorId, action=action)
    else:
        return {"errors": ["undefined action"]}


@app.get('/notifications', tags=['notifications'])
def get_notifications(context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": notifications.get_all(tenant_id=context.tenant_id, user_id=context.user_id)}

@@ -1048,7 +827,6 @@ def view_notifications(notificationId: int, context: schemas.CurrentContext = De

@app.post('/notifications/view', tags=['notifications'])
@app.put('/notifications/view', tags=['notifications'])
def batch_view_notifications(data: schemas.NotificationsViewSchema,
                             context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": notifications.view_notification(notification_ids=data.ids,

@@ -1058,14 +836,6 @@ def batch_view_notifications(data: schemas.NotificationsViewSchema,
                                                    tenant_id=context.tenant_id)}


@public_app.post('/notifications', tags=['notifications'])
@public_app.put('/notifications', tags=['notifications'])
def create_notifications(data: schemas.CreateNotificationSchema):
    if data.token != config("async_Token"):
        return {"errors": ["missing token"]}
    return notifications.create(data.notifications)


@app.get('/boarding', tags=['boarding'])
def get_boarding_state(context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": boarding.get_state(tenant_id=context.tenant_id)}

@@ -1106,7 +876,6 @@ def delete_slack_integration(integrationId: int, context: schemas.CurrentContext
    return webhook.delete(context.tenant_id, integrationId)


@app.post('/webhooks', tags=["webhooks"])
@app.put('/webhooks', tags=["webhooks"])
def add_edit_webhook(data: schemas.CreateEditWebhookSchema = Body(...),
                     context: schemas.CurrentContext = Depends(OR_context)):

@@ -1144,7 +913,6 @@ def generate_new_user_token(context: schemas.CurrentContext = Depends(OR_context

@app.post('/account/password', tags=["account"])
@app.put('/account/password', tags=["account"])
def change_client_password(data: schemas.EditUserPasswordSchema = Body(...),
                           context: schemas.CurrentContext = Depends(OR_context)):
    return users.change_password(email=context.email, old_password=data.old_password,

@@ -1153,7 +921,6 @@ def change_client_password(data: schemas.EditUserPasswordSchema = Body(...),

@app.post('/{projectId}/saved_search', tags=["savedSearch"])
@app.put('/{projectId}/saved_search', tags=["savedSearch"])
def add_saved_search(projectId: int, data: schemas.SavedSearchSchema = Body(...),
                     context: schemas.CurrentContext = Depends(OR_context)):
    return saved_search.create(project_id=projectId, user_id=context.user_id, data=data)

@@ -1170,7 +937,6 @@ def get_saved_search(projectId: int, search_id: int, context: schemas.CurrentCon

@app.post('/{projectId}/saved_search/{search_id}', tags=["savedSearch"])
@app.put('/{projectId}/saved_search/{search_id}', tags=["savedSearch"])
def update_saved_search(projectId: int, search_id: int, data: schemas.SavedSearchSchema = Body(...),
                        context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": saved_search.update(user_id=context.user_id, search_id=search_id, data=data, project_id=projectId)}

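Note: the handlers above are deliberately registered under both POST and PUT by stacking router decorators. A minimal, self-contained sketch of that pattern (router and schema names here are simplified stand-ins, not the project's own):

from fastapi import APIRouter, Body, FastAPI
from pydantic import BaseModel

app = APIRouter()  # stand-in for the project's authenticated router


class CreateProjectSchema(BaseModel):  # simplified stand-in schema
    name: str = "my new project"


# Stacking decorators registers the same function for both HTTP methods,
# so POST /projects and PUT /projects run identical code.
@app.post('/projects', tags=['projects'])
@app.put('/projects', tags=['projects'])
def create_project(data: CreateProjectSchema = Body(...)):
    return {"data": {"name": data.name}}


api = FastAPI()
api.include_router(app)
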
@@ -1,16 +1,18 @@
from typing import Optional
from typing import Optional, Union

from decouple import config
from fastapi import Body, Depends, BackgroundTasks
from starlette.responses import RedirectResponse
from starlette.responses import RedirectResponse, FileResponse

import schemas
from chalicelib.core import integrations_manager
from chalicelib.core import sessions
from chalicelib.core import sessions, errors, errors_viewed, errors_favorite, sessions_assignments, heatmaps, \
    sessions_favorite, assist, sessions_notes
from chalicelib.core import sessions_viewed
from chalicelib.core import tenants, users, projects, license
from chalicelib.core import webhook
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import helper
from chalicelib.utils.TimeUTC import TimeUTC
from or_dependencies import OR_context
from routers.base import get_routers


@@ -44,7 +46,6 @@ def get_account(context: schemas.CurrentContext = Depends(OR_context)):

@app.post('/account', tags=["account"])
@app.put('/account', tags=["account"])
def edit_account(data: schemas.EditUserSchema = Body(...),
                 context: schemas.CurrentContext = Depends(OR_context)):
    return users.edit(tenant_id=context.tenant_id, user_id_to_update=context.user_id, changes=data,

@@ -68,8 +69,8 @@ def get_project(projectId: int, context: schemas.CurrentContext = Depends(OR_con
    return {"data": data}


@app.put('/integrations/slack', tags=['integrations'])
@app.post('/integrations/slack', tags=['integrations'])
@app.put('/integrations/slack', tags=['integrations'])
def add_slack_client(data: schemas.AddSlackSchema, context: schemas.CurrentContext = Depends(OR_context)):
    n = Slack.add_channel(tenant_id=context.tenant_id, url=data.url, name=data.name)
    if n is None:

@@ -79,7 +80,6 @@ def add_slack_client(data: schemas.AddSlackSchema, context: schemas.CurrentConte
    return {"data": n}


@app.put('/integrations/slack/{integrationId}', tags=['integrations'])
@app.post('/integrations/slack/{integrationId}', tags=['integrations'])
def edit_slack_integration(integrationId: int, data: schemas.EditSlackSchema = Body(...),
                           context: schemas.CurrentContext = Depends(OR_context)):

@@ -96,7 +96,6 @@ def edit_slack_integration(integrationId: int, data: schemas.EditSlackSchema = B

@app.post('/client/members', tags=["client"])
@app.put('/client/members', tags=["client"])
def add_member(background_tasks: BackgroundTasks, data: schemas.CreateMemberSchema = Body(...),
               context: schemas.CurrentContext = Depends(OR_context)):
    return users.create_member(tenant_id=context.tenant_id, user_id=context.user_id, data=data.dict(),

@@ -121,7 +120,6 @@ def process_invitation_link(token: str):

@public_app.post('/password/reset', tags=["users"])
@public_app.put('/password/reset', tags=["users"])
def change_password_by_invitation(data: schemas.EditPasswordByInvitationSchema = Body(...)):
    if data is None or len(data.invitation) < 64 or len(data.passphrase) < 8:
        return {"errors": ["please provide a valid invitation & pass"]}

@@ -135,11 +133,10 @@ def change_password_by_invitation(data: schemas.EditPasswordByInvitationSchema =

@app.put('/client/members/{memberId}', tags=["client"])
@app.post('/client/members/{memberId}', tags=["client"])
def edit_member(memberId: int, data: schemas.EditMemberSchema,
                context: schemas.CurrentContext = Depends(OR_context)):
    return users.edit(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data,
                      user_id_to_update=memberId)
    return users.edit_member(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data,
                             user_id_to_update=memberId)


@app.get('/metadata/session_search', tags=["metadata"])

@@ -165,3 +162,257 @@ def get_general_stats():
def get_projects(context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True,
                                          stack_integrations=True)}


@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
def get_session(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
                context: schemas.CurrentContext = Depends(OR_context)):
    if isinstance(sessionId, str):
        return {"errors": ["session not found"]}
    data = sessions.get_by_id2_pg(project_id=projectId, session_id=sessionId, full_data=True,
                                  include_fav_viewed=True, group_metadata=True, context=context)
    if data is None:
        return {"errors": ["session not found"]}
    if data.get("inDB"):
        background_tasks.add_task(sessions_viewed.view_session, project_id=projectId, user_id=context.user_id,
                                  session_id=sessionId)
    return {
        'data': data
    }


@app.get('/{projectId}/sessions/{sessionId}/errors/{errorId}/sourcemaps', tags=["sessions", "sourcemaps"])
def get_error_trace(projectId: int, sessionId: int, errorId: str,
                    context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_trace(project_id=projectId, error_id=errorId)
    if "errors" in data:
        return data
    return {
        'data': data
    }


@app.post('/{projectId}/errors/search', tags=['errors'])
def errors_search(projectId: int, data: schemas.SearchErrorsSchema = Body(...),
                  context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": errors.search(data, projectId, user_id=context.user_id)}


@app.get('/{projectId}/errors/stats', tags=['errors'])
def errors_stats(projectId: int, startTimestamp: int, endTimestamp: int,
                 context: schemas.CurrentContext = Depends(OR_context)):
    return errors.stats(projectId, user_id=context.user_id, startTimestamp=startTimestamp, endTimestamp=endTimestamp)


@app.get('/{projectId}/errors/{errorId}', tags=['errors'])
def errors_get_details(projectId: int, errorId: str, background_tasks: BackgroundTasks, density24: int = 24,
                       density30: int = 30, context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_details(project_id=projectId, user_id=context.user_id, error_id=errorId,
                              **{"density24": density24, "density30": density30})
    if data.get("data") is not None:
        background_tasks.add_task(errors_viewed.viewed_error, project_id=projectId, user_id=context.user_id,
                                  error_id=errorId)
    return data


@app.get('/{projectId}/errors/{errorId}/stats', tags=['errors'])
def errors_get_details_right_column(projectId: int, errorId: str, startDate: int = TimeUTC.now(-7),
                                    endDate: int = TimeUTC.now(), density: int = 7,
                                    context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_details_chart(project_id=projectId, user_id=context.user_id, error_id=errorId,
                                    **{"startDate": startDate, "endDate": endDate, "density": density})
    return data


@app.get('/{projectId}/errors/{errorId}/sourcemaps', tags=['errors'])
def errors_get_details_sourcemaps(projectId: int, errorId: str,
                                  context: schemas.CurrentContext = Depends(OR_context)):
    data = errors.get_trace(project_id=projectId, error_id=errorId)
    if "errors" in data:
        return data
    return {
        'data': data
    }


@app.get('/{projectId}/errors/{errorId}/{action}', tags=["errors"])
def add_remove_favorite_error(projectId: int, errorId: str, action: str, startDate: int = TimeUTC.now(-7),
                              endDate: int = TimeUTC.now(), context: schemas.CurrentContext = Depends(OR_context)):
    if action == "favorite":
        return errors_favorite.favorite_error(project_id=projectId, user_id=context.user_id, error_id=errorId)
    elif action == "sessions":
        start_date = startDate
        end_date = endDate
        return {
            "data": errors.get_sessions(project_id=projectId, user_id=context.user_id, error_id=errorId,
                                        start_date=start_date, end_date=end_date)}
    elif action in list(errors.ACTION_STATE.keys()):
        return errors.change_state(project_id=projectId, user_id=context.user_id, error_id=errorId, action=action)
    else:
        return {"errors": ["undefined action"]}


@app.get('/{projectId}/assist/sessions/{sessionId}', tags=["assist"])
def get_live_session(projectId: int, sessionId: str, background_tasks: BackgroundTasks,
                     context: schemas.CurrentContext = Depends(OR_context)):
    data = assist.get_live_session_by_id(project_id=projectId, session_id=sessionId)
    if data is None:
        data = sessions.get_by_id2_pg(context=context, project_id=projectId, session_id=sessionId,
                                      full_data=True, include_fav_viewed=True, group_metadata=True, live=False)
        if data is None:
            return {"errors": ["session not found"]}
        if data.get("inDB"):
            background_tasks.add_task(sessions_viewed.view_session, project_id=projectId,
                                      user_id=context.user_id, session_id=sessionId)
    return {'data': data}


@app.get('/{projectId}/unprocessed/{sessionId}/dom.mob', tags=["assist"])
def get_live_session_replay_file(projectId: int, sessionId: Union[int, str],
                                 context: schemas.CurrentContext = Depends(OR_context)):
    not_found = {"errors": ["Replay file not found"]}
    if isinstance(sessionId, str):
        print(f"{sessionId} not a valid number.")
        return not_found
    if not sessions.session_exists(project_id=projectId, session_id=sessionId):
        print(f"{projectId}/{sessionId} not found in DB.")
        if not assist.session_exists(project_id=projectId, session_id=sessionId):
            print(f"{projectId}/{sessionId} not found in Assist.")
            return not_found

    path = assist.get_raw_mob_by_id(project_id=projectId, session_id=sessionId)
    if path is None:
        return not_found

    return FileResponse(path=path, media_type="application/octet-stream")


@app.get('/{projectId}/unprocessed/{sessionId}/devtools.mob', tags=["assist"])
def get_live_session_devtools_file(projectId: int, sessionId: Union[int, str],
                                   context: schemas.CurrentContext = Depends(OR_context)):
    not_found = {"errors": ["Devtools file not found"]}
    if isinstance(sessionId, str):
        print(f"{sessionId} not a valid number.")
        return not_found
    if not sessions.session_exists(project_id=projectId, session_id=sessionId):
        print(f"{projectId}/{sessionId} not found in DB.")
        if not assist.session_exists(project_id=projectId, session_id=sessionId):
            print(f"{projectId}/{sessionId} not found in Assist.")
            return not_found

    path = assist.get_raw_devtools_by_id(project_id=projectId, session_id=sessionId)
    if path is None:
        return {"errors": ["Devtools file not found"]}

    return FileResponse(path=path, media_type="application/octet-stream")


@app.post('/{projectId}/heatmaps/url', tags=["heatmaps"])
def get_heatmaps_by_url(projectId: int, data: schemas.GetHeatmapPayloadSchema = Body(...),
                        context: schemas.CurrentContext = Depends(OR_context)):
    return {"data": heatmaps.get_by_url(project_id=projectId, data=data.dict())}


@app.get('/{projectId}/sessions/{sessionId}/favorite', tags=["sessions"])
def add_remove_favorite_session2(projectId: int, sessionId: int,
                                 context: schemas.CurrentContext = Depends(OR_context)):
    return {
        "data": sessions_favorite.favorite_session(context=context, project_id=projectId, session_id=sessionId)}


@app.get('/{projectId}/sessions/{sessionId}/assign', tags=["sessions"])
def assign_session(projectId: int, sessionId, context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.get_by_session(project_id=projectId, session_id=sessionId,
                                               tenant_id=context.tenant_id,
                                               user_id=context.user_id)
    if "errors" in data:
        return data
    return {
        'data': data
    }


@app.get('/{projectId}/sessions/{sessionId}/assign/{issueId}', tags=["sessions", "issueTracking"])
def assign_session(projectId: int, sessionId: int, issueId: str,
                   context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.get(project_id=projectId, session_id=sessionId, assignment_id=issueId,
                                    tenant_id=context.tenant_id, user_id=context.user_id)
    if "errors" in data:
        return data
    return {
        'data': data
    }


@app.post('/{projectId}/sessions/{sessionId}/assign/{issueId}/comment', tags=["sessions", "issueTracking"])
def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schemas.CommentAssignmentSchema = Body(...),
                       context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_assignments.comment(tenant_id=context.tenant_id, project_id=projectId,
                                        session_id=sessionId, assignment_id=issueId,
                                        user_id=context.user_id, message=data.message)
    if "errors" in data.keys():
        return data
    return {
        'data': data
    }


@app.post('/{projectId}/sessions/{sessionId}/notes', tags=["sessions", "notes"])
def create_note(projectId: int, sessionId: int, data: schemas.SessionNoteSchema = Body(...),
                context: schemas.CurrentContext = Depends(OR_context)):
    if not sessions.session_exists(project_id=projectId, session_id=sessionId):
        return {"errors": ["Session not found"]}
    data = sessions_notes.create(tenant_id=context.tenant_id, project_id=projectId,
                                 session_id=sessionId, user_id=context.user_id, data=data)
    if "errors" in data.keys():
        return data
    return {
        'data': data
    }


@app.get('/{projectId}/sessions/{sessionId}/notes', tags=["sessions", "notes"])
def get_session_notes(projectId: int, sessionId: int, context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_notes.get_session_notes(tenant_id=context.tenant_id, project_id=projectId,
                                            session_id=sessionId, user_id=context.user_id)
    if "errors" in data:
        return data
    return {
        'data': data
    }


@app.post('/{projectId}/notes/{noteId}', tags=["sessions", "notes"])
def edit_note(projectId: int, noteId: int, data: schemas.SessionUpdateNoteSchema = Body(...),
              context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_notes.edit(tenant_id=context.tenant_id, project_id=projectId, user_id=context.user_id,
                               note_id=noteId, data=data)
    if "errors" in data.keys():
        return data
    return {
        'data': data
    }


@app.delete('/{projectId}/notes/{noteId}', tags=["sessions", "notes"])
def delete_note(projectId: int, noteId: int, context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_notes.delete(tenant_id=context.tenant_id, project_id=projectId, user_id=context.user_id,
                                 note_id=noteId)
    return data


@app.get('/{projectId}/notes/{noteId}/slack/{webhookId}', tags=["sessions", "notes"])
def share_note_to_slack(projectId: int, noteId: int, webhookId: int,
                        context: schemas.CurrentContext = Depends(OR_context)):
    return sessions_notes.share_to_slack(tenant_id=context.tenant_id, project_id=projectId, user_id=context.user_id,
                                         note_id=noteId, webhook_id=webhookId)


@app.post('/{projectId}/notes', tags=["sessions", "notes"])
def get_all_notes(projectId: int, data: schemas.SearchNoteSchema = Body(...),
                  context: schemas.CurrentContext = Depends(OR_context)):
    data = sessions_notes.get_all_notes_by_project_id(tenant_id=context.tenant_id, project_id=projectId,
                                                      user_id=context.user_id, data=data)
    if "errors" in data:
        return data
    return {'data': data}

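For reference, a hedged client-side sketch of the session-notes endpoints added above. The route paths and the camelCase payload fields come from the routes and SessionNoteSchema in this diff; the base URL and the Bearer-token auth header are assumptions about a concrete deployment:

import requests

BASE_URL = "https://openreplay.example.com/api"  # assumed deployment URL
HEADERS = {"Authorization": "Bearer <jwt>"}      # assumed auth scheme


def create_session_note(project_id: int, session_id: int, message: str) -> dict:
    # Payload mirrors SessionNoteSchema with its camelCase aliases:
    # message, tag, timestamp, isPublic.
    payload = {"message": message, "tag": None, "timestamp": -1, "isPublic": False}
    resp = requests.post(f"{BASE_URL}/{project_id}/sessions/{session_id}/notes",
                         json=payload, headers=HEADERS, timeout=10)
    resp.raise_for_status()
    return resp.json()


def search_notes(project_id: int) -> dict:
    # POST /{projectId}/notes takes a SearchNoteSchema body; every field has a
    # default, so an empty object relies entirely on the server-side defaults.
    resp = requests.post(f"{BASE_URL}/{project_id}/notes",
                         json={}, headers=HEADERS, timeout=10)
    resp.raise_for_status()
    return resp.json()
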
@@ -28,7 +28,6 @@ class UserLoginSchema(_Grecaptcha):
class UserSignupSchema(UserLoginSchema):
    fullname: str = Field(...)
    organizationName: str = Field(...)
    projectName: str = Field(default="my first project")

    class Config:
        alias_generator = attribute_to_camel_case

@@ -875,14 +874,14 @@ class TryCustomMetricsPayloadSchema(CustomMetricChartPayloadSchema):


class CustomMetricsConfigSchema(BaseModel):
    col: Optional[int] = Field(default=2)
    col: Optional[int] = Field(...)
    row: Optional[int] = Field(default=2)
    position: Optional[int] = Field(default=0)


class CreateCustomMetricsSchema(TryCustomMetricsPayloadSchema):
    series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1)
    config: CustomMetricsConfigSchema = Field(default=CustomMetricsConfigSchema())
    config: CustomMetricsConfigSchema = Field(...)

    @root_validator(pre=True)
    def transform_series(cls, values):

@@ -1084,3 +1083,41 @@ class IntegrationType(str, Enum):
    stackdriver = "STACKDRIVER"
    cloudwatch = "CLOUDWATCH"
    newrelic = "NEWRELIC"


class SearchNoteSchema(_PaginatedSchema):
    sort: str = Field(default="createdAt")
    order: SortOrderType = Field(default=SortOrderType.desc)
    tags: Optional[List[str]] = Field(default=[])
    shared_only: bool = Field(default=False)
    mine_only: bool = Field(default=False)

    class Config:
        alias_generator = attribute_to_camel_case


class SessionNoteSchema(BaseModel):
    message: str = Field(..., min_length=2)
    tag: Optional[str] = Field(default=None)
    timestamp: int = Field(default=-1)
    is_public: bool = Field(default=False)

    class Config:
        alias_generator = attribute_to_camel_case


class SessionUpdateNoteSchema(SessionNoteSchema):
    message: Optional[str] = Field(default=None, min_length=2)
    timestamp: Optional[int] = Field(default=None, ge=-1)
    is_public: Optional[bool] = Field(default=None)

    @root_validator
    def validator(cls, values):
        assert len(values.keys()) > 0, "at least 1 attribute should be provided for update"
        c = 0
        for v in values.values():
            if v is not None and (not isinstance(v, str) or len(v) > 0):
                c += 1
                break
        assert c > 0, "at least 1 value should be provided for update"
        return values

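A standalone sketch of the update-note rule above, trimmed to the three optional fields and renamed so it is clearly an illustration rather than the project's own schema (pydantic v1 root_validator, as used in the original):

from typing import Optional
from pydantic import BaseModel, Field, root_validator


class SessionUpdateNoteSketch(BaseModel):
    message: Optional[str] = Field(default=None, min_length=2)
    timestamp: Optional[int] = Field(default=None, ge=-1)
    is_public: Optional[bool] = Field(default=None)

    @root_validator
    def require_at_least_one_value(cls, values):
        # Mirrors the diff: reject an update where every value is None or an empty string.
        provided = [v for v in values.values()
                    if v is not None and (not isinstance(v, str) or len(v) > 0)]
        assert len(provided) > 0, "at least 1 value should be provided for update"
        return values


SessionUpdateNoteSketch(timestamp=120)   # passes
# SessionUpdateNoteSketch()              # raises ValidationError: nothing to update
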
@@ -1,6 +1,6 @@
FROM golang:1.18-alpine3.15 AS prepare

RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash
RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5

WORKDIR /root


@@ -15,11 +15,11 @@ COPY pkg pkg
COPY internal internal

ARG SERVICE_NAME
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/cmd/$SERVICE_NAME
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags dynamic openreplay/backend/cmd/$SERVICE_NAME


FROM alpine AS entrypoint
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache ca-certificates librdkafka-dev cyrus-sasl cyrus-sasl-gssapiv2 krb5
RUN adduser -u 1001 openreplay -D

ENV TZ=UTC \

@@ -29,6 +29,18 @@ ENV TZ=UTC \
    UAPARSER_FILE=/home/openreplay/regexes.yaml \
    HTTP_PORT=8080 \
    KAFKA_USE_SSL=true \
    # KAFKA_USE_KERBEROS should be set true if you wish to use Kerberos auth for Kafka
    KAFKA_USE_KERBEROS=false \
    # KERBEROS_SERVICE_NAME is the primary name of the Brokers configured in the Broker JAAS file
    KERBEROS_SERVICE_NAME="" \
    # KERBEROS_PRINCIPAL is this client's principal name
    KERBEROS_PRINCIPAL="" \
    # KERBEROS_KEYTAB_LOCATION is the absolute path to the keytab to be used for authentication
    KERBEROS_KEYTAB_LOCATION="" \
    # KAFKA_SSL_KEY is the absolute path to the CA cert for verifying the broker's key
    KAFKA_SSL_KEY="" \
    # KAFKA_SSL_CERT is a CA cert string (PEM format) for verifying the broker's key
    KAFKA_SSL_CERT="" \
    KAFKA_MAX_POLL_INTERVAL_MS=400000 \
    REDIS_STREAMS_MAX_LEN=10000 \
    TOPIC_RAW_WEB=raw \

@@ -1,6 +1,6 @@
FROM golang:1.18-alpine3.15 AS prepare

RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash
RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash librdkafka-dev cyrus-sasl-gssapi cyrus-sasl-devel

WORKDIR /root


@@ -14,11 +14,11 @@ COPY cmd cmd
COPY pkg pkg
COPY internal internal

RUN for name in assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags musl openreplay/backend/cmd/$name; done
RUN for name in assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags dynamic openreplay/backend/cmd/$name; done

FROM alpine AS entrypoint
#FROM pygmy/alpine-tini:latest
RUN apk add --no-cache ca-certificates
RUN apk add --no-cache ca-certificates librdkafka-dev cyrus-sasl-gssapi cyrus-sasl-devel pkgconf

ENV TZ=UTC \
    FS_ULIMIT=1000 \

@@ -28,6 +28,18 @@ ENV TZ=UTC \
    HTTP_PORT=80 \
    BEACON_SIZE_LIMIT=7000000 \
    KAFKA_USE_SSL=true \
    # KAFKA_USE_KERBEROS should be set true if you wish to use Kerberos auth for Kafka
    KAFKA_USE_KERBEROS=false \
    # KERBEROS_SERVICE_NAME is the primary name of the Brokers configured in the Broker JAAS file
    KERBEROS_SERVICE_NAME="" \
    # KERBEROS_PRINCIPAL is this client's principal name
    KERBEROS_PRINCIPAL="" \
    # KERBEROS_KEYTAB_LOCATION is the absolute path to the keytab to be used for authentication
    KERBEROS_KEYTAB_LOCATION="" \
    # KAFKA_SSL_KEY is the absolute path to the CA cert for verifying the broker's key
    KAFKA_SSL_KEY="" \
    # KAFKA_SSL_CERT is a CA cert string (PEM format) for verifying the broker's key
    KAFKA_SSL_CERT="" \
    KAFKA_MAX_POLL_INTERVAL_MS=400000 \
    REDIS_STREAMS_MAX_LEN=3000 \
    TOPIC_RAW_WEB=raw \

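The KAFKA_/KERBEROS_ variables above map onto standard librdkafka SASL/GSSAPI settings. As a hedged illustration only (the exact mapping performed by the Go services is not shown in this diff, and all concrete values below are placeholders), the same options expressed as librdkafka properties through confluent-kafka's Python client:

from confluent_kafka import Consumer  # librdkafka-based client

conf = {
    "bootstrap.servers": "kafka.db.svc.cluster.local:9092",      # assumed broker address
    "group.id": "example-group",                                 # assumed consumer group
    "security.protocol": "SASL_SSL",                             # roughly KAFKA_USE_SSL + KAFKA_USE_KERBEROS
    "sasl.mechanisms": "GSSAPI",                                 # Kerberos authentication
    "sasl.kerberos.service.name": "kafka",                       # KERBEROS_SERVICE_NAME
    "sasl.kerberos.principal": "openreplay@EXAMPLE.ORG",         # KERBEROS_PRINCIPAL
    "sasl.kerberos.keytab": "/etc/security/openreplay.keytab",   # KERBEROS_KEYTAB_LOCATION
    "max.poll.interval.ms": 400000,                              # KAFKA_MAX_POLL_INTERVAL_MS
}
consumer = Consumer(conf)
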
@@ -7,6 +7,7 @@

# Example
# Usage: IMAGE_TAG=latest DOCKER_REPO=myDockerHubID bash build.sh <ee>
set -e

git_sha1=${IMAGE_TAG:-$(git rev-parse HEAD)}
ee="false"

@@ -25,12 +26,17 @@ function build_service() {
    [[ $PUSH_IMAGE -eq 1 ]] && {
        docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
    }
    echo "Build completed for $image"
    return
}

function build_api(){
    cp -R ../backend ../_backend
    cd ../_backend
    destination="_backend"
    [[ $1 == "ee" ]] && {
        destination="_backend_ee"
    }
    cp -R ../backend ../${destination}
    cd ../${destination}
    # Copy enterprise code
    [[ $1 == "ee" ]] && {
        cp -r ../ee/backend/* ./

@@ -38,6 +44,8 @@ function build_api(){
    }
    [[ $2 != "" ]] && {
        build_service $2
        cd ../backend
        rm -rf ../${destination}
        return
    }
    for image in $(ls cmd);

@@ -46,7 +54,7 @@ function build_api(){
        echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
    done
    cd ../backend
    rm -rf ../_backend
    rm -rf ../${destination}
    echo "backend build completed"
}

@@ -1 +0,0 @@
GROUP_CACHE=from_file

@@ -3,7 +3,6 @@ package main
import (
	"context"
	"log"
	"openreplay/backend/pkg/queue/types"
	"os"
	"os/signal"
	"syscall"

@@ -31,40 +30,28 @@ func main() {
		log.Printf("can't create assets_total metric: %s", err)
	}

	consumer := queue.NewMessageConsumer(
	msgHandler := func(msg messages.Message) {
		switch m := msg.(type) {
		case *messages.AssetCache:
			cacher.CacheURL(m.SessionID(), m.URL)
			totalAssets.Add(context.Background(), 1)
		// TODO: connect to "raw" topic in order to listen for JSException
		case *messages.JSException:
			sourceList, err := assets.ExtractJSExceptionSources(&m.Payload)
			if err != nil {
				log.Printf("Error on source extraction: %v", err)
				return
			}
			for _, source := range sourceList {
				cacher.CacheJSFile(source)
			}
		}
	}

	msgConsumer := queue.NewConsumer(
		cfg.GroupCache,
		[]string{cfg.TopicCache},
		func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
			for iter.Next() {
				if iter.Type() == messages.MsgAssetCache {
					m := iter.Message().Decode()
					if m == nil {
						return
					}
					msg := m.(*messages.AssetCache)
					cacher.CacheURL(sessionID, msg.URL)
					totalAssets.Add(context.Background(), 1)
				} else if iter.Type() == messages.MsgErrorEvent {
					m := iter.Message().Decode()
					if m == nil {
						return
					}
					msg := m.(*messages.ErrorEvent)
					if msg.Source != "js_exception" {
						continue
					}
					sourceList, err := assets.ExtractJSExceptionSources(&msg.Payload)
					if err != nil {
						log.Printf("Error on source extraction: %v", err)
						continue
					}
					for _, source := range sourceList {
						cacher.CacheJSFile(source)
					}
				}
			}
			iter.Close()
		},
		messages.NewMessageIterator(msgHandler, []int{messages.MsgAssetCache, messages.MsgJSException}, true),
		true,
		cfg.MessageSizeLimit,
	)

@@ -79,15 +66,18 @@ func main() {
	select {
	case sig := <-sigchan:
		log.Printf("Caught signal %v: terminating\n", sig)
		consumer.Close()
		cacher.Stop()
		msgConsumer.Close()
		os.Exit(0)
	case err := <-cacher.Errors:
		log.Printf("Error while caching: %v", err)
		// TODO: notify user
	case <-tick:
		cacher.UpdateTimeouts()
	default:
		if err := consumer.ConsumeNext(); err != nil {
		if !cacher.CanCache() {
			continue
		}
		if err := msgConsumer.ConsumeNext(); err != nil {
			log.Fatalf("Error on consumption: %v", err)
		}
	}

@@ -3,6 +3,7 @@ package main
import (
	"errors"
	"log"
	types2 "openreplay/backend/pkg/db/types"
	"openreplay/backend/pkg/queue/types"
	"os"
	"os/signal"

@@ -30,7 +31,8 @@ func main() {
	cfg := db.New()

	// Init database
	pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
	pg := cache.NewPGCache(
		postgres.NewConn(cfg.Postgres, cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
	defer pg.Close()

	// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.

@@ -45,10 +47,6 @@ func main() {
	// Create handler's aggregator
	builderMap := sessions.NewBuilderMap(handlersFabric)

	keepMessage := func(tp int) bool {
		return tp == messages.MsgMetadata || tp == messages.MsgIssueEvent || tp == messages.MsgSessionStart || tp == messages.MsgSessionEnd || tp == messages.MsgUserID || tp == messages.MsgUserAnonymousID || tp == messages.MsgCustomEvent || tp == messages.MsgClickEvent || tp == messages.MsgInputEvent || tp == messages.MsgPageEvent || tp == messages.MsgErrorEvent || tp == messages.MsgFetchEvent || tp == messages.MsgGraphQLEvent || tp == messages.MsgIntegrationEvent || tp == messages.MsgPerformanceTrackAggr || tp == messages.MsgResourceEvent || tp == messages.MsgLongTask || tp == messages.MsgJSException || tp == messages.MsgResourceTiming || tp == messages.MsgRawCustomEvent || tp == messages.MsgCustomIssue || tp == messages.MsgFetch || tp == messages.MsgGraphQL || tp == messages.MsgStateAction || tp == messages.MsgSetInputTarget || tp == messages.MsgSetInputValue || tp == messages.MsgCreateDocument || tp == messages.MsgMouseClick || tp == messages.MsgSetPageLocation || tp == messages.MsgPageLoadTiming || tp == messages.MsgPageRenderTiming
	}

	var producer types.Producer = nil
	if cfg.UseQuickwit {
		producer = queue.NewProducer(cfg.MessageSizeLimit, true)

@@ -60,69 +58,74 @@ func main() {
	saver.InitStats()
	statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)

	msgFilter := []int{messages.MsgMetadata, messages.MsgIssueEvent, messages.MsgSessionStart, messages.MsgSessionEnd,
		messages.MsgUserID, messages.MsgUserAnonymousID, messages.MsgClickEvent,
		messages.MsgIntegrationEvent, messages.MsgPerformanceTrackAggr,
		messages.MsgJSException, messages.MsgResourceTiming,
		messages.MsgRawCustomEvent, messages.MsgCustomIssue, messages.MsgFetch, messages.MsgGraphQL,
		messages.MsgStateAction, messages.MsgSetInputTarget, messages.MsgSetInputValue, messages.MsgCreateDocument,
		messages.MsgMouseClick, messages.MsgSetPageLocation, messages.MsgPageLoadTiming, messages.MsgPageRenderTiming}

	// Handler logic
	handler := func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
		statsLogger.Collect(sessionID, meta)
	msgHandler := func(msg messages.Message) {
		statsLogger.Collect(msg)

		for iter.Next() {
			if !keepMessage(iter.Type()) {
				continue
		// Just save session data into db without additional checks
		if err := saver.InsertMessage(msg); err != nil {
			if !postgres.IsPkeyViolation(err) {
				log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
			}
			msg := iter.Message().Decode()
			if msg == nil {
				return
			}

			// Just save session data into db without additional checks
			if err := saver.InsertMessage(sessionID, msg); err != nil {
				if !postgres.IsPkeyViolation(err) {
					log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, sessionID, msg)
				}
				return
			}

			session, err := pg.GetSession(sessionID)
			if session == nil {
				if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
					log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, sessionID, msg)
				}
				return
			}

			// Save statistics to db
			err = saver.InsertStats(session, msg)
			if err != nil {
				log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
			}

			// Handle heuristics and save to temporary queue in memory
			builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)

			// Process saved heuristics messages as usual messages above in the code
			builderMap.IterateSessionReadyMessages(sessionID, func(msg messages.Message) {
				if err := saver.InsertMessage(sessionID, msg); err != nil {
					if !postgres.IsPkeyViolation(err) {
						log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
					}
					return
				}

				if err := saver.InsertStats(session, msg); err != nil {
					log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
				}
			})
			return
		}
		iter.Close()

		var (
			session *types2.Session
			err     error
		)
		if msg.TypeID() == messages.MsgSessionEnd {
			session, err = pg.GetSession(msg.SessionID())
		} else {
			session, err = pg.Cache.GetSession(msg.SessionID())
		}
		if session == nil {
			if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
				log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, msg.SessionID(), msg)
			}
			return
		}

		// Save statistics to db
		err = saver.InsertStats(session, msg)
		if err != nil {
			log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
		}

		// Handle heuristics and save to temporary queue in memory
		builderMap.HandleMessage(msg)

		// Process saved heuristics messages as usual messages above in the code
		builderMap.IterateSessionReadyMessages(msg.SessionID(), func(msg messages.Message) {
			if err := saver.InsertMessage(msg); err != nil {
				if !postgres.IsPkeyViolation(err) {
					log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
				}
				return
			}

			if err := saver.InsertStats(session, msg); err != nil {
				log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
			}
		})
	}

	// Init consumer
	consumer := queue.NewMessageConsumer(
	consumer := queue.NewConsumer(
		cfg.GroupDB,
		[]string{
			cfg.TopicRawWeb,
			cfg.TopicAnalytics,
		},
		handler,
		messages.NewMessageIterator(msgHandler, msgFilter, true),
		false,
		cfg.MessageSizeLimit,
	)

@@ -133,33 +136,36 @@ func main() {
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	commitTick := time.Tick(cfg.CommitBatchTimeout)

	// Send collected batches to db
	commitDBUpdates := func() {
		start := time.Now()
		pg.CommitBatches()
		pgDur := time.Now().Sub(start).Milliseconds()

		start = time.Now()
		if err := saver.CommitStats(); err != nil {
			log.Printf("Error on stats commit: %v", err)
		}
		chDur := time.Now().Sub(start).Milliseconds()
		log.Printf("commit duration(ms), pg: %d, ch: %d", pgDur, chDur)

		if err := consumer.Commit(); err != nil {
			log.Printf("Error on consumer commit: %v", err)
		}
	}
	for {
		select {
		case sig := <-sigchan:
			log.Printf("Caught signal %v: terminating\n", sig)
			log.Printf("Caught signal %s: terminating\n", sig.String())
			commitDBUpdates()
			consumer.Close()
			os.Exit(0)
		case <-commitTick:
			// Send collected batches to db
			start := time.Now()
			pg.CommitBatches()
			pgDur := time.Now().Sub(start).Milliseconds()

			start = time.Now()
			if err := saver.CommitStats(consumer.HasFirstPartition()); err != nil {
				log.Printf("Error on stats commit: %v", err)
			}
			chDur := time.Now().Sub(start).Milliseconds()
			log.Printf("commit duration(ms), pg: %d, ch: %d", pgDur, chDur)

			// TODO: use commit worker to save time each tick
			if err := consumer.Commit(); err != nil {
				log.Printf("Error on consumer commit: %v", err)
			}
			commitDBUpdates()
		default:
			// Handle new message from queue
			err := consumer.ConsumeNext()
			if err != nil {
			if err := consumer.ConsumeNext(); err != nil {
				log.Fatalf("Error on consumption: %v", err)
			}
		}

@@ -1,92 +0,0 @@
chalice:
  env:
    jwt_secret: SetARandomStringHere
clickhouse:
  enabled: false
fromVersion: v1.6.0
global:
  domainName: openreplay.local
  email:
    emailFrom: OpenReplay<do-not-reply@openreplay.com>
    emailHost: ""
    emailPassword: ""
    emailPort: "587"
    emailSslCert: ""
    emailSslKey: ""
    emailUseSsl: "false"
    emailUseTls: "true"
    emailUser: ""
  enterpriseEditionLicense: ""
  ingress:
    controller:
      config:
        enable-real-ip: true
        force-ssl-redirect: false
        max-worker-connections: 0
        proxy-body-size: 10m
        ssl-redirect: false
      extraArgs:
        default-ssl-certificate: app/openreplay-ssl
      ingressClass: openreplay
      ingressClassResource:
        name: openreplay
      service:
        externalTrafficPolicy: Local
  kafka:
    kafkaHost: kafka.db.svc.cluster.local
    kafkaPort: "9092"
    kafkaUseSsl: "false"
    zookeeperHost: databases-zookeeper.svc.cluster.local
    zookeeperNonTLSPort: 2181
  postgresql:
    postgresqlDatabase: postgres
    postgresqlHost: postgresql.db.svc.cluster.local
    postgresqlPassword: changeMePassword
    postgresqlPort: "5432"
    postgresqlUser: postgres
  redis:
    redisHost: redis-master.db.svc.cluster.local
    redisPort: "6379"
  s3:
    accessKey: changeMeMinioAccessKey
    assetsBucket: sessions-assets
    endpoint: http://minio.db.svc.cluster.local:9000
    recordingsBucket: mobs
    region: us-east-1
    secretKey: changeMeMinioPassword
    sourcemapsBucket: sourcemaps
ingress-nginx:
  controller:
    config:
      enable-real-ip: true
      force-ssl-redirect: false
      max-worker-connections: 0
      proxy-body-size: 10m
      ssl-redirect: false
    extraArgs:
      default-ssl-certificate: app/openreplay-ssl
    ingressClass: openreplay
    ingressClassResource:
      name: openreplay
    service:
      externalTrafficPolicy: Local
kafka:
  kafkaHost: kafka.db.svc.cluster.local
  kafkaPort: "9092"
  kafkaUseSsl: "false"
  zookeeperHost: databases-zookeeper.svc.cluster.local
  zookeeperNonTLSPort: 2181
minio:
  global:
    minio:
      accessKey: changeMeMinioAccessKey
      secretKey: changeMeMinioPassword
postgresql:
  postgresqlDatabase: postgres
  postgresqlHost: postgresql.db.svc.cluster.local
  postgresqlPassword: changeMePassword
  postgresqlPort: "5432"
  postgresqlUser: postgres
redis:
  redisHost: redis-master.db.svc.cluster.local
  redisPort: "6379"

@@ -2,7 +2,7 @@ package main

import (
	"log"
	"openreplay/backend/pkg/queue/types"
	"openreplay/backend/internal/storage"
	"os"
	"os/signal"
	"syscall"

@@ -20,42 +20,27 @@ import (
)

func main() {
	metrics := monitoring.New("ender")

	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

	// Load service configuration
	metrics := monitoring.New("ender")
	cfg := ender.New()

	pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
	defer pg.Close()

	// Init all modules
	statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
	sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
	sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber, logger.NewQueueStats(cfg.LoggerTimeout))
	if err != nil {
		log.Printf("can't init ender service: %s", err)
		return
	}

	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
	consumer := queue.NewMessageConsumer(
	consumer := queue.NewConsumer(
		cfg.GroupEnder,
		[]string{
			cfg.TopicRawWeb,
		},
		func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
			for iter.Next() {
				if iter.Type() == messages.MsgSessionStart || iter.Type() == messages.MsgSessionEnd {
					continue
				}
				if iter.Message().Meta().Timestamp == 0 {
					log.Printf("ZERO TS, sessID: %d, msgType: %d", sessionID, iter.Type())
				}
				statsLogger.Collect(sessionID, meta)
				sessions.UpdateSession(sessionID, meta.Timestamp, iter.Message().Meta().Timestamp)
			}
			iter.Close()
		},
		[]string{cfg.TopicRawWeb},
		messages.NewMessageIterator(
			func(msg messages.Message) { sessions.UpdateSession(msg) },
			[]int{messages.MsgTimestamp},
			false),
		false,
		cfg.MessageSizeLimit,
	)

@@ -94,7 +79,16 @@ func main() {
			currDuration, newDuration)
		return true
	}
	if err := producer.Produce(cfg.TopicRawWeb, sessionID, messages.Encode(msg)); err != nil {
	if cfg.UseEncryption {
		if key := storage.GenerateEncryptionKey(); key != nil {
			if err := pg.InsertSessionEncryptionKey(sessionID, key); err != nil {
				log.Printf("can't save session encryption key: %s, session will not be encrypted", err)
			} else {
				msg.EncryptionKey = string(key)
			}
		}
	}
	if err := producer.Produce(cfg.TopicRawWeb, sessionID, msg.Encode()); err != nil {
		log.Printf("can't send sessionEnd to topic: %s; sessID: %d", err, sessionID)
		return false
	}

@@ -2,7 +2,6 @@ package main

import (
	"log"
	"openreplay/backend/pkg/queue/types"
	"os"
	"os/signal"
	"syscall"

@@ -47,25 +46,18 @@ func main() {

	// Init producer and consumer for data bus
	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
	consumer := queue.NewMessageConsumer(

	msgHandler := func(msg messages.Message) {
		statsLogger.Collect(msg)
		builderMap.HandleMessage(msg)
	}

	consumer := queue.NewConsumer(
		cfg.GroupHeuristics,
		[]string{
			cfg.TopicRawWeb,
		},
		func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
			var lastMessageID uint64
			for iter.Next() {
				statsLogger.Collect(sessionID, meta)
				msg := iter.Message().Decode()
				if msg == nil {
					log.Printf("failed batch, sess: %d, lastIndex: %d", sessionID, lastMessageID)
					continue
				}
				lastMessageID = msg.Meta().Index
				builderMap.HandleMessage(sessionID, msg, iter.Message().Meta().Index)
			}
			iter.Close()
		},
		messages.NewMessageIterator(msgHandler, nil, true),
		false,
		cfg.MessageSizeLimit,
	)

@@ -86,7 +78,7 @@ func main() {
		os.Exit(0)
	case <-tick:
		builderMap.IterateReadyMessages(func(sessionID uint64, readyMsg messages.Message) {
			producer.Produce(cfg.TopicAnalytics, sessionID, messages.Encode(readyMsg))
			producer.Produce(cfg.TopicAnalytics, sessionID, readyMsg.Encode())
		})
		producer.Flush(cfg.ProducerTimeout)
		consumer.Commit()

@@ -13,12 +13,10 @@ import (

	"openreplay/backend/pkg/db/postgres"
	"openreplay/backend/pkg/intervals"
	"openreplay/backend/pkg/messages"
	"openreplay/backend/pkg/queue"
	"openreplay/backend/pkg/token"
)

//
func main() {
	metrics := monitoring.New("integrations")


@@ -84,7 +82,7 @@ func main() {
			}
			sessionID = sessData.ID
		}
		producer.Produce(cfg.TopicAnalytics, sessionID, messages.Encode(event.IntegrationEvent))
		producer.Produce(cfg.TopicAnalytics, sessionID, event.IntegrationEvent.Encode())
	case err := <-manager.Errors:
		log.Printf("Integration error: %v\n", err)
	case i := <-manager.RequestDataUpdates:

@@ -3,7 +3,6 @@ package main
import (
	"context"
	"log"
	"openreplay/backend/pkg/queue/types"
	"os"
	"os/signal"
	"syscall"

@@ -11,15 +10,18 @@ import (

	"openreplay/backend/internal/config/sink"
	"openreplay/backend/internal/sink/assetscache"
	"openreplay/backend/internal/sink/oswriter"
	"openreplay/backend/internal/sink/sessionwriter"
	"openreplay/backend/internal/storage"
	. "openreplay/backend/pkg/messages"
	"openreplay/backend/pkg/messages"
	"openreplay/backend/pkg/monitoring"
	"openreplay/backend/pkg/pprof"
	"openreplay/backend/pkg/queue"
	"openreplay/backend/pkg/url/assets"
)

func main() {
	pprof.StartProfilingServer()

	metrics := monitoring.New("sink")

	log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)

@@ -30,14 +32,15 @@ func main() {
		log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)
	}

	writer := oswriter.NewWriter(cfg.FsUlimit, cfg.FsDir)
	writer := sessionwriter.NewWriter(cfg.FsUlimit, cfg.FsDir, cfg.FileBuffer, cfg.SyncTimeout)

	producer := queue.NewProducer(cfg.MessageSizeLimit, true)
	defer producer.Close(cfg.ProducerCloseTimeout)
	rewriter := assets.NewRewriter(cfg.AssetsOrigin)
	assetMessageHandler := assetscache.New(cfg, rewriter, producer)
	assetMessageHandler := assetscache.New(cfg, rewriter, producer, metrics)

	counter := storage.NewLogCounter()
	// Session message metrics
	totalMessages, err := metrics.RegisterCounter("messages_total")
	if err != nil {
		log.Printf("can't create messages_total metric: %s", err)

@@ -51,64 +54,70 @@ func main() {
		log.Printf("can't create messages_size metric: %s", err)
	}

	consumer := queue.NewMessageConsumer(
	msgHandler := func(msg messages.Message) {
		// [METRICS] Increase the number of processed messages
		totalMessages.Add(context.Background(), 1)

		// Send SessionEnd trigger to storage service
		if msg.TypeID() == messages.MsgSessionEnd {
			if err := producer.Produce(cfg.TopicTrigger, msg.SessionID(), msg.Encode()); err != nil {
				log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, msg.SessionID())
			}
			writer.Close(msg.SessionID())
			return
		}

		// Process assets
		if msg.TypeID() == messages.MsgSetNodeAttributeURLBased ||
			msg.TypeID() == messages.MsgSetCSSDataURLBased ||
			msg.TypeID() == messages.MsgCSSInsertRuleURLBased ||
			msg.TypeID() == messages.MsgAdoptedSSReplaceURLBased ||
			msg.TypeID() == messages.MsgAdoptedSSInsertRuleURLBased {
			m := msg.Decode()
			if m == nil {
				log.Printf("assets decode err, info: %s", msg.Meta().Batch().Info())
				return
			}
			msg = assetMessageHandler.ParseAssets(m)
		}

		// Filter message
		if !messages.IsReplayerType(msg.TypeID()) {
			return
		}

		// If message timestamp is empty, use at least ts of session start
		ts := msg.Meta().Timestamp
		if ts == 0 {
			log.Printf("zero ts; sessID: %d, msgType: %d", msg.SessionID(), msg.TypeID())
		} else {
			// Log ts of last processed message
			counter.Update(msg.SessionID(), time.UnixMilli(ts))
		}

		// Try to encode message to avoid null data inserts
		data := msg.Encode()
		if data == nil {
			return
		}

		// Write message to file
		if err := writer.Write(msg); err != nil {
			log.Printf("writer error: %s", err)
			return
		}

		// [METRICS] Increase the number of written to the files messages and the message size
		messageSize.Record(context.Background(), float64(len(msg.Encode())))
		savedMessages.Add(context.Background(), 1)
	}

	consumer := queue.NewConsumer(
		cfg.GroupSink,
		[]string{
			cfg.TopicRawWeb,
		},
		func(sessionID uint64, iter Iterator, meta *types.Meta) {
			for iter.Next() {
				// [METRICS] Increase the number of processed messages
				totalMessages.Add(context.Background(), 1)

				// Send SessionEnd trigger to storage service
				if iter.Type() == MsgSessionEnd {
					if err := producer.Produce(cfg.TopicTrigger, sessionID, iter.Message().Encode()); err != nil {
						log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, sessionID)
					}
					continue
				}

				msg := iter.Message()
				// Process assets
				if iter.Type() == MsgSetNodeAttributeURLBased ||
					iter.Type() == MsgSetCSSDataURLBased ||
					iter.Type() == MsgCSSInsertRuleURLBased ||
					iter.Type() == MsgAdoptedSSReplaceURLBased ||
					iter.Type() == MsgAdoptedSSInsertRuleURLBased {
					m := msg.Decode()
					if m == nil {
						return
					}
					msg = assetMessageHandler.ParseAssets(sessionID, m) // TODO: filter type only once (use iterator inside or bring ParseAssets out here).
				}

				// Filter message
				if !IsReplayerType(msg.TypeID()) {
					continue
				}

				// If message timestamp is empty, use at least ts of session start
				ts := msg.Meta().Timestamp
				if ts == 0 {
					log.Printf("zero ts; sessID: %d, msgType: %d", sessionID, iter.Type())
				} else {
					// Log ts of last processed message
					counter.Update(sessionID, time.UnixMilli(ts))
				}

				// Write encoded message with index to session file
				data := msg.EncodeWithIndex()
				if err := writer.Write(sessionID, data); err != nil {
					log.Printf("Writer error: %v\n", err)
				}

				// [METRICS] Increase the number of written to the files messages and the message size
				messageSize.Record(context.Background(), float64(len(data)))
				savedMessages.Add(context.Background(), 1)
			}
			iter.Close()
		},
		messages.NewMessageIterator(msgHandler, nil, false),
		false,
		cfg.MessageSizeLimit,
	)

@@ -117,24 +126,27 @@ func main() {
	sigchan := make(chan os.Signal, 1)
	signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)

	tick := time.Tick(30 * time.Second)
	tick := time.Tick(10 * time.Second)
	tickInfo := time.Tick(30 * time.Second)
	for {
		select {
		case sig := <-sigchan:
			log.Printf("Caught signal %v: terminating\n", sig)
			// Sync and stop writer
			writer.Stop()
			// Commit and stop consumer
			if err := consumer.Commit(); err != nil {
				log.Printf("can't commit messages: %s", err)
			}
			consumer.Close()
			os.Exit(0)
		case <-tick:
			if err := writer.SyncAll(); err != nil {
				log.Fatalf("Sync error: %v\n", err)
			}
			counter.Print()
			if err := consumer.Commit(); err != nil {
				log.Printf("can't commit messages: %s", err)
			}
		case <-tickInfo:
			counter.Print()
			log.Printf("writer: %s", writer.Info())
		default:
			err := consumer.ConsumeNext()
			if err != nil {

@@ -142,5 +154,4 @@ func main() {
		}
	}
}

}

@@ -2,10 +2,8 @@ package main

import (
	"log"
	"openreplay/backend/pkg/queue/types"
	"os"
	"os/signal"
	"strconv"
	"syscall"
	"time"


@@ -38,24 +36,24 @@ func main() {
		log.Fatalf("can't init sessionFinder module: %s", err)
	}

	consumer := queue.NewMessageConsumer(
	consumer := queue.NewConsumer(
		cfg.GroupStorage,
		[]string{
			cfg.TopicTrigger,
		},
		func(sessionID uint64, iter messages.Iterator, meta *types.Meta) {
			for iter.Next() {
				if iter.Type() == messages.MsgSessionEnd {
					msg := iter.Message().Decode().(*messages.SessionEnd)
					if err := srv.UploadKey(strconv.FormatUint(sessionID, 10), 5); err != nil {
						log.Printf("can't find session: %d", sessionID)
						sessionFinder.Find(sessionID, msg.Timestamp)
					}
					// Log timestamp of last processed session
					counter.Update(sessionID, time.UnixMilli(meta.Timestamp))
		messages.NewMessageIterator(
			func(msg messages.Message) {
				sesEnd := msg.(*messages.SessionEnd)
				if err := srv.UploadSessionFiles(sesEnd); err != nil {
					log.Printf("can't find session: %d", msg.SessionID())
					sessionFinder.Find(msg.SessionID(), sesEnd.Timestamp)
				}
			}
		},
				// Log timestamp of last processed session
				counter.Update(msg.SessionID(), time.UnixMilli(msg.Meta().Batch().Timestamp()))
			},
			[]int{messages.MsgSessionEnd},
			true,
		),
		true,
		cfg.MessageSizeLimit,
	)

@ -5,6 +5,7 @@ go 1.18
|
|||
require (
|
||||
cloud.google.com/go/logging v1.4.2
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.2.0
|
||||
github.com/Masterminds/semver v1.5.0
|
||||
github.com/aws/aws-sdk-go v1.44.98
|
||||
github.com/btcsuite/btcutil v1.0.2
|
||||
github.com/elastic/go-elasticsearch/v7 v7.13.1
|
||||
|
|
@ -68,8 +69,8 @@ require (
|
|||
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
|
||||
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
|
||||
golang.org/x/text v0.3.7 // indirect
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f // indirect
|
||||
golang.org/x/text v0.4.0 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
|
||||
google.golang.org/appengine v1.6.7 // indirect
|
||||
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
|
||||
|
|
|
|||
|
|
@ -64,6 +64,8 @@ github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym
|
|||
github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.2.0 h1:dj00TDKY+xwuTJdbpspCSmTLFyWzRJerTHwaBxut1C0=
|
||||
github.com/ClickHouse/clickhouse-go/v2 v2.2.0/go.mod h1:8f2XZUi7XoeU+uPIytSi1cvx8fmJxi7vIgqpvYTF1+o=
|
||||
github.com/Masterminds/semver v1.5.0 h1:H65muMkzWKEuNDnfl9d70GUjFniHKHRbFPGBuZ3QEww=
|
||||
github.com/Masterminds/semver v1.5.0/go.mod h1:MB6lktGJrhw8PrUyiEoblNEGEQ+RzHPF078ddwwvV3Y=
|
||||
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
|
||||
github.com/StackExchange/wmi v0.0.0-20190523213315-cbe66965904d/go.mod h1:3eOhrUMpNV+6aFIbp5/iudMxNCF27Vw2OZgy4xEx0Fg=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
|
|
@ -676,8 +678,9 @@ golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBc
|
|||
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220429233432-b5fbb4746d32/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
|
||||
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f h1:v4INt8xihDGvnrfjMDVXGxw9wrfxYyCjk0KbXjhR55s=
|
||||
golang.org/x/sys v0.0.0-20220722155257-8c9f86f7a55f/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -688,8 +691,9 @@ golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
|||
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
|
||||
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
|
||||
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
|
||||
golang.org/x/text v0.4.0 h1:BrVqGRd7+k1DiOgtnFvAkoQEWQvBc25ouMJM6429SFg=
|
||||
golang.org/x/text v0.4.0/go.mod h1:mrYo+phRRbMaCq/xk9113O4dZlRixOauAjOtrjsXDZ8=
|
||||
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
|
||||
|
|
|
|||
|
|
@ -33,6 +33,11 @@ type cacher struct {
|
|||
sizeLimit int
|
||||
downloadedAssets syncfloat64.Counter
|
||||
requestHeaders map[string]string
|
||||
workers *WorkerPool
|
||||
}
|
||||
|
||||
func (c *cacher) CanCache() bool {
|
||||
return c.workers.CanAddTask()
|
||||
}
|
||||
|
||||
func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
|
||||
|
|
@ -44,7 +49,7 @@ func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
|
|||
if err != nil {
|
||||
log.Printf("can't create downloaded_assets metric: %s", err)
|
||||
}
|
||||
return &cacher{
|
||||
c := &cacher{
|
||||
timeoutMap: newTimeoutMap(),
|
||||
s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets),
|
||||
httpClient: &http.Client{
|
||||
|
|
@ -60,47 +65,48 @@ func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
|
|||
downloadedAssets: downloadedAssets,
|
||||
requestHeaders: cfg.AssetsRequestHeaders,
|
||||
}
|
||||
c.workers = NewPool(64, c.CacheFile)
|
||||
return c
|
||||
}
|
||||
|
||||
func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, urlContext string, isJS bool) {
|
||||
var cachePath string
|
||||
if isJS {
|
||||
cachePath = assets.GetCachePathForJS(requestURL)
|
||||
} else {
|
||||
cachePath = assets.GetCachePathForAssets(sessionID, requestURL)
|
||||
}
|
||||
if c.timeoutMap.contains(cachePath) {
|
||||
return
|
||||
}
|
||||
c.timeoutMap.add(cachePath)
|
||||
crTime := c.s3.GetCreationTime(cachePath)
|
||||
if crTime != nil && crTime.After(time.Now().Add(-MAX_STORAGE_TIME)) { // recently uploaded
|
||||
return
|
||||
}
|
||||
func (c *cacher) CacheFile(task *Task) {
|
||||
c.cacheURL(task)
|
||||
}
|
||||
|
||||
req, _ := http.NewRequest("GET", requestURL, nil)
|
||||
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0")
|
||||
func (c *cacher) cacheURL(t *Task) {
|
||||
t.retries--
|
||||
req, _ := http.NewRequest("GET", t.requestURL, nil)
|
||||
if t.retries%2 == 0 {
|
||||
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0")
|
||||
}
|
||||
for k, v := range c.requestHeaders {
|
||||
req.Header.Set(k, v)
|
||||
}
|
||||
res, err := c.httpClient.Do(req)
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, urlContext)
|
||||
c.Errors <- errors.Wrap(err, t.urlContext)
|
||||
return
|
||||
}
|
||||
defer res.Body.Close()
|
||||
if res.StatusCode >= 400 {
|
||||
// TODO: retry
|
||||
c.Errors <- errors.Wrap(fmt.Errorf("Status code is %v, ", res.StatusCode), urlContext)
|
||||
printErr := true
|
||||
// Retry 403 error
|
||||
if res.StatusCode == 403 && t.retries > 0 {
|
||||
c.workers.AddTask(t)
|
||||
printErr = false
|
||||
}
|
||||
if printErr {
|
||||
c.Errors <- errors.Wrap(fmt.Errorf("Status code is %v, ", res.StatusCode), t.urlContext)
|
||||
}
|
||||
return
|
||||
}
|
||||
data, err := ioutil.ReadAll(io.LimitReader(res.Body, int64(c.sizeLimit+1)))
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, urlContext)
|
||||
c.Errors <- errors.Wrap(err, t.urlContext)
|
||||
return
|
||||
}
|
||||
if len(data) > c.sizeLimit {
|
||||
c.Errors <- errors.Wrap(errors.New("Maximum size exceeded"), urlContext)
|
||||
c.Errors <- errors.Wrap(errors.New("Maximum size exceeded"), t.urlContext)
|
||||
return
|
||||
}
|
||||
|
||||
|
|
@ -112,44 +118,94 @@ func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, urlCo
|
|||
|
||||
strData := string(data)
|
||||
if isCSS {
|
||||
strData = c.rewriter.RewriteCSS(sessionID, requestURL, strData) // TODO: one method for rewrite and return list
|
||||
strData = c.rewriter.RewriteCSS(t.sessionID, t.requestURL, strData) // TODO: one method for rewrite and return list
|
||||
}
|
||||
|
||||
// TODO: implement in streams
|
||||
err = c.s3.Upload(strings.NewReader(strData), cachePath, contentType, false)
|
||||
err = c.s3.Upload(strings.NewReader(strData), t.cachePath, contentType, false)
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, urlContext)
|
||||
c.Errors <- errors.Wrap(err, t.urlContext)
|
||||
return
|
||||
}
|
||||
c.downloadedAssets.Add(context.Background(), 1)
|
||||
|
||||
if isCSS {
|
||||
if depth > 0 {
|
||||
if t.depth > 0 {
|
||||
for _, extractedURL := range assets.ExtractURLsFromCSS(string(data)) {
|
||||
if fullURL, cachable := assets.GetFullCachableURL(requestURL, extractedURL); cachable {
|
||||
go c.cacheURL(fullURL, sessionID, depth-1, urlContext+"\n -> "+fullURL, false)
|
||||
if fullURL, cachable := assets.GetFullCachableURL(t.requestURL, extractedURL); cachable {
|
||||
c.checkTask(&Task{
|
||||
requestURL: fullURL,
|
||||
sessionID: t.sessionID,
|
||||
depth: t.depth - 1,
|
||||
urlContext: t.urlContext + "\n -> " + fullURL,
|
||||
isJS: false,
|
||||
retries: setRetries(),
|
||||
})
|
||||
}
|
||||
}
|
||||
if err != nil {
|
||||
c.Errors <- errors.Wrap(err, urlContext)
|
||||
c.Errors <- errors.Wrap(err, t.urlContext)
|
||||
return
|
||||
}
|
||||
} else {
|
||||
c.Errors <- errors.Wrap(errors.New("Maximum recursion cache depth exceeded"), urlContext)
|
||||
c.Errors <- errors.Wrap(errors.New("Maximum recursion cache depth exceeded"), t.urlContext)
|
||||
return
|
||||
}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
func (c *cacher) checkTask(newTask *Task) {
|
||||
// check if file was recently uploaded
|
||||
var cachePath string
|
||||
if newTask.isJS {
|
||||
cachePath = assets.GetCachePathForJS(newTask.requestURL)
|
||||
} else {
|
||||
cachePath = assets.GetCachePathForAssets(newTask.sessionID, newTask.requestURL)
|
||||
}
|
||||
if c.timeoutMap.contains(cachePath) {
|
||||
return
|
||||
}
|
||||
c.timeoutMap.add(cachePath)
|
||||
crTime := c.s3.GetCreationTime(cachePath)
|
||||
if crTime != nil && crTime.After(time.Now().Add(-MAX_STORAGE_TIME)) {
|
||||
return
|
||||
}
|
||||
// add new file in queue to download
|
||||
newTask.cachePath = cachePath
|
||||
c.workers.AddTask(newTask)
|
||||
}
|
||||
|
||||
func (c *cacher) CacheJSFile(sourceURL string) {
|
||||
go c.cacheURL(sourceURL, 0, 0, sourceURL, true)
|
||||
c.checkTask(&Task{
|
||||
requestURL: sourceURL,
|
||||
sessionID: 0,
|
||||
depth: 0,
|
||||
urlContext: sourceURL,
|
||||
isJS: true,
|
||||
retries: setRetries(),
|
||||
})
|
||||
}
|
||||
|
||||
func (c *cacher) CacheURL(sessionID uint64, fullURL string) {
|
||||
go c.cacheURL(fullURL, sessionID, MAX_CACHE_DEPTH, fullURL, false)
|
||||
c.checkTask(&Task{
|
||||
requestURL: fullURL,
|
||||
sessionID: sessionID,
|
||||
depth: MAX_CACHE_DEPTH,
|
||||
urlContext: fullURL,
|
||||
isJS: false,
|
||||
retries: setRetries(),
|
||||
})
|
||||
}
|
||||
|
||||
func (c *cacher) UpdateTimeouts() {
|
||||
c.timeoutMap.deleteOutdated()
|
||||
}
|
||||
|
||||
func (c *cacher) Stop() {
|
||||
c.workers.Stop()
|
||||
}
|
||||
|
||||
func setRetries() int {
|
||||
return 10
|
||||
}
|
||||
|
|
|
|||
80
backend/internal/assets/cacher/pool.go
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
package cacher
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sync"
|
||||
)
|
||||
|
||||
type Task struct {
|
||||
requestURL string
|
||||
sessionID uint64
|
||||
depth byte
|
||||
urlContext string
|
||||
isJS bool
|
||||
cachePath string
|
||||
retries int
|
||||
}
|
||||
|
||||
type WorkerPool struct {
|
||||
tasks chan *Task
|
||||
wg sync.WaitGroup
|
||||
done chan struct{}
|
||||
term sync.Once
|
||||
size int
|
||||
job Job
|
||||
}
|
||||
|
||||
func (p *WorkerPool) CanAddTask() bool {
|
||||
if len(p.tasks) < cap(p.tasks) {
|
||||
return true
|
||||
}
|
||||
return false
|
||||
}
|
||||
|
||||
type Job func(task *Task)
|
||||
|
||||
func NewPool(size int, job Job) *WorkerPool {
|
||||
newPool := &WorkerPool{
|
||||
tasks: make(chan *Task, 128),
|
||||
done: make(chan struct{}),
|
||||
size: size,
|
||||
job: job,
|
||||
}
|
||||
newPool.init()
|
||||
return newPool
|
||||
}
|
||||
|
||||
func (p *WorkerPool) init() {
|
||||
p.wg.Add(p.size)
|
||||
for i := 0; i < p.size; i++ {
|
||||
go p.worker()
|
||||
}
|
||||
}
|
||||
|
||||
func (p *WorkerPool) worker() {
|
||||
for {
|
||||
select {
|
||||
case newTask := <-p.tasks:
|
||||
p.job(newTask)
|
||||
case <-p.done:
|
||||
p.wg.Done()
|
||||
return
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (p *WorkerPool) AddTask(task *Task) {
|
||||
if task.retries <= 0 {
|
||||
return
|
||||
}
|
||||
p.tasks <- task
|
||||
}
|
||||
|
||||
func (p *WorkerPool) Stop() {
|
||||
log.Printf("stopping workers")
|
||||
p.term.Do(func() {
|
||||
close(p.done)
|
||||
})
|
||||
p.wg.Wait()
|
||||
log.Printf("all workers have been stopped")
|
||||
}
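A minimal usage sketch for the pool above, assuming it sits inside the cacher package next to pool.go; NewPool, AddTask, Stop, setRetries and the Task fields are taken from this diff, while the worker count, URL and session id are invented for illustration.

package cacher

import "log"

// exampleUsage sketches the NewPool / AddTask / Stop lifecycle used by NewCacher above.
func exampleUsage() {
	pool := NewPool(4, func(t *Task) {
		// The job callback runs on one of the worker goroutines.
		log.Printf("would cache %s for session %d", t.requestURL, t.sessionID)
	})
	// AddTask silently drops tasks whose retries counter is not positive.
	pool.AddTask(&Task{
		requestURL: "https://example.com/app.css", // hypothetical URL
		urlContext: "https://example.com/app.css",
		sessionID:  1,
		retries:    setRetries(),
	})
	// Stop closes the done channel and waits for the workers to exit;
	// tasks still queued at that point are not processed.
	pool.Stop()
}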
|
||||
|
|
@ -17,9 +17,6 @@ import (
|
|||
)
|
||||
|
||||
func readFile(path string) (map[string]string, error) {
|
||||
if path == "" {
|
||||
return nil, fmt.Errorf("file path is empty")
|
||||
}
|
||||
file, err := os.Open(path)
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't open file: %s", err)
|
||||
|
|
@ -33,13 +30,23 @@ func readFile(path string) (map[string]string, error) {
|
|||
res := make(map[string]string)
|
||||
lines := strings.Split(string(data), "\n")
|
||||
for _, line := range lines {
|
||||
if len(line) == 0 {
|
||||
continue
|
||||
}
|
||||
env := strings.Split(line, "=")
|
||||
if len(env) < 2 {
|
||||
continue
|
||||
}
|
||||
res[env[0]] = env[1]
|
||||
}
|
||||
return res, nil
|
||||
}
|
||||
|
||||
func parseFile(a interface{}, path string) {
|
||||
// Skip the parsing step silently if no config file path is provided
|
||||
if path == "" {
|
||||
return
|
||||
}
|
||||
envs, err := readFile(path)
|
||||
if err != nil {
|
||||
log.Printf("can't parse config file: %s", err)
|
||||
|
|
|
|||
|
|
@ -14,6 +14,7 @@ type Config struct {
|
|||
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
|
||||
ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"`
|
||||
PartitionsNumber int `env:"PARTITIONS_NUMBER,required"`
|
||||
UseEncryption bool `env:"USE_ENCRYPTION,default=false"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -9,6 +9,8 @@ type Config struct {
|
|||
common.Config
|
||||
FsDir string `env:"FS_DIR,required"`
|
||||
FsUlimit uint16 `env:"FS_ULIMIT,required"`
|
||||
FileBuffer int `env:"FILE_BUFFER,default=32768"`
|
||||
SyncTimeout int `env:"SYNC_TIMEOUT,default=5"`
|
||||
GroupSink string `env:"GROUP_SINK,required"`
|
||||
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
|
||||
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
|
||||
|
|
@ -17,6 +19,8 @@ type Config struct {
|
|||
CacheAssets bool `env:"CACHE_ASSETS,required"`
|
||||
AssetsOrigin string `env:"ASSETS_ORIGIN,required"`
|
||||
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
|
||||
CacheThreshold int64 `env:"CACHE_THRESHOLD,default=5"`
|
||||
CacheExpiration int64 `env:"CACHE_EXPIRATION,default=120"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -11,7 +11,6 @@ type Config struct {
|
|||
S3Region string `env:"AWS_REGION_WEB,required"`
|
||||
S3Bucket string `env:"S3_BUCKET_WEB,required"`
|
||||
FSDir string `env:"FS_DIR,required"`
|
||||
FSCleanHRS int `env:"FS_CLEAN_HRS,required"`
|
||||
FileSplitSize int `env:"FILE_SPLIT_SIZE,required"`
|
||||
RetryTimeout time.Duration `env:"RETRY_TIMEOUT,default=2m"`
|
||||
GroupStorage string `env:"GROUP_STORAGE,required"`
|
||||
|
|
@ -21,6 +20,7 @@ type Config struct {
|
|||
DeleteTimeout time.Duration `env:"DELETE_TIMEOUT,default=48h"`
|
||||
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
|
||||
UseFailover bool `env:"USE_FAILOVER,default=false"`
|
||||
MaxFileSize int64 `env:"MAX_FILE_SIZE,default=524288000"`
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
|
|
|
|||
|
|
@ -5,7 +5,8 @@ import (
|
|||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func (mi *Saver) InsertMessage(sessionID uint64, msg Message) error {
|
||||
func (mi *Saver) InsertMessage(msg Message) error {
|
||||
sessionID := msg.SessionID()
|
||||
switch m := msg.(type) {
|
||||
// Common
|
||||
case *Metadata:
|
||||
|
|
@ -37,23 +38,16 @@ func (mi *Saver) InsertMessage(sessionID uint64, msg Message) error {
|
|||
case *PageEvent:
|
||||
mi.sendToFTS(msg, sessionID)
|
||||
return mi.pg.InsertWebPageEvent(sessionID, m)
|
||||
case *ErrorEvent:
|
||||
return mi.pg.InsertWebErrorEvent(sessionID, m)
|
||||
case *FetchEvent:
|
||||
mi.sendToFTS(msg, sessionID)
|
||||
return mi.pg.InsertWebFetchEvent(sessionID, m)
|
||||
case *GraphQLEvent:
|
||||
mi.sendToFTS(msg, sessionID)
|
||||
return mi.pg.InsertWebGraphQLEvent(sessionID, m)
|
||||
case *JSException:
|
||||
return mi.pg.InsertWebJSException(m)
|
||||
case *IntegrationEvent:
|
||||
return mi.pg.InsertWebErrorEvent(sessionID, &ErrorEvent{
|
||||
MessageID: m.Meta().Index,
|
||||
Timestamp: m.Timestamp,
|
||||
Source: m.Source,
|
||||
Name: m.Name,
|
||||
Message: m.Message,
|
||||
Payload: m.Payload,
|
||||
})
|
||||
return mi.pg.InsertWebIntegrationEvent(m)
|
||||
|
||||
// IOS
|
||||
case *IOSSessionStart:
|
||||
|
|
|
|||
|
|
@ -16,12 +16,10 @@ func (si *Saver) InsertStats(session *Session, msg Message) error {
|
|||
return si.pg.InsertWebStatsPerformance(session.SessionID, m)
|
||||
case *ResourceEvent:
|
||||
return si.pg.InsertWebStatsResourceEvent(session.SessionID, m)
|
||||
case *LongTask:
|
||||
return si.pg.InsertWebStatsLongtask(session.SessionID, m)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (si *Saver) CommitStats(optimize bool) error {
|
||||
func (si *Saver) CommitStats() error {
|
||||
return nil
|
||||
}
|
||||
|
|
|
|||
|
|
@ -69,12 +69,12 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
|
|||
}
|
||||
// TODO: if EXPIRED => send message for two sessions association
|
||||
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
tokenData = &token.TokenData{sessionID, expTime.UnixMilli()}
|
||||
tokenData = &token.TokenData{sessionID, 0, expTime.UnixMilli()}
|
||||
|
||||
country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
|
||||
|
||||
// The difference with web is mostly here:
|
||||
e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, Encode(&IOSSessionStart{
|
||||
sessStart := &IOSSessionStart{
|
||||
Timestamp: req.Timestamp,
|
||||
ProjectID: uint64(p.ProjectID),
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
|
|
@ -85,7 +85,8 @@ func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request)
|
|||
UserDevice: ios.MapIOSDevice(req.UserDevice),
|
||||
UserDeviceType: ios.GetIOSDeviceType(req.UserDevice),
|
||||
UserCountry: country,
|
||||
}))
|
||||
}
|
||||
e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, sessStart.Encode())
|
||||
}
|
||||
|
||||
ResponseWithJSON(w, &StartIOSSessionResponse{
|
||||
|
|
|
|||
|
|
@ -3,6 +3,7 @@ package router
|
|||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"github.com/Masterminds/semver"
|
||||
"go.opentelemetry.io/otel/attribute"
|
||||
"io"
|
||||
"log"
|
||||
|
|
@ -37,6 +38,22 @@ func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) (
|
|||
return bodyBytes, nil
|
||||
}
|
||||
|
||||
func getSessionTimestamp(req *StartSessionRequest, startTimeMili int64) (ts uint64) {
|
||||
ts = uint64(req.Timestamp)
|
||||
c, err := semver.NewConstraint(">=4.1.6")
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
v, err := semver.NewVersion(req.TrackerVersion)
|
||||
if err != nil {
|
||||
return
|
||||
}
|
||||
if c.Check(v) {
|
||||
return uint64(startTimeMili)
|
||||
}
|
||||
return
|
||||
}
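A self-contained sketch of the Masterminds/semver check that getSessionTimestamp performs (the library is already listed in go.mod above); the tracker version strings are arbitrary examples. Trackers at or above 4.1.6 get the server-side start time, while older or unparsable versions keep the client-sent timestamp.

package main

import (
	"fmt"

	"github.com/Masterminds/semver"
)

func main() {
	c, err := semver.NewConstraint(">=4.1.6")
	if err != nil {
		panic(err)
	}
	for _, tracker := range []string{"4.0.9", "4.1.6", "5.0.0", "dev"} { // hypothetical versions
		v, err := semver.NewVersion(tracker)
		if err != nil {
			fmt.Printf("%s -> keep client timestamp (unparsable version)\n", tracker)
			continue
		}
		// Same decision as getSessionTimestamp: newer trackers trust the server clock.
		fmt.Printf("%s -> use server start time: %v\n", tracker, c.Check(v))
	}
}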
|
||||
|
||||
func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
||||
startTime := time.Now()
|
||||
|
||||
|
|
@ -91,17 +108,22 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
|
|||
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
|
||||
return
|
||||
}
|
||||
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
|
||||
startTimeMili := startTime.UnixMilli()
|
||||
sessionID, err := e.services.Flaker.Compose(uint64(startTimeMili))
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
// TODO: if EXPIRED => send message for two sessions association
|
||||
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
tokenData = &token.TokenData{ID: sessionID, ExpTime: expTime.UnixMilli()}
|
||||
tokenData = &token.TokenData{
|
||||
ID: sessionID,
|
||||
Delay: startTimeMili - req.Timestamp,
|
||||
ExpTime: expTime.UnixMilli(),
|
||||
}
|
||||
|
||||
sessionStart := &SessionStart{
|
||||
Timestamp: req.Timestamp,
|
||||
Timestamp: getSessionTimestamp(req, startTimeMili),
|
||||
ProjectID: uint64(p.ProjectID),
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
RevID: req.RevID,
|
||||
|
|
@ -125,7 +147,7 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
|
|||
}
|
||||
|
||||
// Send sessionStart message to kafka
|
||||
if err := e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, Encode(sessionStart)); err != nil {
|
||||
if err := e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, sessionStart.Encode()); err != nil {
|
||||
log.Printf("can't send session start: %s", err)
|
||||
}
|
||||
}
|
||||
|
|
@ -137,6 +159,7 @@ func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request)
|
|||
ProjectID: strconv.FormatUint(uint64(p.ProjectID), 10),
|
||||
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
|
||||
StartTimestamp: int64(flakeid.ExtractTimestamp(tokenData.ID)),
|
||||
Delay: tokenData.Delay,
|
||||
})
|
||||
}
|
||||
|
||||
|
|
|
|||
|
|
@ -4,7 +4,7 @@ type StartSessionRequest struct {
|
|||
Token string `json:"token"`
|
||||
UserUUID *string `json:"userUUID"`
|
||||
RevID string `json:"revID"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
TrackerVersion string `json:"trackerVersion"`
|
||||
IsSnippet bool `json:"isSnippet"`
|
||||
DeviceMemory uint64 `json:"deviceMemory"`
|
||||
|
|
|
|||
|
|
@ -5,6 +5,8 @@ import (
|
|||
"fmt"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"log"
|
||||
log2 "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"time"
|
||||
)
|
||||
|
|
@ -27,9 +29,10 @@ type SessionEnder struct {
|
|||
timeCtrl *timeController
|
||||
activeSessions syncfloat64.UpDownCounter
|
||||
totalSessions syncfloat64.Counter
|
||||
stats log2.QueueStats
|
||||
}
|
||||
|
||||
func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder, error) {
|
||||
func New(metrics *monitoring.Metrics, timeout int64, parts int, stats log2.QueueStats) (*SessionEnder, error) {
|
||||
if metrics == nil {
|
||||
return nil, fmt.Errorf("metrics module is empty")
|
||||
}
|
||||
|
|
@ -48,24 +51,31 @@ func New(metrics *monitoring.Metrics, timeout int64, parts int) (*SessionEnder,
|
|||
timeCtrl: NewTimeController(parts),
|
||||
activeSessions: activeSessions,
|
||||
totalSessions: totalSessions,
|
||||
stats: stats,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UpdateSession saves the timestamp for new sessions and updates it for existing ones
|
||||
func (se *SessionEnder) UpdateSession(sessionID uint64, timestamp, msgTimestamp int64) {
|
||||
localTS := time.Now().UnixMilli()
|
||||
currTS := timestamp
|
||||
if currTS == 0 {
|
||||
func (se *SessionEnder) UpdateSession(msg messages.Message) {
|
||||
se.stats.Collect(msg)
|
||||
var (
|
||||
sessionID = msg.Meta().SessionID()
|
||||
batchTimestamp = msg.Meta().Batch().Timestamp()
|
||||
msgTimestamp = msg.Meta().Timestamp
|
||||
localTimestamp = time.Now().UnixMilli()
|
||||
)
|
||||
if batchTimestamp == 0 {
|
||||
log.Printf("got empty timestamp for sessionID: %d", sessionID)
|
||||
return
|
||||
}
|
||||
se.timeCtrl.UpdateTime(sessionID, currTS)
|
||||
se.timeCtrl.UpdateTime(sessionID, batchTimestamp)
|
||||
sess, ok := se.sessions[sessionID]
|
||||
if !ok {
|
||||
// Register new session
|
||||
se.sessions[sessionID] = &session{
|
||||
lastTimestamp: currTS, // timestamp from message broker
|
||||
lastUpdate: localTS, // local timestamp
|
||||
lastUserTime: msgTimestamp, // last timestamp from user's machine
|
||||
lastTimestamp: batchTimestamp, // timestamp from message broker
|
||||
lastUpdate: localTimestamp, // local timestamp
|
||||
lastUserTime: msgTimestamp, // last timestamp from user's machine
|
||||
isEnded: false,
|
||||
}
|
||||
se.activeSessions.Add(context.Background(), 1)
|
||||
|
|
@ -77,9 +87,9 @@ func (se *SessionEnder) UpdateSession(sessionID uint64, timestamp, msgTimestamp
|
|||
sess.lastUserTime = msgTimestamp
|
||||
}
|
||||
// Keep information about the latest message for generating sessionEnd trigger
|
||||
if currTS > sess.lastTimestamp {
|
||||
sess.lastTimestamp = currTS
|
||||
sess.lastUpdate = localTS
|
||||
if batchTimestamp > sess.lastTimestamp {
|
||||
sess.lastTimestamp = batchTimestamp
|
||||
sess.lastUpdate = localTimestamp
|
||||
sess.isEnded = false
|
||||
}
|
||||
}
|
||||
|
|
|
|||
|
|
@ -1,35 +1,111 @@
|
|||
package assetscache
|
||||
|
||||
import (
|
||||
"context"
|
||||
"crypto/md5"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"io"
|
||||
"log"
|
||||
"net/url"
|
||||
"openreplay/backend/internal/config/sink"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type AssetsCache struct {
|
||||
cfg *sink.Config
|
||||
rewriter *assets.Rewriter
|
||||
producer types.Producer
|
||||
type CachedAsset struct {
|
||||
msg string
|
||||
ts time.Time
|
||||
}
|
||||
|
||||
func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
|
||||
return &AssetsCache{
|
||||
cfg: cfg,
|
||||
rewriter: rewriter,
|
||||
producer: producer,
|
||||
type AssetsCache struct {
|
||||
mutex sync.RWMutex
|
||||
cfg *sink.Config
|
||||
rewriter *assets.Rewriter
|
||||
producer types.Producer
|
||||
cache map[string]*CachedAsset
|
||||
totalAssets syncfloat64.Counter
|
||||
cachedAssets syncfloat64.Counter
|
||||
skippedAssets syncfloat64.Counter
|
||||
assetSize syncfloat64.Histogram
|
||||
assetDuration syncfloat64.Histogram
|
||||
}
|
||||
|
||||
func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer, metrics *monitoring.Metrics) *AssetsCache {
|
||||
// Assets metrics
|
||||
totalAssets, err := metrics.RegisterCounter("assets_total")
|
||||
if err != nil {
|
||||
log.Printf("can't create assets_total metric: %s", err)
|
||||
}
|
||||
cachedAssets, err := metrics.RegisterCounter("assets_cached")
|
||||
if err != nil {
|
||||
log.Printf("can't create assets_cached metric: %s", err)
|
||||
}
|
||||
skippedAssets, err := metrics.RegisterCounter("assets_skipped")
|
||||
if err != nil {
|
||||
log.Printf("can't create assets_skipped metric: %s", err)
|
||||
}
|
||||
assetSize, err := metrics.RegisterHistogram("asset_size")
|
||||
if err != nil {
|
||||
log.Printf("can't create asset_size metric: %s", err)
|
||||
}
|
||||
assetDuration, err := metrics.RegisterHistogram("asset_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create asset_duration metric: %s", err)
|
||||
}
|
||||
assetsCache := &AssetsCache{
|
||||
cfg: cfg,
|
||||
rewriter: rewriter,
|
||||
producer: producer,
|
||||
cache: make(map[string]*CachedAsset, 64),
|
||||
totalAssets: totalAssets,
|
||||
cachedAssets: cachedAssets,
|
||||
skippedAssets: skippedAssets,
|
||||
assetSize: assetSize,
|
||||
assetDuration: assetDuration,
|
||||
}
|
||||
go assetsCache.cleaner()
|
||||
return assetsCache
|
||||
}
|
||||
|
||||
func (e *AssetsCache) cleaner() {
|
||||
cleanTick := time.Tick(time.Minute * 30)
|
||||
for {
|
||||
select {
|
||||
case <-cleanTick:
|
||||
e.clearCache()
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.Message {
|
||||
func (e *AssetsCache) clearCache() {
|
||||
e.mutex.Lock()
|
||||
defer e.mutex.Unlock()
|
||||
|
||||
now := time.Now()
|
||||
cacheSize := len(e.cache)
|
||||
deleted := 0
|
||||
|
||||
for id, cache := range e.cache {
|
||||
if int64(now.Sub(cache.ts).Minutes()) > e.cfg.CacheExpiration {
|
||||
deleted++
|
||||
delete(e.cache, id)
|
||||
}
|
||||
}
|
||||
log.Printf("cache cleaner: deleted %d/%d assets", deleted, cacheSize)
|
||||
}
|
||||
|
||||
func (e *AssetsCache) ParseAssets(msg messages.Message) messages.Message {
|
||||
switch m := msg.(type) {
|
||||
case *messages.SetNodeAttributeURLBased:
|
||||
if m.Name == "src" || m.Name == "href" {
|
||||
newMsg := &messages.SetNodeAttribute{
|
||||
ID: m.ID,
|
||||
Name: m.Name,
|
||||
Value: e.handleURL(sessID, m.BaseURL, m.Value),
|
||||
Value: e.handleURL(m.SessionID(), m.BaseURL, m.Value),
|
||||
}
|
||||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
|
|
@ -37,7 +113,7 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
|
|||
newMsg := &messages.SetNodeAttribute{
|
||||
ID: m.ID,
|
||||
Name: m.Name,
|
||||
Value: e.handleCSS(sessID, m.BaseURL, m.Value),
|
||||
Value: e.handleCSS(m.SessionID(), m.BaseURL, m.Value),
|
||||
}
|
||||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
|
|
@ -45,7 +121,7 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
|
|||
case *messages.SetCSSDataURLBased:
|
||||
newMsg := &messages.SetCSSData{
|
||||
ID: m.ID,
|
||||
Data: e.handleCSS(sessID, m.BaseURL, m.Data),
|
||||
Data: e.handleCSS(m.SessionID(), m.BaseURL, m.Data),
|
||||
}
|
||||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
|
|
@ -53,14 +129,14 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
|
|||
newMsg := &messages.CSSInsertRule{
|
||||
ID: m.ID,
|
||||
Index: m.Index,
|
||||
Rule: e.handleCSS(sessID, m.BaseURL, m.Rule),
|
||||
Rule: e.handleCSS(m.SessionID(), m.BaseURL, m.Rule),
|
||||
}
|
||||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
case *messages.AdoptedSSReplaceURLBased:
|
||||
newMsg := &messages.AdoptedSSReplace{
|
||||
SheetID: m.SheetID,
|
||||
Text: e.handleCSS(sessID, m.BaseURL, m.Text),
|
||||
Text: e.handleCSS(m.SessionID(), m.BaseURL, m.Text),
|
||||
}
|
||||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
|
|
@ -68,7 +144,7 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
|
|||
newMsg := &messages.AdoptedSSInsertRule{
|
||||
SheetID: m.SheetID,
|
||||
Index: m.Index,
|
||||
Rule: e.handleCSS(sessID, m.BaseURL, m.Rule),
|
||||
Rule: e.handleCSS(m.SessionID(), m.BaseURL, m.Rule),
|
||||
}
|
||||
newMsg.SetMeta(msg.Meta())
|
||||
return newMsg
|
||||
|
|
@ -78,10 +154,11 @@ func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.
|
|||
|
||||
func (e *AssetsCache) sendAssetForCache(sessionID uint64, baseURL string, relativeURL string) {
|
||||
if fullURL, cacheable := assets.GetFullCachableURL(baseURL, relativeURL); cacheable {
|
||||
assetMessage := &messages.AssetCache{URL: fullURL}
|
||||
if err := e.producer.Produce(
|
||||
e.cfg.TopicCache,
|
||||
sessionID,
|
||||
messages.Encode(&messages.AssetCache{URL: fullURL}),
|
||||
assetMessage.Encode(),
|
||||
); err != nil {
|
||||
log.Printf("can't send asset to cache topic, sessID: %d, err: %s", sessionID, err)
|
||||
}
|
||||
|
|
@ -94,18 +171,72 @@ func (e *AssetsCache) sendAssetsForCacheFromCSS(sessionID uint64, baseURL string
|
|||
}
|
||||
}
|
||||
|
||||
func (e *AssetsCache) handleURL(sessionID uint64, baseURL string, url string) string {
|
||||
func (e *AssetsCache) handleURL(sessionID uint64, baseURL string, urlVal string) string {
|
||||
if e.cfg.CacheAssets {
|
||||
e.sendAssetForCache(sessionID, baseURL, url)
|
||||
return e.rewriter.RewriteURL(sessionID, baseURL, url)
|
||||
e.sendAssetForCache(sessionID, baseURL, urlVal)
|
||||
return e.rewriter.RewriteURL(sessionID, baseURL, urlVal)
|
||||
} else {
|
||||
return assets.ResolveURL(baseURL, urlVal)
|
||||
}
|
||||
return assets.ResolveURL(baseURL, url)
|
||||
}
|
||||
|
||||
func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
|
||||
ctx := context.Background()
|
||||
e.totalAssets.Add(ctx, 1)
|
||||
// Try to find asset in cache
|
||||
h := md5.New()
|
||||
// Cut first part of url (scheme + host)
|
||||
u, err := url.Parse(baseURL)
|
||||
if err != nil {
|
||||
log.Printf("can't parse url: %s, err: %s", baseURL, err)
|
||||
if e.cfg.CacheAssets {
|
||||
e.sendAssetsForCacheFromCSS(sessionID, baseURL, css)
|
||||
}
|
||||
return e.getRewrittenCSS(sessionID, baseURL, css)
|
||||
}
|
||||
justUrl := u.Scheme + "://" + u.Host + "/"
|
||||
// Calculate hash sum of url + css
|
||||
io.WriteString(h, justUrl)
|
||||
io.WriteString(h, css)
|
||||
hash := string(h.Sum(nil))
|
||||
// Check the resulting hash in cache
|
||||
e.mutex.RLock()
|
||||
cachedAsset, ok := e.cache[hash]
|
||||
e.mutex.RUnlock()
|
||||
if ok {
|
||||
if int64(time.Now().Sub(cachedAsset.ts).Minutes()) < e.cfg.CacheExpiration {
|
||||
e.skippedAssets.Add(ctx, 1)
|
||||
return cachedAsset.msg
|
||||
}
|
||||
}
|
||||
// Send asset to download in assets service
|
||||
if e.cfg.CacheAssets {
|
||||
e.sendAssetsForCacheFromCSS(sessionID, baseURL, css)
|
||||
return e.rewriter.RewriteCSS(sessionID, baseURL, css)
|
||||
}
|
||||
return assets.ResolveCSS(baseURL, css)
|
||||
// Rewrite asset
|
||||
start := time.Now()
|
||||
res := e.getRewrittenCSS(sessionID, baseURL, css)
|
||||
duration := time.Now().Sub(start).Milliseconds()
|
||||
e.assetSize.Record(ctx, float64(len(res)))
|
||||
e.assetDuration.Record(ctx, float64(duration))
|
||||
// Cache the asset if rewriting took longer than the configured threshold
|
||||
if duration > e.cfg.CacheThreshold {
|
||||
e.mutex.Lock()
|
||||
e.cache[hash] = &CachedAsset{
|
||||
msg: res,
|
||||
ts: time.Now(),
|
||||
}
|
||||
e.mutex.Unlock()
|
||||
e.cachedAssets.Add(ctx, 1)
|
||||
}
|
||||
// Return rewritten asset
|
||||
return res
|
||||
}
|
||||
|
||||
func (e *AssetsCache) getRewrittenCSS(sessionID uint64, url, css string) string {
|
||||
if e.cfg.CacheAssets {
|
||||
return e.rewriter.RewriteCSS(sessionID, url, css)
|
||||
} else {
|
||||
return assets.ResolveCSS(url, css)
|
||||
}
|
||||
}
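To make the caching branch in handleCSS easier to follow, here is a standalone sketch of how the cache key is derived; the base URLs and CSS string are invented. Only the scheme and host of the base URL are hashed together with the CSS body, so the same stylesheet referenced from different paths of one origin resolves to one cache entry (the key is hex-encoded here for readability; the code above keeps the raw digest).

package main

import (
	"crypto/md5"
	"fmt"
	"io"
	"net/url"
)

// cacheKey mirrors the hashing in handleCSS: md5(scheme://host/ + css).
func cacheKey(baseURL, css string) (string, error) {
	u, err := url.Parse(baseURL)
	if err != nil {
		return "", err
	}
	h := md5.New()
	io.WriteString(h, u.Scheme+"://"+u.Host+"/")
	io.WriteString(h, css)
	return fmt.Sprintf("%x", h.Sum(nil)), nil
}

func main() {
	// Hypothetical inputs: two pages of the same origin sharing one stylesheet.
	k1, _ := cacheKey("https://example.com/a/page.html", "body{color:red}")
	k2, _ := cacheKey("https://example.com/b/other.html", "body{color:red}")
	fmt.Println(k1 == k2) // true: one cache entry serves both
}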
|
||||
|
|
|
|||
|
|
@ -1,100 +0,0 @@
|
|||
package oswriter
|
||||
|
||||
import (
|
||||
"math"
|
||||
"os"
|
||||
"strconv"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Writer struct {
|
||||
ulimit int
|
||||
dir string
|
||||
files map[uint64]*os.File
|
||||
atimes map[uint64]int64
|
||||
}
|
||||
|
||||
func NewWriter(ulimit uint16, dir string) *Writer {
|
||||
return &Writer{
|
||||
ulimit: int(ulimit),
|
||||
dir: dir + "/",
|
||||
files: make(map[uint64]*os.File),
|
||||
atimes: make(map[uint64]int64),
|
||||
}
|
||||
}
|
||||
|
||||
func (w *Writer) open(key uint64) (*os.File, error) {
|
||||
file, ok := w.files[key]
|
||||
if ok {
|
||||
return file, nil
|
||||
}
|
||||
if len(w.atimes) == w.ulimit {
|
||||
var m_k uint64
|
||||
var m_t int64 = math.MaxInt64
|
||||
for k, t := range w.atimes {
|
||||
if t < m_t {
|
||||
m_k = k
|
||||
m_t = t
|
||||
}
|
||||
}
|
||||
if err := w.Close(m_k); err != nil {
|
||||
return nil, err
|
||||
}
|
||||
}
|
||||
file, err := os.OpenFile(w.dir+strconv.FormatUint(key, 10), os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
w.files[key] = file
|
||||
w.atimes[key] = time.Now().Unix()
|
||||
return file, nil
|
||||
}
|
||||
|
||||
func (w *Writer) Close(key uint64) error {
|
||||
file := w.files[key]
|
||||
if file == nil {
|
||||
return nil
|
||||
}
|
||||
if err := file.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := file.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
delete(w.files, key)
|
||||
delete(w.atimes, key)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Writer) Write(key uint64, data []byte) error {
|
||||
file, err := w.open(key)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
// TODO: add check for the number of recorded bytes to file
|
||||
_, err = file.Write(data)
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *Writer) SyncAll() error {
|
||||
for _, file := range w.files {
|
||||
if err := file.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (w *Writer) CloseAll() error {
|
||||
for _, file := range w.files {
|
||||
if err := file.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := file.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
w.files = nil
|
||||
w.atimes = nil
|
||||
return nil
|
||||
}
|
||||
57
backend/internal/sink/sessionwriter/file.go
Normal file
|
|
@ -0,0 +1,57 @@
|
|||
package sessionwriter
|
||||
|
||||
import (
|
||||
"bufio"
|
||||
"os"
|
||||
)
|
||||
|
||||
type File struct {
|
||||
file *os.File
|
||||
buffer *bufio.Writer
|
||||
updated bool
|
||||
}
|
||||
|
||||
func NewFile(path string, bufSize int) (*File, error) {
|
||||
file, err := os.OpenFile(path, os.O_WRONLY|os.O_CREATE|os.O_APPEND, 0644)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
return &File{
|
||||
file: file,
|
||||
buffer: bufio.NewWriterSize(file, bufSize),
|
||||
updated: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (f *File) Write(data []byte) error {
|
||||
leftToWrite := len(data)
|
||||
for leftToWrite > 0 {
|
||||
writtenDown, err := f.buffer.Write(data)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
leftToWrite -= writtenDown
|
||||
}
|
||||
f.updated = true
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) Sync() error {
|
||||
if !f.updated {
|
||||
return nil
|
||||
}
|
||||
if err := f.buffer.Flush(); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := f.file.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
f.updated = false
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *File) Close() error {
|
||||
_ = f.buffer.Flush()
|
||||
_ = f.file.Sync()
|
||||
return f.file.Close()
|
||||
}
|
||||
56
backend/internal/sink/sessionwriter/meta.go
Normal file
|
|
@ -0,0 +1,56 @@
|
|||
package sessionwriter
|
||||
|
||||
import (
|
||||
"math"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Meta struct {
|
||||
limit int
|
||||
lock *sync.Mutex
|
||||
meta map[uint64]int64
|
||||
}
|
||||
|
||||
func NewMeta(limit int) *Meta {
|
||||
return &Meta{
|
||||
limit: limit,
|
||||
lock: &sync.Mutex{},
|
||||
meta: make(map[uint64]int64, limit),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *Meta) Add(sid uint64) {
|
||||
m.lock.Lock()
|
||||
m.meta[sid] = time.Now().Unix()
|
||||
m.lock.Unlock()
|
||||
}
|
||||
|
||||
func (m *Meta) Count() int {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
return len(m.meta)
|
||||
}
|
||||
|
||||
func (m *Meta) Delete(sid uint64) {
|
||||
m.lock.Lock()
|
||||
delete(m.meta, sid)
|
||||
m.lock.Unlock()
|
||||
}
|
||||
|
||||
func (m *Meta) GetExtra() uint64 {
|
||||
m.lock.Lock()
|
||||
defer m.lock.Unlock()
|
||||
if len(m.meta) >= m.limit {
|
||||
var extraSessID uint64
|
||||
var minTimestamp int64 = math.MaxInt64
|
||||
for sessID, timestamp := range m.meta {
|
||||
if timestamp < minTimestamp {
|
||||
extraSessID = sessID
|
||||
minTimestamp = timestamp
|
||||
}
|
||||
}
|
||||
return extraSessID
|
||||
}
|
||||
return 0
|
||||
}
|
||||
96
backend/internal/sink/sessionwriter/session.go
Normal file
|
|
@ -0,0 +1,96 @@
|
|||
package sessionwriter
|
||||
|
||||
import (
|
||||
"encoding/binary"
|
||||
"fmt"
|
||||
"strconv"
|
||||
"sync"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
type Session struct {
|
||||
lock *sync.Mutex
|
||||
dom *File
|
||||
dev *File
|
||||
index []byte
|
||||
updated bool
|
||||
}
|
||||
|
||||
func NewSession(sessID uint64, workDir string, bufSize int) (*Session, error) {
|
||||
if sessID == 0 {
|
||||
return nil, fmt.Errorf("wrong session id")
|
||||
}
|
||||
filePath := workDir + strconv.FormatUint(sessID, 10)
|
||||
|
||||
dom, err := NewFile(filePath, bufSize)
|
||||
if err != nil {
|
||||
return nil, err
|
||||
}
|
||||
dev, err := NewFile(filePath+"devtools", bufSize)
|
||||
if err != nil {
|
||||
dom.Close()
|
||||
return nil, err
|
||||
}
|
||||
|
||||
return &Session{
|
||||
lock: &sync.Mutex{},
|
||||
dom: dom,
|
||||
dev: dev,
|
||||
index: make([]byte, 8),
|
||||
updated: false,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Session) Write(msg messages.Message) error {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
// Encode message index
|
||||
binary.LittleEndian.PutUint64(s.index, msg.Meta().Index)
|
||||
|
||||
// Write message to dom.mob file
|
||||
if messages.IsDOMType(msg.TypeID()) {
|
||||
// Write message index
|
||||
if err := s.dom.Write(s.index); err != nil {
|
||||
return err
|
||||
}
|
||||
// Write message body
|
||||
if err := s.dom.Write(msg.Encode()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
s.updated = true
|
||||
// Write message to dev.mob file
|
||||
if !messages.IsDOMType(msg.TypeID()) || msg.TypeID() == messages.MsgTimestamp {
|
||||
// Write message index
|
||||
if err := s.dev.Write(s.index); err != nil {
|
||||
return err
|
||||
}
|
||||
// Write message body
|
||||
if err := s.dev.Write(msg.Encode()); err != nil {
|
||||
return err
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (s *Session) Sync() error {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if err := s.dom.Sync(); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.dev.Sync()
|
||||
}
|
||||
|
||||
func (s *Session) Close() error {
|
||||
s.lock.Lock()
|
||||
defer s.lock.Unlock()
|
||||
|
||||
if err := s.dom.Close(); err != nil {
|
||||
return err
|
||||
}
|
||||
return s.dev.Close()
|
||||
}
|
||||
122
backend/internal/sink/sessionwriter/writer.go
Normal file
|
|
@ -0,0 +1,122 @@
|
|||
package sessionwriter
|
||||
|
||||
import (
|
||||
"fmt"
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
type SessionWriter struct {
|
||||
filesLimit int
|
||||
workingDir string
|
||||
fileBuffer int
|
||||
syncTimeout time.Duration
|
||||
meta *Meta
|
||||
sessions *sync.Map
|
||||
done chan struct{}
|
||||
stopped chan struct{}
|
||||
}
|
||||
|
||||
func NewWriter(filesLimit uint16, workingDir string, fileBuffer int, syncTimeout int) *SessionWriter {
|
||||
w := &SessionWriter{
|
||||
filesLimit: int(filesLimit) / 2, // should divide by 2 because each session has 2 files
|
||||
workingDir: workingDir + "/",
|
||||
fileBuffer: fileBuffer,
|
||||
syncTimeout: time.Duration(syncTimeout) * time.Second,
|
||||
meta: NewMeta(int(filesLimit)),
|
||||
sessions: &sync.Map{},
|
||||
done: make(chan struct{}),
|
||||
stopped: make(chan struct{}),
|
||||
}
|
||||
go w.synchronizer()
|
||||
return w
|
||||
}
|
||||
|
||||
func (w *SessionWriter) Write(msg messages.Message) (err error) {
|
||||
var (
|
||||
sess *Session
|
||||
sid = msg.SessionID()
|
||||
)
|
||||
|
||||
// Load session
|
||||
sessObj, ok := w.sessions.Load(sid)
|
||||
if !ok {
|
||||
// Create new session
|
||||
sess, err = NewSession(sid, w.workingDir, w.fileBuffer)
|
||||
if err != nil {
|
||||
return fmt.Errorf("can't create session: %d, err: %s", sid, err)
|
||||
}
|
||||
|
||||
// Check the open sessions limit and close the extra (oldest) session if needed
|
||||
if extraSessID := w.meta.GetExtra(); extraSessID != 0 {
|
||||
if err := w.Close(extraSessID); err != nil {
|
||||
log.Printf("can't close session: %s", err)
|
||||
}
|
||||
}
|
||||
|
||||
// Add created session
|
||||
w.sessions.Store(sid, sess)
|
||||
w.meta.Add(sid)
|
||||
} else {
|
||||
sess = sessObj.(*Session)
|
||||
}
|
||||
|
||||
// Write data to session
|
||||
return sess.Write(msg)
|
||||
}
|
||||
|
||||
func (w *SessionWriter) sync(sid uint64) error {
|
||||
sessObj, ok := w.sessions.Load(sid)
|
||||
if !ok {
|
||||
return fmt.Errorf("session: %d not found", sid)
|
||||
}
|
||||
sess := sessObj.(*Session)
|
||||
return sess.Sync()
|
||||
}
|
||||
|
||||
func (w *SessionWriter) Close(sid uint64) error {
|
||||
sessObj, ok := w.sessions.LoadAndDelete(sid)
|
||||
if !ok {
|
||||
return fmt.Errorf("session: %d not found", sid)
|
||||
}
|
||||
sess := sessObj.(*Session)
|
||||
err := sess.Close()
|
||||
w.meta.Delete(sid)
|
||||
return err
|
||||
}
|
||||
|
||||
func (w *SessionWriter) Stop() {
|
||||
w.done <- struct{}{}
|
||||
<-w.stopped
|
||||
}
|
||||
|
||||
func (w *SessionWriter) Info() string {
|
||||
return fmt.Sprintf("%d sessions", w.meta.Count())
|
||||
}
|
||||
|
||||
func (w *SessionWriter) synchronizer() {
|
||||
tick := time.Tick(w.syncTimeout)
|
||||
for {
|
||||
select {
|
||||
case <-tick:
|
||||
w.sessions.Range(func(sid, lockObj any) bool {
|
||||
if err := w.sync(sid.(uint64)); err != nil {
|
||||
log.Printf("can't sync file descriptor: %s", err)
|
||||
}
|
||||
return true
|
||||
})
|
||||
case <-w.done:
|
||||
w.sessions.Range(func(sid, lockObj any) bool {
|
||||
if err := w.Close(sid.(uint64)); err != nil {
|
||||
log.Printf("can't close file descriptor: %s", err)
|
||||
}
|
||||
return true
|
||||
})
|
||||
w.stopped <- struct{}{}
|
||||
return
|
||||
}
|
||||
}
|
||||
}
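A minimal lifecycle sketch for the writer above, assuming it is driven from the sink service's main loop; the file limit, directory, buffer size and sync timeout are invented values, and the message channel stands in for the real queue consumer.

package main

import (
	"log"

	"openreplay/backend/internal/sink/sessionwriter"
	"openreplay/backend/pkg/messages"
)

// handleMessages is an illustrative sketch: msgs would come from the sink's queue consumer.
func handleMessages(msgs <-chan messages.Message) {
	writer := sessionwriter.NewWriter(1024, "/tmp/sessions", 32*1024, 5)
	for msg := range msgs {
		// Each replayer message is appended to the per-session dom/devtools files.
		if err := writer.Write(msg); err != nil {
			log.Printf("writer error: %v", err)
		}
	}
	log.Printf("writer: %s", writer.Info()) // e.g. "42 sessions"
	writer.Stop()                           // flush buffers, close files, stop the synchronizer goroutine
}

func main() {
	msgs := make(chan messages.Message)
	close(msgs) // no real consumer here; this sketch only wires the lifecycle
	handleMessages(msgs)
}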
|
||||
17
backend/internal/storage/encryptor.go
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"errors"
|
||||
)
|
||||
|
||||
func GenerateEncryptionKey() []byte {
|
||||
return nil
|
||||
}
|
||||
|
||||
func EncryptData(data, fullKey []byte) ([]byte, error) {
|
||||
return nil, errors.New("not supported")
|
||||
}
|
||||
|
||||
func DecryptData(data, fullKey []byte) ([]byte, error) {
|
||||
return nil, errors.New("not supported")
|
||||
}
|
||||
|
|
@ -8,6 +8,7 @@ import (
|
|||
"log"
|
||||
config "openreplay/backend/internal/config/storage"
|
||||
"openreplay/backend/pkg/flakeid"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"os"
|
||||
|
|
@ -16,13 +17,16 @@ import (
|
|||
)
|
||||
|
||||
type Storage struct {
|
||||
cfg *config.Config
|
||||
s3 *storage.S3
|
||||
startBytes []byte
|
||||
totalSessions syncfloat64.Counter
|
||||
sessionSize syncfloat64.Histogram
|
||||
readingTime syncfloat64.Histogram
|
||||
archivingTime syncfloat64.Histogram
|
||||
cfg *config.Config
|
||||
s3 *storage.S3
|
||||
startBytes []byte
|
||||
|
||||
totalSessions syncfloat64.Counter
|
||||
sessionDOMSize syncfloat64.Histogram
|
||||
sessionDevtoolsSize syncfloat64.Histogram
|
||||
readingDOMTime syncfloat64.Histogram
|
||||
readingTime syncfloat64.Histogram
|
||||
archivingTime syncfloat64.Histogram
|
||||
}
|
||||
|
||||
func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Storage, error) {
|
||||
|
|
@ -37,10 +41,14 @@ func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Stor
|
|||
if err != nil {
|
||||
log.Printf("can't create sessions_total metric: %s", err)
|
||||
}
|
||||
sessionSize, err := metrics.RegisterHistogram("sessions_size")
|
||||
sessionDOMSize, err := metrics.RegisterHistogram("sessions_size")
|
||||
if err != nil {
|
||||
log.Printf("can't create session_size metric: %s", err)
|
||||
}
|
||||
sessionDevtoolsSize, err := metrics.RegisterHistogram("sessions_dt_size")
|
||||
if err != nil {
|
||||
log.Printf("can't create sessions_dt_size metric: %s", err)
|
||||
}
|
||||
readingTime, err := metrics.RegisterHistogram("reading_duration")
|
||||
if err != nil {
|
||||
log.Printf("can't create reading_duration metric: %s", err)
|
||||
|
|
@ -50,71 +58,177 @@ func New(cfg *config.Config, s3 *storage.S3, metrics *monitoring.Metrics) (*Stor
|
|||
log.Printf("can't create archiving_duration metric: %s", err)
|
||||
}
|
||||
return &Storage{
|
||||
cfg: cfg,
|
||||
s3: s3,
|
||||
startBytes: make([]byte, cfg.FileSplitSize),
|
||||
totalSessions: totalSessions,
|
||||
sessionSize: sessionSize,
|
||||
readingTime: readingTime,
|
||||
archivingTime: archivingTime,
|
||||
cfg: cfg,
|
||||
s3: s3,
|
||||
startBytes: make([]byte, cfg.FileSplitSize),
|
||||
totalSessions: totalSessions,
|
||||
sessionDOMSize: sessionDOMSize,
|
||||
sessionDevtoolsSize: sessionDevtoolsSize,
|
||||
readingTime: readingTime,
|
||||
archivingTime: archivingTime,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Storage) UploadKey(key string, retryCount int) error {
|
||||
func (s *Storage) UploadSessionFiles(msg *messages.SessionEnd) error {
|
||||
if err := s.uploadKey(msg.SessionID(), "/dom.mob", true, 5, msg.EncryptionKey); err != nil {
|
||||
return err
|
||||
}
|
||||
if err := s.uploadKey(msg.SessionID(), "/devtools.mob", false, 4, msg.EncryptionKey); err != nil {
|
||||
log.Printf("can't find devtools for session: %d, err: %s", msg.SessionID(), err)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
// TODO: make a bit cleaner.
|
||||
// TODO: Of course, I'll do!
|
||||
func (s *Storage) uploadKey(sessID uint64, suffix string, shouldSplit bool, retryCount int, encryptionKey string) error {
|
||||
if retryCount <= 0 {
|
||||
return nil
|
||||
}
|
||||
|
||||
start := time.Now()
|
||||
file, err := os.Open(s.cfg.FSDir + "/" + key)
|
||||
fileName := strconv.FormatUint(sessID, 10)
|
||||
mobFileName := fileName
|
||||
if suffix == "/devtools.mob" {
|
||||
mobFileName += "devtools"
|
||||
}
|
||||
filePath := s.cfg.FSDir + "/" + mobFileName
|
||||
|
||||
// Check the file size before loading it into memory
|
||||
info, err := os.Stat(filePath)
|
||||
if err == nil {
|
||||
if info.Size() > s.cfg.MaxFileSize {
|
||||
log.Printf("big file, size: %d, session: %d", info.Size(), sessID)
|
||||
return nil
|
||||
}
|
||||
}
|
||||
file, err := os.Open(filePath)
|
||||
if err != nil {
|
||||
sessID, _ := strconv.ParseUint(key, 10, 64)
|
||||
return fmt.Errorf("File open error: %v; sessID: %s, part: %d, sessStart: %s\n",
|
||||
err, key, sessID%16,
|
||||
err, fileName, sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
nRead, err := file.Read(s.startBytes)
|
||||
if err != nil {
|
||||
sessID, _ := strconv.ParseUint(key, 10, 64)
|
||||
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
|
||||
err,
|
||||
key,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
time.AfterFunc(s.cfg.RetryTimeout, func() {
|
||||
s.UploadKey(key, retryCount-1)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
s.readingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
|
||||
|
||||
start = time.Now()
|
||||
startReader := bytes.NewBuffer(s.startBytes[:nRead])
|
||||
if err := s.s3.Upload(s.gzipFile(startReader), key, "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: start upload failed. %v\n", err)
|
||||
}
|
||||
if nRead == s.cfg.FileSplitSize {
|
||||
if err := s.s3.Upload(s.gzipFile(file), key+"e", "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: end upload failed. %v\n", err)
|
||||
}
|
||||
}
|
||||
s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
|
||||
|
||||
// Save metrics
|
||||
var fileSize float64 = 0
|
||||
var fileSize int64 = 0
|
||||
fileInfo, err := file.Stat()
|
||||
if err != nil {
|
||||
log.Printf("can't get file info: %s", err)
|
||||
} else {
|
||||
fileSize = float64(fileInfo.Size())
|
||||
fileSize = fileInfo.Size()
|
||||
}
|
||||
|
||||
var encryptedData []byte
|
||||
fileName += suffix
|
||||
if shouldSplit {
|
||||
nRead, err := file.Read(s.startBytes)
|
||||
if err != nil {
|
||||
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
|
||||
err,
|
||||
fileName,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
time.AfterFunc(s.cfg.RetryTimeout, func() {
|
||||
s.uploadKey(sessID, suffix, shouldSplit, retryCount-1, encryptionKey)
|
||||
})
|
||||
return nil
|
||||
}
|
||||
s.readingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
|
||||
|
||||
start = time.Now()
|
||||
// Encrypt session file if we have encryption key
|
||||
if encryptionKey != "" {
|
||||
encryptedData, err = EncryptData(s.startBytes[:nRead], []byte(encryptionKey))
|
||||
if err != nil {
|
||||
log.Printf("can't encrypt data: %s", err)
|
||||
encryptedData = s.startBytes[:nRead]
|
||||
}
|
||||
} else {
|
||||
encryptedData = s.startBytes[:nRead]
|
||||
}
|
||||
// Compress and save to s3
|
||||
startReader := bytes.NewBuffer(encryptedData)
|
||||
if err := s.s3.Upload(s.gzipFile(startReader), fileName+"s", "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: start upload failed. %v\n", err)
|
||||
}
|
||||
// TODO: fix possible error (if we read less than FileSplitSize)
|
||||
if nRead == s.cfg.FileSplitSize {
|
||||
restPartSize := fileSize - int64(nRead)
|
||||
fileData := make([]byte, restPartSize)
|
||||
nRead, err = file.Read(fileData)
|
||||
if err != nil {
|
||||
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
|
||||
err,
|
||||
fileName,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
if int64(nRead) != restPartSize {
|
||||
log.Printf("can't read the rest part of file")
|
||||
}
|
||||
|
||||
// Encrypt session file if we have encryption key
|
||||
if encryptionKey != "" {
|
||||
encryptedData, err = EncryptData(fileData, []byte(encryptionKey))
|
||||
if err != nil {
|
||||
log.Printf("can't encrypt data: %s", err)
|
||||
encryptedData = fileData
|
||||
}
|
||||
} else {
|
||||
encryptedData = fileData
|
||||
}
|
||||
// Compress and save to s3
|
||||
endReader := bytes.NewBuffer(encryptedData)
|
||||
if err := s.s3.Upload(s.gzipFile(endReader), fileName+"e", "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: end upload failed. %v\n", err)
|
||||
}
|
||||
}
|
||||
s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
|
||||
} else {
|
||||
start = time.Now()
|
||||
fileData := make([]byte, fileSize)
|
||||
nRead, err := file.Read(fileData)
|
||||
if err != nil {
|
||||
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
|
||||
err,
|
||||
fileName,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
return nil
|
||||
}
|
||||
if int64(nRead) != fileSize {
|
||||
log.Printf("can't read the rest part of file")
|
||||
}
|
||||
|
||||
// Encrypt session file if we have encryption key
|
||||
if encryptionKey != "" {
|
||||
encryptedData, err = EncryptData(fileData, []byte(encryptionKey))
|
||||
if err != nil {
|
||||
log.Printf("can't encrypt data: %s", err)
|
||||
encryptedData = fileData
|
||||
}
|
||||
} else {
|
||||
encryptedData = fileData
|
||||
}
|
||||
endReader := bytes.NewBuffer(encryptedData)
|
||||
if err := s.s3.Upload(s.gzipFile(endReader), fileName, "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: end upload failed. %v\n", err)
|
||||
}
|
||||
s.archivingTime.Record(context.Background(), float64(time.Now().Sub(start).Milliseconds()))
|
||||
}
|
||||
|
||||
// Save metrics
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
|
||||
if shouldSplit {
|
||||
s.totalSessions.Add(ctx, 1)
|
||||
s.sessionDOMSize.Record(ctx, float64(fileSize))
|
||||
} else {
|
||||
s.sessionDevtoolsSize.Record(ctx, float64(fileSize))
|
||||
}
|
||||
ctx, _ := context.WithTimeout(context.Background(), time.Millisecond*200)
|
||||
|
||||
s.sessionSize.Record(ctx, fileSize)
|
||||
s.totalSessions.Add(ctx, 1)
|
||||
return nil
|
||||
}
|
||||
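On a failed read of the first part, the code above does not block and retry in place; it re-schedules the whole upload with time.AfterFunc and a decremented retryCount (the guard on retryCount presumably sits earlier in uploadKey, outside this excerpt). A self-contained illustration of that pattern, with hypothetical names, not code from this commit:

// Minimal illustration of the AfterFunc-based retry pattern used in uploadKey.
package main

import (
    "fmt"
    "time"
)

func tryUpload(attempt int, work func() error) {
    if attempt <= 0 {
        fmt.Println("giving up")
        return
    }
    if err := work(); err != nil {
        fmt.Println("retrying after error:", err)
        // Re-schedule instead of sleeping in the calling goroutine.
        time.AfterFunc(100*time.Millisecond, func() {
            tryUpload(attempt-1, work)
        })
    }
}

func main() {
    calls := 0
    tryUpload(3, func() error {
        calls++
        if calls < 3 {
            return fmt.Errorf("attempt %d failed", calls)
        }
        fmt.Println("uploaded on attempt", calls)
        return nil
    })
    time.Sleep(time.Second) // let the scheduled retries run before exiting
}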
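Both branches call EncryptData(data, []byte(encryptionKey)) and fall back to uploading the raw bytes if encryption fails. The helper itself is not part of this hunk; the following is only a minimal sketch of a function with a compatible signature, assuming an AES-GCM scheme and a 16/24/32-byte key. The cipher actually used by the service may differ.

// Sketch of an EncryptData-style helper (assumed, not taken from this commit).
package storage

import (
    "crypto/aes"
    "crypto/cipher"
    "crypto/rand"
    "fmt"
    "io"
)

func EncryptData(plainData, key []byte) ([]byte, error) {
    block, err := aes.NewCipher(key) // key must be 16, 24 or 32 bytes long
    if err != nil {
        return nil, fmt.Errorf("can't create cipher: %w", err)
    }
    gcm, err := cipher.NewGCM(block)
    if err != nil {
        return nil, fmt.Errorf("can't create GCM: %w", err)
    }
    nonce := make([]byte, gcm.NonceSize())
    if _, err := io.ReadFull(rand.Reader, nonce); err != nil {
        return nil, fmt.Errorf("can't generate nonce: %w", err)
    }
    // Prepend the nonce so a decrypting reader can split it off before gcm.Open.
    return gcm.Seal(nonce, nonce, plainData, nil), nil
}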
73
backend/pkg/db/cache/cache.go
vendored
Normal file

@ -0,0 +1,73 @@
package cache

import (
    "log"
    "openreplay/backend/pkg/db/postgres"
    "openreplay/backend/pkg/db/types"
    "sync"
    "time"
)

type SessionMeta struct {
    *types.Session
    lastUse time.Time
}

type ProjectMeta struct {
    *types.Project
    expirationTime time.Time
}

type Cache interface {
    SetSession(sess *types.Session)
    HasSession(sessID uint64) bool
    GetSession(sessID uint64) (*types.Session, error)
    GetProject(projectID uint32) (*types.Project, error)
    GetProjectByKey(projectKey string) (*types.Project, error)
}

type cacheImpl struct {
    conn                     *postgres.Conn
    mutex                    sync.RWMutex
    sessions                 map[uint64]*SessionMeta
    projects                 map[uint32]*ProjectMeta
    projectsByKeys           sync.Map
    projectExpirationTimeout time.Duration
}

func NewCache(conn *postgres.Conn, projectExpirationTimeoutMs int64) Cache {
    newCache := &cacheImpl{
        conn:                     conn,
        sessions:                 make(map[uint64]*SessionMeta),
        projects:                 make(map[uint32]*ProjectMeta),
        projectExpirationTimeout: time.Duration(1000 * projectExpirationTimeoutMs),
    }
    go newCache.cleaner()
    return newCache
}

func (c *cacheImpl) cleaner() {
    cleanTick := time.Tick(time.Minute * 5)
    for {
        select {
        case <-cleanTick:
            c.clearCache()
        }
    }
}

func (c *cacheImpl) clearCache() {
    c.mutex.Lock()
    defer c.mutex.Unlock()

    now := time.Now()
    cacheSize := len(c.sessions)
    deleted := 0
    for id, sess := range c.sessions {
        if now.Sub(sess.lastUse).Minutes() > 3 {
            deleted++
            delete(c.sessions, id)
        }
    }
    log.Printf("cache cleaner: deleted %d/%d sessions", deleted, cacheSize)
}
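cache.go above only defines the data types, the constructor and the cleaner; the accessors declared in the Cache interface are implemented elsewhere in the package and are not shown in this diff. A rough sketch of what SetSession, HasSession and GetSession could look like on cacheImpl; the lazy Postgres fallback and the Conn method name are assumptions, not code from this commit:

package cache

import (
    "time"

    "openreplay/backend/pkg/db/types"
)

// Sketch only, not the implementation shipped in this commit.
func (c *cacheImpl) SetSession(sess *types.Session) {
    if sess == nil {
        return
    }
    c.mutex.Lock()
    defer c.mutex.Unlock()
    c.sessions[sess.SessionID] = &SessionMeta{Session: sess, lastUse: time.Now()}
}

func (c *cacheImpl) HasSession(sessID uint64) bool {
    c.mutex.RLock()
    defer c.mutex.RUnlock()
    _, ok := c.sessions[sessID]
    return ok
}

func (c *cacheImpl) GetSession(sessID uint64) (*types.Session, error) {
    c.mutex.Lock()
    if meta, ok := c.sessions[sessID]; ok {
        meta.lastUse = time.Now() // keeps hot sessions away from the 3-minute cleaner
        c.mutex.Unlock()
        return meta.Session, nil
    }
    c.mutex.Unlock()
    // Cache miss: fall back to Postgres. The exact Conn method name is an assumption.
    sess, err := c.conn.GetSession(sessID)
    if err != nil {
        return nil, err
    }
    c.SetSession(sess)
    return sess, nil
}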
12
backend/pkg/db/cache/messages-common.go
vendored

@ -4,23 +4,25 @@ import (
"log"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"time"
|
||||
// . "openreplay/backend/pkg/db/types"
|
||||
)
|
||||
|
||||
func (c *PGCache) InsertSessionEnd(sessionID uint64, timestamp uint64) (uint64, error) {
|
||||
return c.Conn.InsertSessionEnd(sessionID, timestamp)
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertSessionEncryptionKey(sessionID uint64, key []byte) error {
|
||||
return c.Conn.InsertSessionEncryptionKey(sessionID, key)
|
||||
}
|
||||
|
||||
func (c *PGCache) HandleSessionEnd(sessionID uint64) error {
|
||||
if err := c.Conn.HandleSessionEnd(sessionID); err != nil {
|
||||
log.Printf("can't handle session end: %s", err)
|
||||
}
|
||||
c.DeleteSession(sessionID)
|
||||
return nil
|
||||
}
|
||||
|
||||
func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
|
||||
session, err := c.GetSession(sessionID)
|
||||
session, err := c.Cache.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
@ -28,11 +30,11 @@ func (c *PGCache) InsertIssueEvent(sessionID uint64, crash *IssueEvent) error {
|
|||
}
|
||||
|
||||
func (c *PGCache) InsertMetadata(sessionID uint64, metadata *Metadata) error {
|
||||
session, err := c.GetSession(sessionID)
|
||||
session, err := c.Cache.GetSession(sessionID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
project, err := c.GetProject(session.ProjectID)
|
||||
project, err := c.Cache.GetProject(session.ProjectID)
|
||||
if err != nil {
|
||||
return err
|
||||
}
|
||||
|
|
|
|||
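The repeated change from c.GetSession to c.Cache.GetSession (and c.GetProject to c.Cache.GetProject) shows that PGCache now delegates lookups to the Cache interface from cache.go instead of owning the session map itself, while inserts still go through the Postgres connection. The struct definition lives in a file not shown in this excerpt; its assumed shape, for orientation only:

package cache

import "openreplay/backend/pkg/db/postgres"

// Assumed shape of PGCache after this change (the real definition is outside
// this excerpt): raw inserts go through the embedded *postgres.Conn, while
// session/project lookups go through the in-memory Cache.
type PGCache struct {
    *postgres.Conn
    Cache Cache
}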
22
backend/pkg/db/cache/messages-ios.go
vendored

@ -1,16 +1,16 @@
package cache

import (
    "errors"
    "fmt"
    . "openreplay/backend/pkg/db/types"
    . "openreplay/backend/pkg/messages"
)

func (c *PGCache) InsertIOSSessionStart(sessionID uint64, s *IOSSessionStart) error {
    if c.sessions[sessionID] != nil {
        return errors.New("This session already in cache!")
    if c.Cache.HasSession(sessionID) {
        return fmt.Errorf("session %d already in cache", sessionID)
    }
    c.sessions[sessionID] = &Session{
    newSess := &Session{
        SessionID: sessionID,
        Platform:  "ios",
        Timestamp: s.Timestamp,
@ -24,8 +24,10 @@ func (c *PGCache) InsertIOSSessionStart(sessionID uint64, s *IOSSessionStart) er
        UserCountry:    s.UserCountry,
        UserDeviceType: s.UserDeviceType,
    }
    if err := c.Conn.InsertSessionStart(sessionID, c.sessions[sessionID]); err != nil {
        c.sessions[sessionID] = nil
    c.Cache.SetSession(newSess)
    if err := c.Conn.InsertSessionStart(sessionID, newSess); err != nil {
        // don't know why?
        c.Cache.SetSession(nil)
        return err
    }
    return nil
@ -40,7 +42,7 @@ func (c *PGCache) InsertIOSScreenEnter(sessionID uint64, screenEnter *IOSScreenE
    if err := c.Conn.InsertIOSScreenEnter(sessionID, screenEnter); err != nil {
        return err
    }
    session, err := c.GetSession(sessionID)
    session, err := c.Cache.GetSession(sessionID)
    if err != nil {
        return err
    }
@ -52,7 +54,7 @@ func (c *PGCache) InsertIOSClickEvent(sessionID uint64, clickEvent *IOSClickEven
    if err := c.Conn.InsertIOSClickEvent(sessionID, clickEvent); err != nil {
        return err
    }
    session, err := c.GetSession(sessionID)
    session, err := c.Cache.GetSession(sessionID)
    if err != nil {
        return err
    }
@ -64,7 +66,7 @@ func (c *PGCache) InsertIOSInputEvent(sessionID uint64, inputEvent *IOSInputEven
    if err := c.Conn.InsertIOSInputEvent(sessionID, inputEvent); err != nil {
        return err
    }
    session, err := c.GetSession(sessionID)
    session, err := c.Cache.GetSession(sessionID)
    if err != nil {
        return err
    }
@ -73,7 +75,7 @@ func (c *PGCache) InsertIOSInputEvent(sessionID uint64, inputEvent *IOSInputEven
    }

func (c *PGCache) InsertIOSCrash(sessionID uint64, crash *IOSCrash) error {
    session, err := c.GetSession(sessionID)
    session, err := c.Cache.GetSession(sessionID)
    if err != nil {
        return err
    }
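One detail worth noting in the rollback branch of InsertIOSSessionStart above: the old code cleared the map slot directly (c.sessions[sessionID] = nil), while the new code calls c.Cache.SetSession(nil), which passes no session at all and therefore cannot target the entry that SetSession(newSess) just stored; the "// don't know why?" comment in the diff hints at the same doubt. A hypothetical eviction helper, not part of this commit, that such an error path could use instead:

// Hypothetical addition to cacheImpl; not part of this diff.
func (c *cacheImpl) DeleteSession(sessID uint64) {
    c.mutex.Lock()
    defer c.mutex.Unlock()
    delete(c.sessions, sessID)
}

If this were exposed on the Cache interface, the failure branch could call c.Cache.DeleteSession(sessionID) instead of c.Cache.SetSession(nil).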
Some files were not shown because too many files have changed in this diff.