Merge remote-tracking branch 'origin/dev' into api-v1.6.1
# Conflicts: # api/Dockerfile # api/development.md # backend/Dockerfile.bundle # backend/build.sh # backend/development.md # backend/internal/assets/jsexception.go # backend/internal/handlers/ios/performanceAggregator.go # backend/pkg/intervals/intervals.go # backend/pkg/log/queue.go # backend/pkg/messages/filters.go # backend/pkg/messages/legacy-message-transform.go # backend/pkg/messages/messages.go # backend/pkg/messages/read-message.go # backend/services/db/heuristics/anr.go # backend/services/db/heuristics/clickrage.go # backend/services/db/heuristics/heuristics.go # backend/services/db/heuristics/readyMessageStore.go # backend/services/db/heuristics/session.go # backend/services/db/stats.go # backend/services/ender/builder/builderMap.go # backend/services/ender/builder/clikRageDetector.go # backend/services/ender/builder/cpuIssueFinder.go # backend/services/ender/builder/deadClickDetector.go # backend/services/ender/builder/domDropDetector.go # backend/services/ender/builder/inputEventBuilder.go # backend/services/ender/builder/memoryIssueFinder.go # backend/services/ender/builder/pageEventBuilder.go # backend/services/ender/builder/performanceTrackAggrBuilder.go # backend/services/http/assets.go # backend/services/http/handlers-depricated.go # backend/services/http/ios-device.go # backend/services/integrations/clientManager/manager.go # backend/services/storage/gzip.go # backend/services/storage/main.go # ee/api/clean.sh # scripts/helmcharts/local_deploy.sh # scripts/helmcharts/vars.yaml
This commit is contained in:
commit
a6c75d3cdd
1385 changed files with 28101 additions and 64780 deletions
58
.github/workflows/api-ee.yaml
vendored
58
.github/workflows/api-ee.yaml
vendored
|
|
@ -1,5 +1,6 @@
|
|||
# This action will push the chalice changes to aws
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
|
|
@ -31,27 +32,64 @@ jobs:
|
|||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
|
||||
- name: Building and Pusing api image
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ee-${{ github.sha }}
|
||||
IMAGE_TAG: ${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
cd api
|
||||
PUSH_IMAGE=1 bash build.sh ee
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helm/
|
||||
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
|
||||
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
|
||||
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
|
||||
bash kube-install.sh --app chalice
|
||||
cd scripts/helmcharts/
|
||||
|
||||
## Update secerts
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ee-${{ github.sha }}
|
||||
# We're not passing -ee flag, because helm will add that.
|
||||
IMAGE_TAG: ${{ github.sha }}
|
||||
ENVIRONMENT: staging
|
||||
|
||||
# - name: Debug Job
|
||||
|
|
@ -59,6 +97,6 @@ jobs:
|
|||
# uses: mxschmitt/action-tmate@v3
|
||||
# env:
|
||||
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
# IMAGE_TAG: ee-${{ github.sha }}
|
||||
# IMAGE_TAG: ${{ github.sha }}-ee
|
||||
# ENVIRONMENT: staging
|
||||
#
|
||||
|
|
|
|||
49
.github/workflows/api.yaml
vendored
49
.github/workflows/api.yaml
vendored
|
|
@ -3,6 +3,7 @@ on:
|
|||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
- api-v1.5.5
|
||||
paths:
|
||||
- api/**
|
||||
|
|
@ -32,6 +33,12 @@ jobs:
|
|||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
|
||||
- name: Building and Pusing api image
|
||||
id: build-image
|
||||
env:
|
||||
|
|
@ -41,15 +48,43 @@ jobs:
|
|||
run: |
|
||||
cd api
|
||||
PUSH_IMAGE=1 bash build.sh
|
||||
- name: Creating old image input
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
tag: ${image_array[1]}
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Deploy to kubernetes
|
||||
run: |
|
||||
cd scripts/helm/
|
||||
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
|
||||
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
|
||||
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
|
||||
bash kube-install.sh --app chalice
|
||||
cd scripts/helmcharts/
|
||||
|
||||
## Update secerts
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
|
||||
# Update changed image tag
|
||||
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
# Deploy command
|
||||
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
IMAGE_TAG: ${{ github.sha }}
|
||||
|
|
|
|||
1
.github/workflows/utilities.yaml
vendored
1
.github/workflows/utilities.yaml
vendored
|
|
@ -1,5 +1,6 @@
|
|||
# This action will push the utilities changes to aws
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
|
|
|
|||
80
.github/workflows/workers-ee.yaml
vendored
80
.github/workflows/workers-ee.yaml
vendored
|
|
@ -1,6 +1,7 @@
|
|||
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
|
|
@ -33,11 +34,16 @@ jobs:
|
|||
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- name: Build, tag, and Deploy to k8s
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
- name: Build, tag
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
|
||||
IMAGE_TAG: ee-${{ github.sha }}
|
||||
IMAGE_TAG: ${{ github.sha }}-ee
|
||||
ENVIRONMENT: staging
|
||||
run: |
|
||||
#
|
||||
|
|
@ -47,35 +53,79 @@ jobs:
|
|||
#
|
||||
# Getting the images to build
|
||||
#
|
||||
git diff --name-only HEAD HEAD~1 | grep backend/services | cut -d '/' -f3 | uniq > backend/images_to_build.txt
|
||||
[[ $(cat backend/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
|
||||
set -x
|
||||
{
|
||||
git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3
|
||||
|
||||
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
|
||||
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
|
||||
done
|
||||
} | uniq > /tmp/images_to_build.txt
|
||||
|
||||
[[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
|
||||
#
|
||||
# Pushing image to registry
|
||||
#
|
||||
cd backend
|
||||
for image in $(cat images_to_build.txt);
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
echo "Bulding $image"
|
||||
PUSH_IMAGE=1 bash -x ./build.sh ee $image
|
||||
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
|
||||
done
|
||||
|
||||
- name: Creating old image input
|
||||
env:
|
||||
IMAGE_TAG: ${{ github.sha }}
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
# We've to strip off the -ee, as helm will append it.
|
||||
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Deploying to kuberntes
|
||||
env:
|
||||
# We're not passing -ee flag, because helm will add that.
|
||||
IMAGE_TAG: ${{ github.sha }}
|
||||
run: |
|
||||
#
|
||||
# Deploying image to environment.
|
||||
#
|
||||
cd ../scripts/helm/
|
||||
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#jwt_secret_key.*#jwt_secret_key: \"${{ secrets.EE_JWT_SECRET }}\" #g" vars.yaml
|
||||
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
|
||||
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
|
||||
for image in $(cat ../../backend/images_to_build.txt);
|
||||
cd scripts/helmcharts/
|
||||
|
||||
## Update secerts
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
|
||||
|
||||
## Update images
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
|
||||
# Deploy command
|
||||
bash openreplay-cli --install $image
|
||||
sed -i "/${image}/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
done
|
||||
|
||||
cat /tmp/image_override.yaml
|
||||
|
||||
# Deploy command
|
||||
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml
|
||||
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
|
|
|
|||
71
.github/workflows/workers.yaml
vendored
71
.github/workflows/workers.yaml
vendored
|
|
@ -1,6 +1,7 @@
|
|||
# Ref: https://docs.github.com/en/actions/reference/workflow-syntax-for-github-actions
|
||||
|
||||
on:
|
||||
workflow_dispatch:
|
||||
push:
|
||||
branches:
|
||||
- dev
|
||||
|
|
@ -33,7 +34,12 @@ jobs:
|
|||
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
|
||||
id: setcontext
|
||||
|
||||
- name: Build, tag, and Deploy to k8s
|
||||
# Caching docker images
|
||||
- uses: satackey/action-docker-layer-caching@v0.0.11
|
||||
# Ignore the failure of a step and avoid terminating the job.
|
||||
continue-on-error: true
|
||||
|
||||
- name: Build, tag
|
||||
id: build-image
|
||||
env:
|
||||
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
|
||||
|
|
@ -47,42 +53,73 @@ jobs:
|
|||
#
|
||||
# Getting the images to build
|
||||
#
|
||||
|
||||
set -x
|
||||
{
|
||||
git diff --name-only HEAD HEAD~1 | grep backend/services | grep -vE ^ee/ | cut -d '/' -f3
|
||||
git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3
|
||||
|
||||
git diff --name-only HEAD HEAD~1 | grep backend/pkg | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
|
||||
grep -rl "pkg/$pkg_name" backend/services | cut -d '/' -f3
|
||||
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
|
||||
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
|
||||
done
|
||||
} | uniq > backend/images_to_build.txt
|
||||
} | uniq > /tmp/images_to_build.txt
|
||||
|
||||
[[ $(cat backend/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
|
||||
[[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
|
||||
#
|
||||
# Pushing image to registry
|
||||
#
|
||||
cd backend
|
||||
for image in $(cat images_to_build.txt);
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
echo "Bulding $image"
|
||||
PUSH_IMAGE=1 bash -x ./build.sh skip $image
|
||||
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
|
||||
done
|
||||
|
||||
- name: Creating old image input
|
||||
env:
|
||||
IMAGE_TAG: ${{ github.sha }}
|
||||
run: |
|
||||
#
|
||||
# Create yaml with existing image tags
|
||||
#
|
||||
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
|
||||
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
|
||||
|
||||
echo > /tmp/image_override.yaml
|
||||
|
||||
for line in `cat /tmp/image_tag.txt`;
|
||||
do
|
||||
image_array=($(echo "$line" | tr ':' '\n'))
|
||||
cat <<EOF >> /tmp/image_override.yaml
|
||||
${image_array[0]}:
|
||||
image:
|
||||
tag: ${image_array[1]}
|
||||
EOF
|
||||
done
|
||||
|
||||
- name: Deploying to kuberntes
|
||||
env:
|
||||
IMAGE_TAG: ${{ github.sha }}
|
||||
run: |
|
||||
#
|
||||
# Deploying image to environment.
|
||||
#
|
||||
cd ../scripts/helm/
|
||||
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
|
||||
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
|
||||
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
|
||||
for image in $(cat ../../backend/images_to_build.txt);
|
||||
cd scripts/helmcharts/
|
||||
|
||||
## Update secerts
|
||||
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
|
||||
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
|
||||
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
|
||||
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
|
||||
## Update images
|
||||
for image in $(cat /tmp/images_to_build.txt);
|
||||
do
|
||||
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
|
||||
# Deploy command
|
||||
bash kube-install.sh --app $image
|
||||
sed -i "/${image}/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
|
||||
done
|
||||
|
||||
# Deploy command
|
||||
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml
|
||||
|
||||
# - name: Debug Job
|
||||
# if: ${{ failure() }}
|
||||
# uses: mxschmitt/action-tmate@v3
|
||||
|
|
|
|||
|
|
@ -7,7 +7,6 @@
|
|||
### Development environment
|
||||
|
||||
```bash
|
||||
**FOSS:**
|
||||
cd openreplay/api
|
||||
# Make your own copy of .env file and edit it as you want
|
||||
cp .env.dev .env
|
||||
|
|
@ -17,18 +16,6 @@ mkdir .venv
|
|||
|
||||
# Installing dependencies (pipenv will detect the .venv folder and use it as a target)
|
||||
pipenv install -r requirements.txt [--skip-lock]
|
||||
|
||||
# Create a .venv folder to contain all you dependencies
|
||||
mkdir .venv
|
||||
|
||||
# Installing dependencies (pipenv will detect the .venv folder and use it as a target)
|
||||
pipenv install -r requirements.txt [--skip-lock]
|
||||
|
||||
# These commands must bu used everytime you make changes to FOSS.
|
||||
# To clean the unused files before getting new ones
|
||||
bash clean.sh
|
||||
# To copy commun files from FOSS
|
||||
bash prepare-dev.sh
|
||||
```
|
||||
|
||||
### Building and deploying locally
|
||||
|
|
|
|||
|
|
@ -10,13 +10,15 @@ RUN go mod download
|
|||
|
||||
|
||||
FROM prepare AS build
|
||||
COPY cmd cmd
|
||||
COPY pkg pkg
|
||||
COPY services services
|
||||
COPY internal internal
|
||||
|
||||
ARG SERVICE_NAME
|
||||
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/services/$SERVICE_NAME
|
||||
RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/cmd/$SERVICE_NAME
|
||||
|
||||
FROM alpine
|
||||
|
||||
FROM alpine AS entrypoint
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
ENV TZ=UTC \
|
||||
|
|
@ -28,7 +30,7 @@ ENV TZ=UTC \
|
|||
BEACON_SIZE_LIMIT=7000000 \
|
||||
KAFKA_USE_SSL=true \
|
||||
KAFKA_MAX_POLL_INTERVAL_MS=400000 \
|
||||
REDIS_STREAMS_MAX_LEN=3000 \
|
||||
REDIS_STREAMS_MAX_LEN=5000 \
|
||||
TOPIC_RAW_WEB=raw \
|
||||
TOPIC_RAW_IOS=raw-ios \
|
||||
TOPIC_CACHE=cache \
|
||||
|
|
@ -39,12 +41,14 @@ ENV TZ=UTC \
|
|||
GROUP_DB=db \
|
||||
GROUP_ENDER=ender \
|
||||
GROUP_CACHE=cache \
|
||||
GROUP_HEURISTICS=heuristics \
|
||||
AWS_REGION_WEB=eu-central-1 \
|
||||
AWS_REGION_IOS=eu-west-1 \
|
||||
AWS_REGION_ASSETS=eu-central-1 \
|
||||
CACHE_ASSETS=true \
|
||||
ASSETS_SIZE_LIMIT=6291456 \
|
||||
FS_CLEAN_HRS=72 \
|
||||
FILE_SPLIT_SIZE=300000 \
|
||||
LOG_QUEUE_STATS_INTERVAL_SEC=60
|
||||
|
||||
|
||||
|
|
|
|||
|
|
@ -10,13 +10,13 @@ RUN go mod download
|
|||
|
||||
|
||||
FROM prepare AS build
|
||||
COPY cmd cmd
|
||||
COPY pkg pkg
|
||||
COPY services services
|
||||
COPY internal internal
|
||||
|
||||
RUN for name in assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags musl openreplay/backend/services/$name; done
|
||||
RUN for name in assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags musl openreplay/backend/cmd/$name; done
|
||||
|
||||
|
||||
FROM alpine
|
||||
FROM alpine AS entrypoint
|
||||
#FROM pygmy/alpine-tini:latest
|
||||
RUN apk add --no-cache ca-certificates
|
||||
|
||||
|
|
@ -46,6 +46,7 @@ ENV TZ=UTC \
|
|||
CACHE_ASSETS=true \
|
||||
ASSETS_SIZE_LIMIT=6291456 \
|
||||
FS_CLEAN_HRS=12 \
|
||||
FILE_SPLIT_SIZE=300000 \
|
||||
LOG_QUEUE_STATS_INTERVAL_SEC=60
|
||||
|
||||
RUN mkdir $FS_DIR
|
||||
|
|
|
|||
24
backend/build.sh
Normal file → Executable file
24
backend/build.sh
Normal file → Executable file
|
|
@ -18,6 +18,16 @@ check_prereq() {
|
|||
return
|
||||
}
|
||||
|
||||
function build_service() {
|
||||
image="$1"
|
||||
echo "BUILDING $image"
|
||||
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --platform linux/amd64 --build-arg SERVICE_NAME=$image .
|
||||
[[ $PUSH_IMAGE -eq 1 ]] && {
|
||||
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
function build_api(){
|
||||
# Copy enterprise code
|
||||
[[ $1 == "ee" ]] && {
|
||||
|
|
@ -25,20 +35,12 @@ function build_api(){
|
|||
ee="true"
|
||||
}
|
||||
[[ $2 != "" ]] && {
|
||||
image="$2"
|
||||
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --build-arg SERVICE_NAME=$image .
|
||||
[[ $PUSH_IMAGE -eq 1 ]] && {
|
||||
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
|
||||
}
|
||||
echo "build completed for http"
|
||||
build_service $2
|
||||
return
|
||||
}
|
||||
for image in $(ls services);
|
||||
for image in $(ls cmd);
|
||||
do
|
||||
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --build-arg SERVICE_NAME=$image .
|
||||
[[ $PUSH_IMAGE -eq 1 ]] && {
|
||||
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
|
||||
}
|
||||
build_service $image
|
||||
echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
|
||||
done
|
||||
echo "backend build completed"
|
||||
|
|
|
|||
|
|
@ -2,35 +2,34 @@ package main
|
|||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/internal/assets"
|
||||
"openreplay/backend/internal/assets/cacher"
|
||||
config "openreplay/backend/internal/config/assets"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/services/assets/cacher"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
GROUP_CACHE := env.String("GROUP_CACHE")
|
||||
TOPIC_CACHE := env.String("TOPIC_CACHE")
|
||||
cfg := config.New()
|
||||
|
||||
cacher := cacher.NewCacher(
|
||||
env.String("AWS_REGION"),
|
||||
env.String("S3_BUCKET_ASSETS"),
|
||||
env.String("ASSETS_ORIGIN"),
|
||||
env.Int("ASSETS_SIZE_LIMIT"),
|
||||
cfg.AWSRegion,
|
||||
cfg.S3BucketAssets,
|
||||
cfg.AssetsOrigin,
|
||||
cfg.AssetsSizeLimit,
|
||||
)
|
||||
|
||||
consumer := queue.NewMessageConsumer(
|
||||
GROUP_CACHE,
|
||||
[]string{TOPIC_CACHE},
|
||||
cfg.GroupCache,
|
||||
[]string{cfg.TopicCache},
|
||||
func(sessionID uint64, message messages.Message, e *types.Meta) {
|
||||
switch msg := message.(type) {
|
||||
case *messages.AssetCache:
|
||||
|
|
@ -39,7 +38,7 @@ func main() {
|
|||
if msg.Source != "js_exception" {
|
||||
return
|
||||
}
|
||||
sourceList, err := extractJSExceptionSources(&msg.Payload)
|
||||
sourceList, err := assets.ExtractJSExceptionSources(&msg.Payload)
|
||||
if err != nil {
|
||||
log.Printf("Error on source extraction: %v", err)
|
||||
return
|
||||
|
|
@ -52,12 +51,12 @@ func main() {
|
|||
true,
|
||||
)
|
||||
|
||||
tick := time.Tick(20 * time.Minute)
|
||||
log.Printf("Cacher service started\n")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
log.Printf("Cacher service started\n")
|
||||
tick := time.Tick(20 * time.Minute)
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
136
backend/cmd/db/main.go
Normal file
136
backend/cmd/db/main.go
Normal file
|
|
@ -0,0 +1,136 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"openreplay/backend/internal/builder"
|
||||
"openreplay/backend/internal/config/db"
|
||||
"openreplay/backend/internal/datasaver"
|
||||
"openreplay/backend/internal/handlers"
|
||||
"openreplay/backend/internal/handlers/custom"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
logger "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := db.New()
|
||||
|
||||
// Init database
|
||||
pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres), cfg.ProjectExpirationTimeoutMs)
|
||||
defer pg.Close()
|
||||
|
||||
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
|
||||
handlersFabric := func() []handlers.MessageProcessor {
|
||||
return []handlers.MessageProcessor{
|
||||
&custom.EventMapper{},
|
||||
custom.NewInputEventBuilder(),
|
||||
custom.NewPageEventBuilder(),
|
||||
}
|
||||
}
|
||||
|
||||
// Create handler's aggregator
|
||||
builderMap := builder.NewBuilderMap(handlersFabric)
|
||||
|
||||
// Init modules
|
||||
saver := datasaver.New(pg)
|
||||
saver.InitStats()
|
||||
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
|
||||
|
||||
// Handler logic
|
||||
handler := func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
statsLogger.Collect(sessionID, meta)
|
||||
|
||||
// Check if session in db and get session info for the following stats insertion (actually for CH case only)
|
||||
session, err := pg.GetSession(sessionID)
|
||||
if session == nil {
|
||||
if err != nil {
|
||||
log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, sessionID, msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Just save session data into db without additional checks
|
||||
if err := saver.InsertMessage(sessionID, msg); err != nil {
|
||||
if !postgres.IsPkeyViolation(err) {
|
||||
log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, sessionID, msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
// Save statistics to db
|
||||
err = saver.InsertStats(session, msg)
|
||||
if err != nil {
|
||||
log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
|
||||
}
|
||||
|
||||
// Handle heuristics and save to temporary queue in memory
|
||||
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
|
||||
|
||||
// Process saved heuristics messages as usual messages above in the code
|
||||
builderMap.IterateSessionReadyMessages(sessionID, func(msg messages.Message) {
|
||||
// TODO: DRY code (carefully with the return statement logic)
|
||||
if err := saver.InsertMessage(sessionID, msg); err != nil {
|
||||
if !postgres.IsPkeyViolation(err) {
|
||||
log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
if err := saver.InsertStats(session, msg); err != nil {
|
||||
log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
|
||||
}
|
||||
})
|
||||
}
|
||||
|
||||
// Init consumer
|
||||
consumer := queue.NewMessageConsumer(
|
||||
cfg.GroupDB,
|
||||
[]string{
|
||||
cfg.TopicRawWeb,
|
||||
cfg.TopicRawIOS,
|
||||
cfg.TopicTrigger, // to receive SessionEnd events
|
||||
},
|
||||
handler,
|
||||
false,
|
||||
)
|
||||
|
||||
log.Printf("Db service started\n")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
commitTick := time.Tick(cfg.CommitBatchTimeout)
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
log.Printf("Caught signal %v: terminating\n", sig)
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-commitTick:
|
||||
pg.CommitBatches()
|
||||
if err := saver.CommitStats(); err != nil {
|
||||
log.Printf("Error on stats commit: %v", err)
|
||||
}
|
||||
// TODO?: separate stats & regular messages
|
||||
if err := consumer.Commit(); err != nil {
|
||||
log.Printf("Error on consumer commit: %v", err)
|
||||
}
|
||||
default:
|
||||
err := consumer.ConsumeNext()
|
||||
if err != nil {
|
||||
log.Fatalf("Error on consumption: %v", err) // TODO: is always fatal?
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
86
backend/cmd/ender/main.go
Normal file
86
backend/cmd/ender/main.go
Normal file
|
|
@ -0,0 +1,86 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"openreplay/backend/internal/config/ender"
|
||||
"openreplay/backend/internal/sessionender"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/intervals"
|
||||
logger "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
)
|
||||
|
||||
func main() {
|
||||
metrics := monitoring.New("ender")
|
||||
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
// Load service configuration
|
||||
cfg := ender.New()
|
||||
|
||||
// Init all modules
|
||||
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
|
||||
sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT)
|
||||
if err != nil {
|
||||
log.Printf("can't init ender service: %s", err)
|
||||
return
|
||||
}
|
||||
producer := queue.NewProducer()
|
||||
consumer := queue.NewMessageConsumer(
|
||||
cfg.GroupEnder,
|
||||
[]string{
|
||||
cfg.TopicRawWeb,
|
||||
cfg.TopicRawIOS,
|
||||
},
|
||||
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
statsLogger.Collect(sessionID, meta)
|
||||
sessions.UpdateSession(sessionID, messages.GetTimestamp(msg))
|
||||
},
|
||||
false,
|
||||
)
|
||||
|
||||
log.Printf("Ender service started\n")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
log.Printf("Caught signal %v: terminating\n", sig)
|
||||
producer.Close(cfg.ProducerTimeout)
|
||||
if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
|
||||
log.Printf("can't commit messages with offset: %s", err)
|
||||
}
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-tick:
|
||||
// Find ended sessions and send notification to other services
|
||||
sessions.HandleEndedSessions(func(sessionID uint64, timestamp int64) bool {
|
||||
msg := &messages.SessionEnd{Timestamp: uint64(timestamp)}
|
||||
if err := producer.Produce(cfg.TopicTrigger, sessionID, messages.Encode(msg)); err != nil {
|
||||
log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, sessionID)
|
||||
return false
|
||||
}
|
||||
return true
|
||||
})
|
||||
producer.Flush(cfg.ProducerTimeout)
|
||||
if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
|
||||
log.Printf("can't commit messages with offset: %s", err)
|
||||
}
|
||||
default:
|
||||
if err := consumer.ConsumeNext(); err != nil {
|
||||
log.Fatalf("Error on consuming: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
95
backend/cmd/heuristics/main.go
Normal file
95
backend/cmd/heuristics/main.go
Normal file
|
|
@ -0,0 +1,95 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"openreplay/backend/internal/builder"
|
||||
"openreplay/backend/internal/config/heuristics"
|
||||
"openreplay/backend/internal/handlers"
|
||||
"openreplay/backend/internal/handlers/custom"
|
||||
"openreplay/backend/internal/handlers/ios"
|
||||
"openreplay/backend/internal/handlers/web"
|
||||
"openreplay/backend/pkg/intervals"
|
||||
logger "openreplay/backend/pkg/log"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
"time"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
// Load service configuration
|
||||
cfg := heuristics.New()
|
||||
|
||||
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
|
||||
handlersFabric := func() []handlers.MessageProcessor {
|
||||
return []handlers.MessageProcessor{
|
||||
// web handlers
|
||||
&web.ClickRageDetector{},
|
||||
&web.CpuIssueDetector{},
|
||||
&web.DeadClickDetector{},
|
||||
&web.MemoryIssueDetector{},
|
||||
&web.NetworkIssueDetector{},
|
||||
&web.PerformanceAggregator{},
|
||||
// iOS handlers
|
||||
&ios.AppNotResponding{},
|
||||
&ios.ClickRageDetector{},
|
||||
&ios.PerformanceAggregator{},
|
||||
// Other handlers (you can add your custom handlers here)
|
||||
&custom.CustomHandler{},
|
||||
}
|
||||
}
|
||||
|
||||
// Create handler's aggregator
|
||||
builderMap := builder.NewBuilderMap(handlersFabric)
|
||||
|
||||
// Init logger
|
||||
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
|
||||
|
||||
// Init producer and consumer for data bus
|
||||
producer := queue.NewProducer()
|
||||
consumer := queue.NewMessageConsumer(
|
||||
cfg.GroupHeuristics,
|
||||
[]string{
|
||||
cfg.TopicRawWeb,
|
||||
cfg.TopicRawIOS,
|
||||
cfg.TopicTrigger, // to receive SessionEnd events
|
||||
},
|
||||
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
statsLogger.Collect(sessionID, meta)
|
||||
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
|
||||
},
|
||||
false,
|
||||
)
|
||||
|
||||
log.Printf("Heuristics service started\n")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
log.Printf("Caught signal %v: terminating\n", sig)
|
||||
producer.Close(cfg.ProducerTimeout)
|
||||
consumer.Commit()
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-tick:
|
||||
builderMap.IterateReadyMessages(func(sessionID uint64, readyMsg messages.Message) {
|
||||
producer.Produce(cfg.TopicTrigger, sessionID, messages.Encode(readyMsg))
|
||||
})
|
||||
producer.Flush(cfg.ProducerTimeout)
|
||||
consumer.Commit()
|
||||
default:
|
||||
if err := consumer.ConsumeNext(); err != nil {
|
||||
log.Fatalf("Error on consuming: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
63
backend/cmd/http/main.go
Normal file
63
backend/cmd/http/main.go
Normal file
|
|
@ -0,0 +1,63 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/internal/config"
|
||||
"openreplay/backend/internal/router"
|
||||
"openreplay/backend/internal/server"
|
||||
"openreplay/backend/internal/services"
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/pprof"
|
||||
"openreplay/backend/pkg/queue"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
pprof.StartProfilingServer()
|
||||
|
||||
// Load configuration
|
||||
cfg := config.New()
|
||||
|
||||
// Connect to queue
|
||||
producer := queue.NewProducer()
|
||||
defer producer.Close(15000)
|
||||
|
||||
// Connect to database
|
||||
dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres), 1000*60*20)
|
||||
defer dbConn.Close()
|
||||
|
||||
// Build all services
|
||||
services := services.New(cfg, producer, dbConn)
|
||||
|
||||
// Init server's routes
|
||||
router, err := router.NewRouter(cfg, services)
|
||||
if err != nil {
|
||||
log.Fatalf("failed while creating engine: %s", err)
|
||||
}
|
||||
|
||||
// Init server
|
||||
server, err := server.New(router.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
|
||||
if err != nil {
|
||||
log.Fatalf("failed while creating server: %s", err)
|
||||
}
|
||||
|
||||
// Run server
|
||||
go func() {
|
||||
if err := server.Start(); err != nil {
|
||||
log.Fatalf("Server error: %v\n", err)
|
||||
}
|
||||
}()
|
||||
log.Printf("Server successfully started on port %v\n", cfg.HTTPPort)
|
||||
|
||||
// Wait stop signal to shut down server gracefully
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
<-sigchan
|
||||
log.Printf("Shutting down the server\n")
|
||||
server.Stop()
|
||||
}
|
||||
|
|
@ -2,6 +2,8 @@ package main
|
|||
|
||||
import (
|
||||
"log"
|
||||
config "openreplay/backend/internal/config/integrations"
|
||||
"openreplay/backend/internal/integrations/clientManager"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
|
|
@ -9,23 +11,21 @@ import (
|
|||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/pkg/intervals"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/token"
|
||||
"openreplay/backend/services/integrations/clientManager"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
TOPIC_RAW_WEB := env.String("TOPIC_RAW_WEB")
|
||||
POSTGRES_STRING := env.String("POSTGRES_STRING")
|
||||
|
||||
pg := postgres.NewConn(POSTGRES_STRING)
|
||||
cfg := config.New()
|
||||
|
||||
pg := postgres.NewConn(cfg.PostgresURI)
|
||||
defer pg.Close()
|
||||
|
||||
tokenizer := token.NewTokenizer(env.String("TOKEN_SECRET"))
|
||||
tokenizer := token.NewTokenizer(cfg.TokenSecret)
|
||||
|
||||
manager := clientManager.NewManager()
|
||||
|
||||
|
|
@ -45,7 +45,7 @@ func main() {
|
|||
producer := queue.NewProducer()
|
||||
defer producer.Close(15000)
|
||||
|
||||
listener, err := postgres.NewIntegrationsListener(POSTGRES_STRING)
|
||||
listener, err := postgres.NewIntegrationsListener(cfg.PostgresURI)
|
||||
if err != nil {
|
||||
log.Printf("Postgres listener error: %v\n", err)
|
||||
log.Fatalf("Postgres listener error")
|
||||
|
|
@ -81,7 +81,7 @@ func main() {
|
|||
sessionID = sessData.ID
|
||||
}
|
||||
// TODO: send to ready-events topic. Otherwise it have to go through the events worker.
|
||||
producer.Produce(TOPIC_RAW_WEB, sessionID, messages.Encode(event.RawErrorEvent))
|
||||
producer.Produce(cfg.TopicRawWeb, sessionID, messages.Encode(event.RawErrorEvent))
|
||||
case err := <-manager.Errors:
|
||||
log.Printf("Integration error: %v\n", err)
|
||||
case i := <-manager.RequestDataUpdates:
|
||||
|
|
@ -3,48 +3,64 @@ package main
|
|||
import (
|
||||
"encoding/binary"
|
||||
"log"
|
||||
"openreplay/backend/internal/storage"
|
||||
"time"
|
||||
|
||||
"os"
|
||||
"os/signal"
|
||||
"syscall"
|
||||
|
||||
"openreplay/backend/pkg/env"
|
||||
"openreplay/backend/internal/assetscache"
|
||||
"openreplay/backend/internal/config/sink"
|
||||
"openreplay/backend/internal/oswriter"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
FS_DIR := env.String("FS_DIR")
|
||||
if _, err := os.Stat(FS_DIR); os.IsNotExist(err) {
|
||||
log.Fatalf("%v doesn't exist. %v", FS_DIR, err)
|
||||
cfg := sink.New()
|
||||
|
||||
if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
|
||||
log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)
|
||||
}
|
||||
|
||||
writer := NewWriter(env.Uint16("FS_ULIMIT"), FS_DIR)
|
||||
writer := oswriter.NewWriter(cfg.FsUlimit, cfg.FsDir)
|
||||
|
||||
count := 0
|
||||
producer := queue.NewProducer()
|
||||
defer producer.Close(cfg.ProducerCloseTimeout)
|
||||
rewriter := assets.NewRewriter(cfg.AssetsOrigin)
|
||||
assetMessageHandler := assetscache.New(cfg, rewriter, producer)
|
||||
|
||||
counter := storage.NewLogCounter()
|
||||
|
||||
consumer := queue.NewMessageConsumer(
|
||||
env.String("GROUP_SINK"),
|
||||
cfg.GroupSink,
|
||||
[]string{
|
||||
env.String("TOPIC_RAW_WEB"),
|
||||
env.String("TOPIC_RAW_IOS"),
|
||||
cfg.TopicRawIOS,
|
||||
cfg.TopicRawWeb,
|
||||
},
|
||||
func(sessionID uint64, message Message, _ *types.Meta) {
|
||||
//typeID, err := GetMessageTypeID(value)
|
||||
// if err != nil {
|
||||
// log.Printf("Message type decoding error: %v", err)
|
||||
// return
|
||||
// }
|
||||
typeID := message.Meta().TypeID
|
||||
// Process assets
|
||||
message = assetMessageHandler.ParseAssets(sessionID, message)
|
||||
|
||||
// Filter message
|
||||
typeID := message.TypeID()
|
||||
if !IsReplayerType(typeID) {
|
||||
return
|
||||
}
|
||||
|
||||
count++
|
||||
// If message timestamp is empty, use at least ts of session start
|
||||
ts := message.Meta().Timestamp
|
||||
if ts == 0 {
|
||||
log.Printf("zero ts; sessID: %d, msg: %+v", sessionID, message)
|
||||
} else {
|
||||
// Log ts of last processed message
|
||||
counter.Update(sessionID, time.UnixMilli(ts))
|
||||
}
|
||||
|
||||
value := message.Encode()
|
||||
var data []byte
|
||||
|
|
@ -61,29 +77,29 @@ func main() {
|
|||
},
|
||||
false,
|
||||
)
|
||||
log.Printf("Sink service started\n")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
tick := time.Tick(30 * time.Second)
|
||||
|
||||
log.Printf("Sink service started\n")
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
log.Printf("Caught signal %v: terminating\n", sig)
|
||||
consumer.Commit()
|
||||
if err := consumer.Commit(); err != nil {
|
||||
log.Printf("can't commit messages: %s", err)
|
||||
}
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-tick:
|
||||
if err := writer.SyncAll(); err != nil {
|
||||
log.Fatalf("Sync error: %v\n", err)
|
||||
}
|
||||
|
||||
log.Printf("%v messages during 30 sec", count)
|
||||
count = 0
|
||||
|
||||
consumer.Commit()
|
||||
counter.Print()
|
||||
if err := consumer.Commit(); err != nil {
|
||||
log.Printf("can't commit messages: %s", err)
|
||||
}
|
||||
default:
|
||||
err := consumer.ConsumeNext()
|
||||
if err != nil {
|
||||
70
backend/cmd/storage/main.go
Normal file
70
backend/cmd/storage/main.go
Normal file
|
|
@ -0,0 +1,70 @@
|
|||
package main
|
||||
|
||||
import (
|
||||
"log"
|
||||
"os"
|
||||
"os/signal"
|
||||
"strconv"
|
||||
"syscall"
|
||||
"time"
|
||||
|
||||
config "openreplay/backend/internal/config/storage"
|
||||
"openreplay/backend/internal/storage"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
s3storage "openreplay/backend/pkg/storage"
|
||||
)
|
||||
|
||||
func main() {
|
||||
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
|
||||
|
||||
cfg := config.New()
|
||||
|
||||
s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket)
|
||||
srv, err := storage.New(cfg, s3)
|
||||
if err != nil {
|
||||
log.Printf("can't init storage service: %s", err)
|
||||
return
|
||||
}
|
||||
|
||||
counter := storage.NewLogCounter()
|
||||
|
||||
consumer := queue.NewMessageConsumer(
|
||||
cfg.GroupStorage,
|
||||
[]string{
|
||||
cfg.TopicTrigger,
|
||||
},
|
||||
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
|
||||
switch msg.(type) {
|
||||
case *messages.SessionEnd:
|
||||
srv.UploadKey(strconv.FormatUint(sessionID, 10), 5)
|
||||
// Log timestamp of last processed session
|
||||
counter.Update(sessionID, time.UnixMilli(meta.Timestamp))
|
||||
}
|
||||
},
|
||||
true,
|
||||
)
|
||||
|
||||
log.Printf("Storage service started\n")
|
||||
|
||||
sigchan := make(chan os.Signal, 1)
|
||||
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
|
||||
|
||||
counterTick := time.Tick(time.Second * 30)
|
||||
for {
|
||||
select {
|
||||
case sig := <-sigchan:
|
||||
log.Printf("Caught signal %v: terminating\n", sig)
|
||||
consumer.Close()
|
||||
os.Exit(0)
|
||||
case <-counterTick:
|
||||
go counter.Print()
|
||||
default:
|
||||
err := consumer.ConsumeNext()
|
||||
if err != nil {
|
||||
log.Fatalf("Error on consumption: %v", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -2,13 +2,6 @@
|
|||
|
||||
- [Vagrant](../scripts/vagrant/README.md)
|
||||
|
||||
### Development environment
|
||||
|
||||
```bash
|
||||
docker build -f Dockerfile.bundle .
|
||||
|
||||
```
|
||||
|
||||
### Building and deploying locally
|
||||
|
||||
```bash
|
||||
|
|
|
|||
|
|
@ -4,7 +4,6 @@ go 1.18
|
|||
|
||||
require (
|
||||
cloud.google.com/go/logging v1.4.2
|
||||
github.com/ClickHouse/clickhouse-go v1.4.3
|
||||
github.com/aws/aws-sdk-go v1.35.23
|
||||
github.com/btcsuite/btcutil v1.0.2
|
||||
github.com/elastic/go-elasticsearch/v7 v7.13.1
|
||||
|
|
@ -19,18 +18,21 @@ require (
|
|||
github.com/pkg/errors v0.9.1
|
||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
|
||||
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.30.0
|
||||
go.opentelemetry.io/otel/metric v0.30.0
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5
|
||||
google.golang.org/api v0.50.0
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0
|
||||
)
|
||||
|
||||
require (
|
||||
cloud.google.com/go v0.84.0 // indirect
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
|
||||
github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect
|
||||
github.com/beorn7/perks v1.0.1 // indirect
|
||||
github.com/cespare/xxhash/v2 v2.1.2 // indirect
|
||||
github.com/go-logr/logr v1.2.3 // indirect
|
||||
github.com/go-logr/stdr v1.2.2 // indirect
|
||||
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
|
||||
github.com/golang/protobuf v1.5.2 // indirect
|
||||
github.com/google/go-cmp v0.5.6 // indirect
|
||||
github.com/google/go-cmp v0.5.7 // indirect
|
||||
github.com/googleapis/gax-go/v2 v2.0.5 // indirect
|
||||
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
|
||||
github.com/jackc/pgio v1.0.0 // indirect
|
||||
|
|
@ -42,13 +44,22 @@ require (
|
|||
github.com/jmespath/go-jmespath v0.4.0 // indirect
|
||||
github.com/jstemmer/go-junit-report v0.9.1 // indirect
|
||||
github.com/klauspost/compress v1.11.9 // indirect
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
|
||||
github.com/prometheus/client_golang v1.12.1 // indirect
|
||||
github.com/prometheus/client_model v0.2.0 // indirect
|
||||
github.com/prometheus/common v0.32.1 // indirect
|
||||
github.com/prometheus/procfs v0.7.3 // indirect
|
||||
go.opencensus.io v0.23.0 // indirect
|
||||
go.opentelemetry.io/otel v1.7.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk v1.7.0 // indirect
|
||||
go.opentelemetry.io/otel/sdk/metric v0.30.0 // indirect
|
||||
go.opentelemetry.io/otel/trace v1.7.0 // indirect
|
||||
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
|
||||
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
|
||||
golang.org/x/mod v0.4.2 // indirect
|
||||
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 // indirect
|
||||
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 // indirect
|
||||
golang.org/x/text v0.3.6 // indirect
|
||||
golang.org/x/tools v0.1.4 // indirect
|
||||
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
|
||||
|
|
@ -56,5 +67,5 @@ require (
|
|||
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 // indirect
|
||||
google.golang.org/grpc v1.38.0 // indirect
|
||||
google.golang.org/protobuf v1.26.0 // indirect
|
||||
gopkg.in/yaml.v2 v2.2.8 // indirect
|
||||
gopkg.in/yaml.v2 v2.4.0 // indirect
|
||||
)
|
||||
|
|
|
|||
128
backend/go.sum
128
backend/go.sum
|
|
@ -44,13 +44,20 @@ cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9
|
|||
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
|
||||
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
|
||||
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
|
||||
github.com/ClickHouse/clickhouse-go v1.4.3 h1:iAFMa2UrQdR5bHJ2/yaSLffZkxpcOYQMCUuKeNXGdqc=
|
||||
github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
|
||||
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
|
||||
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
|
||||
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
|
||||
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
|
||||
github.com/aws/aws-sdk-go v1.35.23 h1:SCP0d0XvyJTDmfnHEQPvBaYi3kea1VNUo7uQmkVgFts=
|
||||
github.com/aws/aws-sdk-go v1.35.23/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
|
||||
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
|
||||
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
|
||||
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
|
||||
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
|
||||
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
|
||||
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
|
||||
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
|
||||
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
|
||||
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
|
||||
github.com/btcsuite/btclog v0.0.0-20170628155309-84c8d2346e9f/go.mod h1:TdznJufoqS23FtqVCzL0ZqgP5MqXbb4fg/WgDys70nA=
|
||||
github.com/btcsuite/btcutil v0.0.0-20190425235716-9e5f4b9a998d/go.mod h1:+5NJ2+qvTyV9exUAL/rxXi3DcLg2Ts+ymUAY5y4NvMg=
|
||||
|
|
@ -62,19 +69,18 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
|
|||
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
|
||||
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
|
||||
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
|
||||
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
|
||||
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
|
||||
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
|
||||
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
|
||||
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
|
||||
github.com/client9/misspell v0.3.4/go.mod h1:qj6jICC3Q7zFZvVWo7KLAzC3yx5G7kyvSDkc90ppPyw=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 h1:F1EaeKL/ta07PY/k9Os/UFtwERei2/XzGemhpGnBKNg=
|
||||
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h4xcZ5GoxqC5SDxFQ8gwyZPKQoEzownBlhI80=
|
||||
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
|
||||
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
|
||||
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
|
||||
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
|
||||
github.com/confluentinc/confluent-kafka-go v1.7.0 h1:tXh3LWb2Ne0WiU3ng4h5qiGA9XV61rz46w60O+cq8bM=
|
||||
github.com/confluentinc/confluent-kafka-go v1.7.0/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
|
||||
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
|
||||
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
|
||||
|
|
@ -95,12 +101,23 @@ github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMo
|
|||
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
|
||||
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
|
||||
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
|
||||
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
|
||||
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
|
||||
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
|
||||
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
|
||||
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
|
||||
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
|
||||
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
|
||||
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
|
||||
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
|
||||
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
|
||||
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
|
||||
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
|
||||
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
|
||||
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
|
||||
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
|
||||
|
|
@ -146,8 +163,10 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/
|
|||
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
|
||||
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
|
||||
github.com/google/go-cmp v0.5.7 h1:81/ik6ipDQS2aGcBfIN5dHDB36BwrStyeAQquSYCV4o=
|
||||
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
|
||||
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
|
||||
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
|
||||
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
|
||||
|
|
@ -226,11 +245,17 @@ github.com/jmespath/go-jmespath v0.4.0 h1:BEgLn5cpjn8UN1mAw4NjwDrS35OdebyEtFe+9Y
|
|||
github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHWvzYPziyZiYoo=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
|
||||
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
|
||||
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
|
||||
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
|
||||
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
|
||||
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
|
||||
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
|
||||
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
|
||||
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
|
||||
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
|
||||
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
|
||||
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
|
||||
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
|
||||
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
|
||||
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
|
||||
github.com/klauspost/compress v1.11.9 h1:5OCMOdde1TCT2sookEuVeEZzA8bmRSFV3AwPDZAG8AA=
|
||||
|
|
@ -239,6 +264,8 @@ github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE
|
|||
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
|
||||
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
|
||||
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
|
||||
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
|
||||
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
|
||||
|
|
@ -255,7 +282,15 @@ github.com/mattn/go-isatty v0.0.5/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
|
|||
github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
|
||||
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
|
||||
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
|
||||
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
|
||||
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
|
||||
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
|
||||
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
|
||||
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
|
||||
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
|
||||
|
|
@ -263,14 +298,34 @@ github.com/onsi/gomega v1.4.3 h1:RE1xgDvH7imwFD45h+u2SgIfERHlS2yNG4DObb5BSKU=
|
|||
github.com/onsi/gomega v1.4.3/go.mod h1:ex+gbHU/CVuBBDIJjb2X0qEXbFg53c61hWP/1CpauHY=
|
||||
github.com/oschwald/maxminddb-golang v1.7.0 h1:JmU4Q1WBv5Q+2KZy5xJI+98aUwTIrPPxZUkd5Cwr8Zc=
|
||||
github.com/oschwald/maxminddb-golang v1.7.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
|
||||
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
|
||||
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
|
||||
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
|
||||
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
|
||||
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
|
||||
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
|
||||
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
|
||||
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
|
||||
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
|
||||
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
|
||||
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
|
||||
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
|
||||
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
|
||||
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
|
||||
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
|
||||
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
|
||||
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
|
||||
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
|
||||
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
|
||||
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
|
||||
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
|
||||
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
|
||||
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
|
||||
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
|
||||
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
|
||||
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
|
||||
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
|
||||
|
|
@ -278,8 +333,10 @@ github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThC
|
|||
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
|
||||
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
|
||||
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
|
||||
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
|
||||
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
|
||||
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
|
||||
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
|
||||
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
|
||||
|
|
@ -287,8 +344,9 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
|
|||
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
|
||||
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
|
||||
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
|
||||
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
|
||||
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
|
||||
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
|
||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc=
|
||||
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
|
||||
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe h1:aj/vX5epIlQQBEocKoM9nSAiNpakdQzElc8SaRFPu+I=
|
||||
|
|
@ -307,12 +365,25 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
|
|||
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
|
||||
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
|
||||
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
|
||||
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
|
||||
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.30.0 h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0=
|
||||
go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs=
|
||||
go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c=
|
||||
go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
|
||||
go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
|
||||
go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
|
||||
go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME=
|
||||
go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8=
|
||||
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
|
||||
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
|
||||
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
|
||||
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
|
||||
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
|
||||
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
|
||||
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
|
||||
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
|
||||
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
|
||||
|
|
@ -363,6 +434,7 @@ golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
|
|||
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
|
||||
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
|
|
@ -370,6 +442,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
|
|||
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
|
||||
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
|
||||
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
|
||||
|
|
@ -396,8 +469,9 @@ golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v
|
|||
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
|
||||
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
|
||||
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk=
|
||||
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5 h1:wjuX4b5yYQnEQHzd+CBcrcC6OVR2J1CN6mUy0oSxIPo=
|
||||
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
|
||||
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
|
||||
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
|
||||
|
|
@ -428,6 +502,7 @@ golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJ
|
|||
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
|
||||
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -445,6 +520,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -457,6 +533,8 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
|
|
@ -464,18 +542,22 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
|
|||
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
|
||||
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
|
||||
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9 h1:XfKQ4OlFl8okEOr5UvAqFRVj8pY/4yfcXrddB8qAbU0=
|
||||
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
|
||||
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
|
||||
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
|
||||
|
|
@ -666,11 +748,11 @@ google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlba
|
|||
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
|
||||
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
|
||||
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
|
||||
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
|
||||
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
|
||||
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0 h1:+RlmciBLDd/XwM1iudiG3HtCg45purnsOxEoY/+JZdQ=
|
||||
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
|
||||
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
|
||||
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
|
||||
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
|
||||
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
|
||||
|
|
@ -679,8 +761,12 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
|
|||
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
|
||||
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
|
||||
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
|
||||
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
|
||||
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
|
||||
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
|
||||
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
|
||||
|
|
|
|||
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package assets
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
|
@ -9,7 +9,7 @@ type frame struct {
|
|||
FileName string `json:"fileName"`
|
||||
}
|
||||
|
||||
func extractJSExceptionSources(payload *string) ([]string, error) {
|
||||
func ExtractJSExceptionSources(payload *string) ([]string, error) {
|
||||
var frameList []frame
|
||||
err := json.Unmarshal([]byte(*payload), &frameList)
|
||||
if err != nil {
|
||||
88
backend/internal/assetscache/assets.go
Normal file
88
backend/internal/assetscache/assets.go
Normal file
|
|
@ -0,0 +1,88 @@
|
|||
package assetscache
|
||||
|
||||
import (
|
||||
"log"
|
||||
"openreplay/backend/internal/config/sink"
|
||||
"openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/url/assets"
|
||||
)
|
||||
|
||||
type AssetsCache struct {
|
||||
cfg *sink.Config
|
||||
rewriter *assets.Rewriter
|
||||
producer types.Producer
|
||||
}
|
||||
|
||||
func New(cfg *sink.Config, rewriter *assets.Rewriter, producer types.Producer) *AssetsCache {
|
||||
return &AssetsCache{
|
||||
cfg: cfg,
|
||||
rewriter: rewriter,
|
||||
producer: producer,
|
||||
}
|
||||
}
|
||||
|
||||
func (e *AssetsCache) ParseAssets(sessID uint64, msg messages.Message) messages.Message {
|
||||
switch m := msg.(type) {
|
||||
case *messages.SetNodeAttributeURLBased:
|
||||
if m.Name == "src" || m.Name == "href" {
|
||||
return &messages.SetNodeAttribute{
|
||||
ID: m.ID,
|
||||
Name: m.Name,
|
||||
Value: e.handleURL(sessID, m.BaseURL, m.Value),
|
||||
}
|
||||
} else if m.Name == "style" {
|
||||
return &messages.SetNodeAttribute{
|
||||
ID: m.ID,
|
||||
Name: m.Name,
|
||||
Value: e.handleCSS(sessID, m.BaseURL, m.Value),
|
||||
}
|
||||
}
|
||||
case *messages.SetCSSDataURLBased:
|
||||
return &messages.SetCSSData{
|
||||
ID: m.ID,
|
||||
Data: e.handleCSS(sessID, m.BaseURL, m.Data),
|
||||
}
|
||||
case *messages.CSSInsertRuleURLBased:
|
||||
return &messages.CSSInsertRule{
|
||||
ID: m.ID,
|
||||
Index: m.Index,
|
||||
Rule: e.handleCSS(sessID, m.BaseURL, m.Rule),
|
||||
}
|
||||
}
|
||||
return msg
|
||||
}
|
||||
|
||||
func (e *AssetsCache) sendAssetForCache(sessionID uint64, baseURL string, relativeURL string) {
|
||||
if fullURL, cacheable := assets.GetFullCachableURL(baseURL, relativeURL); cacheable {
|
||||
if err := e.producer.Produce(
|
||||
e.cfg.TopicCache,
|
||||
sessionID,
|
||||
messages.Encode(&messages.AssetCache{URL: fullURL}),
|
||||
); err != nil {
|
||||
log.Printf("can't send asset to cache topic, sessID: %d, err: %s", sessionID, err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
func (e *AssetsCache) sendAssetsForCacheFromCSS(sessionID uint64, baseURL string, css string) {
|
||||
for _, u := range assets.ExtractURLsFromCSS(css) { // TODO: in one shot with rewriting
|
||||
e.sendAssetForCache(sessionID, baseURL, u)
|
||||
}
|
||||
}
|
||||
|
||||
func (e *AssetsCache) handleURL(sessionID uint64, baseURL string, url string) string {
|
||||
if e.cfg.CacheAssets {
|
||||
e.sendAssetForCache(sessionID, baseURL, url)
|
||||
return e.rewriter.RewriteURL(sessionID, baseURL, url)
|
||||
}
|
||||
return assets.ResolveURL(baseURL, url)
|
||||
}
|
||||
|
||||
func (e *AssetsCache) handleCSS(sessionID uint64, baseURL string, css string) string {
|
||||
if e.cfg.CacheAssets {
|
||||
e.sendAssetsForCacheFromCSS(sessionID, baseURL, css)
|
||||
return e.rewriter.RewriteCSS(sessionID, baseURL, css)
|
||||
}
|
||||
return assets.ResolveCSS(baseURL, css)
|
||||
}
|
||||
68
backend/internal/builder/builder.go
Normal file
68
backend/internal/builder/builder.go
Normal file
|
|
@ -0,0 +1,68 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
"log"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/internal/handlers"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
type builder struct {
|
||||
sessionID uint64
|
||||
readyMsgs []Message
|
||||
timestamp uint64
|
||||
lastMessageID uint64
|
||||
lastSystemTime time.Time
|
||||
processors []handlers.MessageProcessor
|
||||
ended bool
|
||||
}
|
||||
|
||||
func NewBuilder(sessionID uint64, handlers ...handlers.MessageProcessor) *builder {
|
||||
return &builder{
|
||||
sessionID: sessionID,
|
||||
processors: handlers,
|
||||
}
|
||||
}
|
||||
|
||||
func (b *builder) iterateReadyMessages(iter func(msg Message)) {
|
||||
for _, readyMsg := range b.readyMsgs {
|
||||
iter(readyMsg)
|
||||
}
|
||||
b.readyMsgs = nil
|
||||
}
|
||||
|
||||
func (b *builder) checkSessionEnd(message Message) {
|
||||
if _, isEnd := message.(*IOSSessionEnd); isEnd {
|
||||
b.ended = true
|
||||
}
|
||||
if _, isEnd := message.(*SessionEnd); isEnd {
|
||||
b.ended = true
|
||||
}
|
||||
}
|
||||
|
||||
func (b *builder) handleMessage(message Message, messageID uint64) {
|
||||
if messageID < b.lastMessageID {
|
||||
// May happen in case of duplicated messages in kafka (if `idempotence: false`)
|
||||
log.Printf("skip message with wrong msgID, sessID: %d, msgID: %d, lastID: %d", b.sessionID, messageID, b.lastMessageID)
|
||||
return
|
||||
}
|
||||
timestamp := GetTimestamp(message)
|
||||
if timestamp == 0 {
|
||||
log.Printf("skip message with empty timestamp, sessID: %d, msgID: %d, msgType: %d", b.sessionID, messageID, message.TypeID())
|
||||
return
|
||||
}
|
||||
if timestamp < b.timestamp {
|
||||
log.Printf("skip message with wrong timestamp, sessID: %d, msgID: %d, type: %d", b.sessionID, messageID, message.TypeID())
|
||||
return
|
||||
}
|
||||
|
||||
b.timestamp = timestamp
|
||||
b.lastSystemTime = time.Now()
|
||||
for _, p := range b.processors {
|
||||
if rm := p.Handle(message, messageID, b.timestamp); rm != nil {
|
||||
b.readyMsgs = append(b.readyMsgs, rm)
|
||||
}
|
||||
}
|
||||
b.checkSessionEnd(message)
|
||||
}
|
||||
74
backend/internal/builder/builderMap.go
Normal file
74
backend/internal/builder/builderMap.go
Normal file
|
|
@ -0,0 +1,74 @@
|
|||
package builder
|
||||
|
||||
import (
|
||||
"time"
|
||||
|
||||
"openreplay/backend/internal/handlers"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
const FORCE_DELETE_TIMEOUT = 4 * time.Hour
|
||||
|
||||
type builderMap struct {
|
||||
handlersFabric func() []handlers.MessageProcessor
|
||||
sessions map[uint64]*builder
|
||||
}
|
||||
|
||||
func NewBuilderMap(handlersFabric func() []handlers.MessageProcessor) *builderMap {
|
||||
return &builderMap{
|
||||
handlersFabric: handlersFabric,
|
||||
sessions: make(map[uint64]*builder),
|
||||
}
|
||||
}
|
||||
|
||||
func (m *builderMap) GetBuilder(sessionID uint64) *builder {
|
||||
b := m.sessions[sessionID]
|
||||
if b == nil {
|
||||
b = NewBuilder(sessionID, m.handlersFabric()...) // Should create new instances
|
||||
m.sessions[sessionID] = b
|
||||
}
|
||||
return b
|
||||
}
|
||||
|
||||
func (m *builderMap) HandleMessage(sessionID uint64, msg Message, messageID uint64) {
|
||||
b := m.GetBuilder(sessionID)
|
||||
b.handleMessage(msg, messageID)
|
||||
}
|
||||
|
||||
func (m *builderMap) iterateSessionReadyMessages(sessionID uint64, b *builder, iter func(msg Message)) {
|
||||
if b.ended || b.lastSystemTime.Add(FORCE_DELETE_TIMEOUT).Before(time.Now()) {
|
||||
for _, p := range b.processors {
|
||||
if rm := p.Build(); rm != nil {
|
||||
b.readyMsgs = append(b.readyMsgs, rm)
|
||||
}
|
||||
}
|
||||
}
|
||||
b.iterateReadyMessages(iter)
|
||||
if b.ended {
|
||||
delete(m.sessions, sessionID)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *builderMap) IterateReadyMessages(iter func(sessionID uint64, msg Message)) {
|
||||
for sessionID, session := range m.sessions {
|
||||
m.iterateSessionReadyMessages(
|
||||
sessionID,
|
||||
session,
|
||||
func(msg Message) {
|
||||
iter(sessionID, msg)
|
||||
},
|
||||
)
|
||||
}
|
||||
}
|
||||
|
||||
func (m *builderMap) IterateSessionReadyMessages(sessionID uint64, iter func(msg Message)) {
|
||||
session, ok := m.sessions[sessionID]
|
||||
if !ok {
|
||||
return
|
||||
}
|
||||
m.iterateSessionReadyMessages(
|
||||
sessionID,
|
||||
session,
|
||||
iter,
|
||||
)
|
||||
}
|
||||
23
backend/internal/config/assets/config.go
Normal file
23
backend/internal/config/assets/config.go
Normal file
|
|
@ -0,0 +1,23 @@
|
|||
package assets
|
||||
|
||||
import "openreplay/backend/pkg/env"
|
||||
|
||||
type Config struct {
|
||||
GroupCache string
|
||||
TopicCache string
|
||||
AWSRegion string
|
||||
S3BucketAssets string
|
||||
AssetsOrigin string
|
||||
AssetsSizeLimit int
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
GroupCache: env.String("GROUP_CACHE"),
|
||||
TopicCache: env.String("TOPIC_CACHE"),
|
||||
AWSRegion: env.String("AWS_REGION"),
|
||||
S3BucketAssets: env.String("S3_BUCKET_ASSETS"),
|
||||
AssetsOrigin: env.String("ASSETS_ORIGIN"),
|
||||
AssetsSizeLimit: env.Int("ASSETS_SIZE_LIMIT"),
|
||||
}
|
||||
}
|
||||
44
backend/internal/config/config.go
Normal file
44
backend/internal/config/config.go
Normal file
|
|
@ -0,0 +1,44 @@
|
|||
package config
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/env"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
HTTPHost string
|
||||
HTTPPort string
|
||||
HTTPTimeout time.Duration
|
||||
TopicRawWeb string
|
||||
TopicRawIOS string
|
||||
BeaconSizeLimit int64
|
||||
JsonSizeLimit int64
|
||||
FileSizeLimit int64
|
||||
AWSRegion string
|
||||
S3BucketIOSImages string
|
||||
Postgres string
|
||||
TokenSecret string
|
||||
UAParserFile string
|
||||
MaxMinDBFile string
|
||||
WorkerID uint16
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
HTTPHost: "", // empty by default
|
||||
HTTPPort: env.String("HTTP_PORT"),
|
||||
HTTPTimeout: time.Second * 60,
|
||||
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
|
||||
TopicRawIOS: env.String("TOPIC_RAW_IOS"),
|
||||
BeaconSizeLimit: int64(env.Uint64("BEACON_SIZE_LIMIT")),
|
||||
JsonSizeLimit: 1e3, // 1Kb
|
||||
FileSizeLimit: 1e7, // 10Mb
|
||||
AWSRegion: env.String("AWS_REGION"),
|
||||
S3BucketIOSImages: env.String("S3_BUCKET_IOS_IMAGES"),
|
||||
Postgres: env.String("POSTGRES_STRING"),
|
||||
TokenSecret: env.String("TOKEN_SECRET"),
|
||||
UAParserFile: env.String("UAPARSER_FILE"),
|
||||
MaxMinDBFile: env.String("MAXMINDDB_FILE"),
|
||||
WorkerID: env.WorkerID(),
|
||||
}
|
||||
}
|
||||
30
backend/internal/config/db/config.go
Normal file
30
backend/internal/config/db/config.go
Normal file
|
|
@ -0,0 +1,30 @@
|
|||
package db
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/env"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
Postgres string
|
||||
ProjectExpirationTimeoutMs int64
|
||||
LoggerTimeout int
|
||||
GroupDB string
|
||||
TopicRawWeb string
|
||||
TopicRawIOS string
|
||||
TopicTrigger string
|
||||
CommitBatchTimeout time.Duration
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
Postgres: env.String("POSTGRES_STRING"),
|
||||
ProjectExpirationTimeoutMs: 1000 * 60 * 20,
|
||||
LoggerTimeout: env.Int("LOG_QUEUE_STATS_INTERVAL_SEC"),
|
||||
GroupDB: env.String("GROUP_DB"),
|
||||
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
|
||||
TopicRawIOS: env.String("TOPIC_RAW_IOS"),
|
||||
TopicTrigger: env.String("TOPIC_TRIGGER"),
|
||||
CommitBatchTimeout: 15 * time.Second,
|
||||
}
|
||||
}
|
||||
25
backend/internal/config/ender/config.go
Normal file
25
backend/internal/config/ender/config.go
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
package ender
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/env"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
GroupEnder string
|
||||
TopicTrigger string
|
||||
LoggerTimeout int
|
||||
TopicRawWeb string
|
||||
TopicRawIOS string
|
||||
ProducerTimeout int
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
GroupEnder: env.String("GROUP_ENDER"),
|
||||
TopicTrigger: env.String("TOPIC_TRIGGER"),
|
||||
LoggerTimeout: env.Int("LOG_QUEUE_STATS_INTERVAL_SEC"),
|
||||
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
|
||||
TopicRawIOS: env.String("TOPIC_RAW_IOS"),
|
||||
ProducerTimeout: 2000,
|
||||
}
|
||||
}
|
||||
25
backend/internal/config/heuristics/config.go
Normal file
25
backend/internal/config/heuristics/config.go
Normal file
|
|
@ -0,0 +1,25 @@
|
|||
package heuristics
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/env"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
GroupHeuristics string
|
||||
TopicTrigger string
|
||||
LoggerTimeout int
|
||||
TopicRawWeb string
|
||||
TopicRawIOS string
|
||||
ProducerTimeout int
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
GroupHeuristics: env.String("GROUP_HEURISTICS"),
|
||||
TopicTrigger: env.String("TOPIC_TRIGGER"),
|
||||
LoggerTimeout: env.Int("LOG_QUEUE_STATS_INTERVAL_SEC"),
|
||||
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
|
||||
TopicRawIOS: env.String("TOPIC_RAW_IOS"),
|
||||
ProducerTimeout: 2000,
|
||||
}
|
||||
}
|
||||
17
backend/internal/config/integrations/config.go
Normal file
17
backend/internal/config/integrations/config.go
Normal file
|
|
@ -0,0 +1,17 @@
|
|||
package integrations
|
||||
|
||||
import "openreplay/backend/pkg/env"
|
||||
|
||||
type Config struct {
|
||||
TopicRawWeb string
|
||||
PostgresURI string
|
||||
TokenSecret string
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
|
||||
PostgresURI: env.String("POSTGRES_STRING"),
|
||||
TokenSecret: env.String("TOKEN_SECRET"),
|
||||
}
|
||||
}
|
||||
31
backend/internal/config/sink/config.go
Normal file
31
backend/internal/config/sink/config.go
Normal file
|
|
@ -0,0 +1,31 @@
|
|||
package sink
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/env"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
FsDir string
|
||||
FsUlimit uint16
|
||||
GroupSink string
|
||||
TopicRawWeb string
|
||||
TopicRawIOS string
|
||||
TopicCache string
|
||||
CacheAssets bool
|
||||
AssetsOrigin string
|
||||
ProducerCloseTimeout int
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
FsDir: env.String("FS_DIR"),
|
||||
FsUlimit: env.Uint16("FS_ULIMIT"),
|
||||
GroupSink: env.String("GROUP_SINK"),
|
||||
TopicRawWeb: env.String("TOPIC_RAW_WEB"),
|
||||
TopicRawIOS: env.String("TOPIC_RAW_IOS"),
|
||||
TopicCache: env.String("TOPIC_CACHE"),
|
||||
CacheAssets: env.Bool("CACHE_ASSETS"),
|
||||
AssetsOrigin: env.String("ASSETS_ORIGIN"),
|
||||
ProducerCloseTimeout: 15000,
|
||||
}
|
||||
}
|
||||
32
backend/internal/config/storage/config.go
Normal file
32
backend/internal/config/storage/config.go
Normal file
|
|
@ -0,0 +1,32 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"openreplay/backend/pkg/env"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Config struct {
|
||||
S3Region string
|
||||
S3Bucket string
|
||||
FSDir string
|
||||
FSCleanHRS int
|
||||
FileSplitSize int
|
||||
RetryTimeout time.Duration
|
||||
GroupStorage string
|
||||
TopicTrigger string
|
||||
DeleteTimeout time.Duration
|
||||
}
|
||||
|
||||
func New() *Config {
|
||||
return &Config{
|
||||
S3Region: env.String("AWS_REGION_WEB"),
|
||||
S3Bucket: env.String("S3_BUCKET_WEB"),
|
||||
FSDir: env.String("FS_DIR"),
|
||||
FSCleanHRS: env.Int("FS_CLEAN_HRS"),
|
||||
FileSplitSize: env.Int("FILE_SPLIT_SIZE"),
|
||||
RetryTimeout: 2 * time.Minute,
|
||||
GroupStorage: env.String("GROUP_STORAGE"),
|
||||
TopicTrigger: env.String("TOPIC_TRIGGER"),
|
||||
DeleteTimeout: 48 * time.Hour,
|
||||
}
|
||||
}
|
||||
76
backend/internal/datasaver/messages.go
Normal file
76
backend/internal/datasaver/messages.go
Normal file
|
|
@ -0,0 +1,76 @@
|
|||
package datasaver
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func (mi *Saver) InsertMessage(sessionID uint64, msg Message) error {
|
||||
switch m := msg.(type) {
|
||||
// Common
|
||||
case *Metadata:
|
||||
return mi.pg.InsertMetadata(sessionID, m)
|
||||
case *IssueEvent:
|
||||
return mi.pg.InsertIssueEvent(sessionID, m)
|
||||
//TODO: message adapter (transformer) (at the level of pkg/message) for types: *IOSMetadata, *IOSIssueEvent and others
|
||||
|
||||
// Web
|
||||
case *SessionStart:
|
||||
return mi.pg.InsertWebSessionStart(sessionID, m)
|
||||
case *SessionEnd:
|
||||
return mi.pg.InsertWebSessionEnd(sessionID, m)
|
||||
case *UserID:
|
||||
return mi.pg.InsertWebUserID(sessionID, m)
|
||||
case *UserAnonymousID:
|
||||
return mi.pg.InsertWebUserAnonymousID(sessionID, m)
|
||||
case *CustomEvent:
|
||||
return mi.pg.InsertWebCustomEvent(sessionID, m)
|
||||
case *ClickEvent:
|
||||
return mi.pg.InsertWebClickEvent(sessionID, m)
|
||||
case *InputEvent:
|
||||
return mi.pg.InsertWebInputEvent(sessionID, m)
|
||||
|
||||
// Unique Web messages
|
||||
case *PageEvent:
|
||||
return mi.pg.InsertWebPageEvent(sessionID, m)
|
||||
case *ErrorEvent:
|
||||
return mi.pg.InsertWebErrorEvent(sessionID, m)
|
||||
case *FetchEvent:
|
||||
return mi.pg.InsertWebFetchEvent(sessionID, m)
|
||||
case *GraphQLEvent:
|
||||
return mi.pg.InsertWebGraphQLEvent(sessionID, m)
|
||||
|
||||
// IOS
|
||||
case *IOSSessionStart:
|
||||
return mi.pg.InsertIOSSessionStart(sessionID, m)
|
||||
case *IOSSessionEnd:
|
||||
return mi.pg.InsertIOSSessionEnd(sessionID, m)
|
||||
case *IOSUserID:
|
||||
return mi.pg.InsertIOSUserID(sessionID, m)
|
||||
case *IOSUserAnonymousID:
|
||||
return mi.pg.InsertIOSUserAnonymousID(sessionID, m)
|
||||
case *IOSCustomEvent:
|
||||
return mi.pg.InsertIOSCustomEvent(sessionID, m)
|
||||
case *IOSClickEvent:
|
||||
return mi.pg.InsertIOSClickEvent(sessionID, m)
|
||||
case *IOSInputEvent:
|
||||
return mi.pg.InsertIOSInputEvent(sessionID, m)
|
||||
// Unique IOS messages
|
||||
case *IOSNetworkCall:
|
||||
return mi.pg.InsertIOSNetworkCall(sessionID, m)
|
||||
case *IOSScreenEnter:
|
||||
return mi.pg.InsertIOSScreenEnter(sessionID, m)
|
||||
case *IOSCrash:
|
||||
return mi.pg.InsertIOSCrash(sessionID, m)
|
||||
|
||||
case *RawErrorEvent:
|
||||
return mi.pg.InsertWebErrorEvent(sessionID, &ErrorEvent{
|
||||
MessageID: m.Meta().Index, // TODO: is it possible to catch panic here???
|
||||
Timestamp: m.Timestamp,
|
||||
Source: m.Source,
|
||||
Name: m.Name,
|
||||
Message: m.Message,
|
||||
Payload: m.Payload,
|
||||
})
|
||||
}
|
||||
return nil // "Not implemented"
|
||||
}
|
||||
11
backend/internal/datasaver/saver.go
Normal file
11
backend/internal/datasaver/saver.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
package datasaver
|
||||
|
||||
import "openreplay/backend/pkg/db/cache"
|
||||
|
||||
type Saver struct {
|
||||
pg *cache.PGCache
|
||||
}
|
||||
|
||||
func New(pg *cache.PGCache) *Saver {
|
||||
return &Saver{pg: pg}
|
||||
}
|
||||
27
backend/internal/datasaver/stats.go
Normal file
27
backend/internal/datasaver/stats.go
Normal file
|
|
@ -0,0 +1,27 @@
|
|||
package datasaver
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/db/types"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func (si *Saver) InitStats() {
|
||||
// noop
|
||||
}
|
||||
|
||||
func (si *Saver) InsertStats(session *Session, msg Message) error {
|
||||
switch m := msg.(type) {
|
||||
// Web
|
||||
case *PerformanceTrackAggr:
|
||||
return si.pg.InsertWebStatsPerformance(session.SessionID, m)
|
||||
case *ResourceEvent:
|
||||
return si.pg.InsertWebStatsResourceEvent(session.SessionID, m)
|
||||
case *LongTask:
|
||||
return si.pg.InsertWebStatsLongtask(session.SessionID, m)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (si *Saver) CommitStats() error {
|
||||
return nil
|
||||
}
|
||||
16
backend/internal/handlers/custom/customHandler.go
Normal file
16
backend/internal/handlers/custom/customHandler.go
Normal file
|
|
@ -0,0 +1,16 @@
|
|||
package custom
|
||||
|
||||
import . "openreplay/backend/pkg/messages"
|
||||
|
||||
type CustomHandler struct {
|
||||
lastTimestamp uint64
|
||||
}
|
||||
|
||||
func (h *CustomHandler) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
h.lastTimestamp = timestamp
|
||||
return nil
|
||||
}
|
||||
|
||||
func (h *CustomHandler) Build() Message {
|
||||
return nil
|
||||
}
|
||||
124
backend/internal/handlers/custom/eventMapper.go
Normal file
124
backend/internal/handlers/custom/eventMapper.go
Normal file
|
|
@ -0,0 +1,124 @@
|
|||
package custom
|
||||
|
||||
import (
|
||||
"net/url"
|
||||
"strings"
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
func getURLExtention(URL string) string {
|
||||
u, err := url.Parse(URL)
|
||||
if err != nil {
|
||||
return ""
|
||||
}
|
||||
i := strings.LastIndex(u.Path, ".")
|
||||
return u.Path[i+1:]
|
||||
}
|
||||
|
||||
func getResourceType(initiator string, URL string) string {
|
||||
switch initiator {
|
||||
case "xmlhttprequest", "fetch":
|
||||
return "fetch"
|
||||
case "img":
|
||||
return "img"
|
||||
default:
|
||||
switch getURLExtention(URL) {
|
||||
case "css":
|
||||
return "stylesheet"
|
||||
case "js":
|
||||
return "script"
|
||||
case "png", "gif", "jpg", "jpeg", "svg":
|
||||
return "img"
|
||||
case "mp4", "mkv", "ogg", "webm", "avi", "mp3":
|
||||
return "media"
|
||||
default:
|
||||
return "other"
|
||||
}
|
||||
}
|
||||
}
|
||||
|
||||
type EventMapper struct{}
|
||||
|
||||
func (b *EventMapper) Build() Message {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *EventMapper) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
switch msg := message.(type) {
|
||||
case *MouseClick:
|
||||
if msg.Label != "" {
|
||||
return &ClickEvent{
|
||||
MessageID: messageID,
|
||||
Label: msg.Label,
|
||||
HesitationTime: msg.HesitationTime,
|
||||
Timestamp: timestamp,
|
||||
Selector: msg.Selector,
|
||||
}
|
||||
}
|
||||
case *JSException:
|
||||
return &ErrorEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
Source: "js_exception",
|
||||
Name: msg.Name,
|
||||
Message: msg.Message,
|
||||
Payload: msg.Payload,
|
||||
}
|
||||
case *ResourceTiming:
|
||||
return &ResourceEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: msg.Timestamp,
|
||||
Duration: msg.Duration,
|
||||
TTFB: msg.TTFB,
|
||||
HeaderSize: msg.HeaderSize,
|
||||
EncodedBodySize: msg.EncodedBodySize,
|
||||
DecodedBodySize: msg.DecodedBodySize,
|
||||
URL: msg.URL,
|
||||
Type: getResourceType(msg.Initiator, msg.URL),
|
||||
Success: msg.Duration != 0,
|
||||
}
|
||||
case *RawCustomEvent:
|
||||
return &CustomEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
Name: msg.Name,
|
||||
Payload: msg.Payload,
|
||||
}
|
||||
case *CustomIssue:
|
||||
return &IssueEvent{
|
||||
Type: "custom",
|
||||
Timestamp: timestamp,
|
||||
MessageID: messageID,
|
||||
ContextString: msg.Name,
|
||||
Payload: msg.Payload,
|
||||
}
|
||||
case *Fetch:
|
||||
return &FetchEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: msg.Timestamp,
|
||||
Method: msg.Method,
|
||||
URL: msg.URL,
|
||||
Request: msg.Request,
|
||||
Response: msg.Response,
|
||||
Status: msg.Status,
|
||||
Duration: msg.Duration,
|
||||
}
|
||||
case *GraphQL:
|
||||
return &GraphQLEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
OperationKind: msg.OperationKind,
|
||||
OperationName: msg.OperationName,
|
||||
Variables: msg.Variables,
|
||||
Response: msg.Response,
|
||||
}
|
||||
case *StateAction:
|
||||
return &StateActionEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
Type: msg.Type,
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
77
backend/internal/handlers/custom/inputEventBuilder.go
Normal file
77
backend/internal/handlers/custom/inputEventBuilder.go
Normal file
|
|
@ -0,0 +1,77 @@
|
|||
package custom
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
const INPUT_EVENT_TIMEOUT = 1 * 60 * 1000
|
||||
|
||||
type inputLabels map[uint64]string
|
||||
|
||||
type inputEventBuilder struct {
|
||||
inputEvent *InputEvent
|
||||
inputLabels inputLabels
|
||||
inputID uint64
|
||||
}
|
||||
|
||||
func NewInputEventBuilder() *inputEventBuilder {
|
||||
ieBuilder := &inputEventBuilder{}
|
||||
ieBuilder.clearLabels()
|
||||
return ieBuilder
|
||||
}
|
||||
|
||||
func (b *inputEventBuilder) clearLabels() {
|
||||
b.inputLabels = make(inputLabels)
|
||||
}
|
||||
|
||||
func (b *inputEventBuilder) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
var inputEvent Message = nil
|
||||
switch msg := message.(type) {
|
||||
case *SetInputTarget:
|
||||
if b.inputID != msg.ID {
|
||||
inputEvent = b.Build()
|
||||
b.inputID = msg.ID
|
||||
}
|
||||
b.inputLabels[msg.ID] = msg.Label
|
||||
return inputEvent
|
||||
case *SetInputValue:
|
||||
if b.inputID != msg.ID {
|
||||
inputEvent = b.Build()
|
||||
b.inputID = msg.ID
|
||||
}
|
||||
if b.inputEvent == nil {
|
||||
b.inputEvent = &InputEvent{
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
Value: msg.Value,
|
||||
ValueMasked: msg.Mask > 0,
|
||||
}
|
||||
} else {
|
||||
b.inputEvent.Value = msg.Value
|
||||
b.inputEvent.ValueMasked = msg.Mask > 0
|
||||
}
|
||||
return inputEvent
|
||||
case *CreateDocument:
|
||||
inputEvent = b.Build()
|
||||
b.clearLabels()
|
||||
return inputEvent
|
||||
case *MouseClick:
|
||||
return b.Build()
|
||||
}
|
||||
|
||||
if b.inputEvent != nil && b.inputEvent.Timestamp+INPUT_EVENT_TIMEOUT < timestamp {
|
||||
return b.Build()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *inputEventBuilder) Build() Message {
|
||||
if b.inputEvent == nil {
|
||||
return nil
|
||||
}
|
||||
inputEvent := b.inputEvent
|
||||
inputEvent.Label = b.inputLabels[b.inputID] // might be empty string
|
||||
|
||||
b.inputEvent = nil
|
||||
return inputEvent
|
||||
}
|
||||
106
backend/internal/handlers/custom/pageEventBuilder.go
Normal file
106
backend/internal/handlers/custom/pageEventBuilder.go
Normal file
|
|
@ -0,0 +1,106 @@
|
|||
package custom
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
const PAGE_EVENT_TIMEOUT = 1 * 60 * 1000
|
||||
|
||||
type pageEventBuilder struct {
|
||||
pageEvent *PageEvent
|
||||
firstTimingHandled bool
|
||||
}
|
||||
|
||||
func NewPageEventBuilder() *pageEventBuilder {
|
||||
ieBuilder := &pageEventBuilder{}
|
||||
return ieBuilder
|
||||
}
|
||||
|
||||
func (b *pageEventBuilder) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
switch msg := message.(type) {
|
||||
case *SetPageLocation:
|
||||
if msg.NavigationStart == 0 { // routing without new page loading
|
||||
return &PageEvent{
|
||||
URL: msg.URL,
|
||||
Referrer: msg.Referrer,
|
||||
Loaded: false,
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
} else {
|
||||
pageEvent := b.Build()
|
||||
b.pageEvent = &PageEvent{
|
||||
URL: msg.URL,
|
||||
Referrer: msg.Referrer,
|
||||
Loaded: true,
|
||||
MessageID: messageID,
|
||||
Timestamp: timestamp,
|
||||
}
|
||||
return pageEvent
|
||||
}
|
||||
case *PageLoadTiming:
|
||||
if b.pageEvent == nil {
|
||||
break
|
||||
}
|
||||
if msg.RequestStart <= 30000 {
|
||||
b.pageEvent.RequestStart = msg.RequestStart
|
||||
}
|
||||
if msg.ResponseStart <= 30000 {
|
||||
b.pageEvent.ResponseStart = msg.ResponseStart
|
||||
}
|
||||
if msg.ResponseEnd <= 30000 {
|
||||
b.pageEvent.ResponseEnd = msg.ResponseEnd
|
||||
}
|
||||
if msg.DomContentLoadedEventStart <= 30000 {
|
||||
b.pageEvent.DomContentLoadedEventStart = msg.DomContentLoadedEventStart
|
||||
}
|
||||
if msg.DomContentLoadedEventEnd <= 30000 {
|
||||
b.pageEvent.DomContentLoadedEventEnd = msg.DomContentLoadedEventEnd
|
||||
}
|
||||
if msg.LoadEventStart <= 30000 {
|
||||
b.pageEvent.LoadEventStart = msg.LoadEventStart
|
||||
}
|
||||
if msg.LoadEventEnd <= 30000 {
|
||||
b.pageEvent.LoadEventEnd = msg.LoadEventEnd
|
||||
}
|
||||
if msg.FirstPaint <= 30000 {
|
||||
b.pageEvent.FirstPaint = msg.FirstPaint
|
||||
}
|
||||
if msg.FirstContentfulPaint <= 30000 {
|
||||
b.pageEvent.FirstContentfulPaint = msg.FirstContentfulPaint
|
||||
}
|
||||
return b.buildIfTimingsComplete()
|
||||
case *PageRenderTiming:
|
||||
if b.pageEvent == nil {
|
||||
break
|
||||
}
|
||||
b.pageEvent.SpeedIndex = msg.SpeedIndex
|
||||
b.pageEvent.VisuallyComplete = msg.VisuallyComplete
|
||||
b.pageEvent.TimeToInteractive = msg.TimeToInteractive
|
||||
return b.buildIfTimingsComplete()
|
||||
|
||||
}
|
||||
|
||||
if b.pageEvent != nil && b.pageEvent.Timestamp+PAGE_EVENT_TIMEOUT < timestamp {
|
||||
return b.Build()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *pageEventBuilder) Build() Message {
|
||||
if b.pageEvent == nil {
|
||||
return nil
|
||||
}
|
||||
pageEvent := b.pageEvent
|
||||
b.pageEvent = nil
|
||||
b.firstTimingHandled = false
|
||||
return pageEvent
|
||||
}
|
||||
|
||||
func (b *pageEventBuilder) buildIfTimingsComplete() Message {
|
||||
if b.firstTimingHandled {
|
||||
return b.Build()
|
||||
}
|
||||
b.firstTimingHandled = true
|
||||
return nil
|
||||
}
|
||||
69
backend/internal/handlers/ios/appNotResponding.go
Normal file
69
backend/internal/handlers/ios/appNotResponding.go
Normal file
|
|
@ -0,0 +1,69 @@
|
|||
package ios
|
||||
|
||||
import (
|
||||
"openreplay/backend/internal/handlers"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: AppNotResponding
|
||||
Input events: IOSClickEvent,
|
||||
IOSInputEvent,
|
||||
IOSPerformanceEvent,
|
||||
IOSSessionEnd
|
||||
Output event: IOSIssueEvent
|
||||
*/
|
||||
|
||||
const MIN_TIME_AFTER_LAST_HEARTBEAT = 60 * 1000
|
||||
|
||||
type AppNotResponding struct {
|
||||
handlers.ReadyMessageStore
|
||||
lastLabel string
|
||||
lastHeartbeatTimestamp uint64
|
||||
lastHeartbeatIndex uint64
|
||||
lastTimestamp uint64
|
||||
}
|
||||
|
||||
func (h *AppNotResponding) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
h.lastTimestamp = timestamp
|
||||
var event Message = nil
|
||||
switch m := message.(type) {
|
||||
case *IOSClickEvent:
|
||||
event = h.build(m.Timestamp)
|
||||
h.lastLabel = m.Label
|
||||
h.lastHeartbeatTimestamp = m.Timestamp
|
||||
h.lastHeartbeatIndex = m.Index
|
||||
case *IOSInputEvent:
|
||||
event = h.build(m.Timestamp)
|
||||
h.lastLabel = m.Label
|
||||
h.lastHeartbeatTimestamp = m.Timestamp
|
||||
h.lastHeartbeatIndex = m.Index
|
||||
case *IOSPerformanceEvent:
|
||||
event = h.build(m.Timestamp)
|
||||
h.lastHeartbeatTimestamp = m.Timestamp
|
||||
h.lastHeartbeatIndex = m.Index
|
||||
case *IOSSessionEnd:
|
||||
event = h.build(m.Timestamp)
|
||||
}
|
||||
return event
|
||||
}
|
||||
|
||||
func (h *AppNotResponding) Build() Message {
|
||||
return h.build(h.lastTimestamp)
|
||||
}
|
||||
|
||||
func (h *AppNotResponding) build(timestamp uint64) Message {
|
||||
if h.lastHeartbeatTimestamp != 0 && h.lastHeartbeatTimestamp+MIN_TIME_AFTER_LAST_HEARTBEAT <= timestamp {
|
||||
event := &IOSIssueEvent{
|
||||
Type: "anr",
|
||||
ContextString: h.lastLabel,
|
||||
Timestamp: h.lastHeartbeatTimestamp,
|
||||
}
|
||||
event.Index = h.lastHeartbeatIndex // Associated Index/ MessageID ?
|
||||
// Reset
|
||||
h.lastHeartbeatTimestamp = 0
|
||||
h.lastHeartbeatIndex = 0
|
||||
return event
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,14 +1,22 @@
|
|||
package heuristics
|
||||
package ios
|
||||
|
||||
import (
|
||||
"openreplay/backend/internal/handlers"
|
||||
"openreplay/backend/internal/handlers/web"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
const CLICK_TIME_DIFF = 200
|
||||
const MIN_CLICKS_IN_A_ROW = 3
|
||||
/*
|
||||
Handler name: ClickRage
|
||||
Input events: IOSClickEvent,
|
||||
IOSSessionEnd
|
||||
Output event: IOSIssueEvent
|
||||
*/
|
||||
|
||||
type clickrage struct {
|
||||
readyMessageStore
|
||||
const CLICK_TIME_DIFF = 200
|
||||
|
||||
type ClickRageDetector struct {
|
||||
handlers.ReadyMessageStore
|
||||
lastTimestamp uint64
|
||||
lastLabel string
|
||||
firstInARawTimestamp uint64
|
||||
|
|
@ -16,34 +24,16 @@ type clickrage struct {
|
|||
countsInARow int
|
||||
}
|
||||
|
||||
func (h *clickrage) build() {
|
||||
if h.countsInARow >= MIN_CLICKS_IN_A_ROW {
|
||||
m := &IOSIssueEvent{
|
||||
Type: "click_rage",
|
||||
ContextString: h.lastLabel,
|
||||
//Context: "{}",
|
||||
//Payload: fmt.SPrint
|
||||
}
|
||||
m.Timestamp = h.firstInARawTimestamp
|
||||
m.Index = h.firstInARawSeqIndex // Associated Index/ MessageID ?
|
||||
h.append(m)
|
||||
}
|
||||
h.lastTimestamp = 0
|
||||
h.lastLabel = ""
|
||||
h.firstInARawTimestamp = 0
|
||||
h.firstInARawSeqIndex = 0
|
||||
h.countsInARow = 0
|
||||
}
|
||||
|
||||
func (h *clickrage) HandleMessage(msg Message) {
|
||||
switch m := msg.(type) {
|
||||
func (h *ClickRageDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
var event Message = nil
|
||||
switch m := message.(type) {
|
||||
case *IOSClickEvent:
|
||||
if h.lastTimestamp+CLICK_TIME_DIFF < m.Timestamp && h.lastLabel == m.Label {
|
||||
h.lastTimestamp = m.Timestamp
|
||||
h.countsInARow += 1
|
||||
return
|
||||
return nil
|
||||
}
|
||||
h.build()
|
||||
event = h.Build()
|
||||
if m.Label != "" {
|
||||
h.lastTimestamp = m.Timestamp
|
||||
h.lastLabel = m.Label
|
||||
|
|
@ -52,6 +42,25 @@ func (h *clickrage) HandleMessage(msg Message) {
|
|||
h.countsInARow = 1
|
||||
}
|
||||
case *IOSSessionEnd:
|
||||
h.build()
|
||||
event = h.Build()
|
||||
}
|
||||
return event
|
||||
}
|
||||
|
||||
func (h *ClickRageDetector) Build() Message {
|
||||
if h.countsInARow >= web.MIN_CLICKS_IN_A_ROW {
|
||||
event := &IOSIssueEvent{
|
||||
Type: "click_rage",
|
||||
ContextString: h.lastLabel,
|
||||
}
|
||||
event.Timestamp = h.firstInARawTimestamp
|
||||
event.Index = h.firstInARawSeqIndex // Associated Index/ MessageID ?
|
||||
return event
|
||||
}
|
||||
h.lastTimestamp = 0
|
||||
h.lastLabel = ""
|
||||
h.firstInARawTimestamp = 0
|
||||
h.firstInARawSeqIndex = 0
|
||||
h.countsInARow = 0
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,9 +1,17 @@
|
|||
package heuristics
|
||||
package ios
|
||||
|
||||
import (
|
||||
"openreplay/backend/internal/handlers"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: PerformanceAggregator
|
||||
Input events: IOSPerformanceEvent,
|
||||
IOSSessionEnd
|
||||
Output event: IssueEvent
|
||||
*/
|
||||
|
||||
const AGGR_TIME = 15 * 60 * 1000
|
||||
|
||||
type valueAggregator struct {
|
||||
|
|
@ -18,45 +26,29 @@ func (va *valueAggregator) aggregate() uint64 {
|
|||
return uint64(va.sum / va.count)
|
||||
}
|
||||
|
||||
type performanceAggregator struct {
|
||||
readyMessageStore
|
||||
pa *IOSPerformanceAggregated
|
||||
fps valueAggregator
|
||||
cpu valueAggregator
|
||||
memory valueAggregator
|
||||
battery valueAggregator
|
||||
type PerformanceAggregator struct {
|
||||
handlers.ReadyMessageStore
|
||||
pa *IOSPerformanceAggregated
|
||||
fps valueAggregator
|
||||
cpu valueAggregator
|
||||
memory valueAggregator
|
||||
battery valueAggregator
|
||||
lastTimestamp uint64
|
||||
}
|
||||
|
||||
func (h *performanceAggregator) build(timestamp uint64) {
|
||||
if h.pa == nil {
|
||||
return
|
||||
}
|
||||
h.pa.TimestampEnd = timestamp
|
||||
h.pa.AvgFPS = h.fps.aggregate()
|
||||
h.pa.AvgCPU = h.cpu.aggregate()
|
||||
h.pa.AvgMemory = h.memory.aggregate()
|
||||
h.pa.AvgBattery = h.battery.aggregate()
|
||||
|
||||
h.append(h.pa)
|
||||
|
||||
h.pa = &IOSPerformanceAggregated{}
|
||||
for _, agg := range []valueAggregator{h.fps, h.cpu, h.memory, h.battery} {
|
||||
agg.sum = 0
|
||||
agg.count = 0
|
||||
}
|
||||
}
|
||||
|
||||
func (h *performanceAggregator) HandleMessage(msg Message) {
|
||||
func (h *PerformanceAggregator) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
h.lastTimestamp = timestamp
|
||||
if h.pa == nil {
|
||||
h.pa = &IOSPerformanceAggregated{} // TODO: struct type in messages
|
||||
}
|
||||
switch m := msg.(type) { // TODO: All Timestampe messages
|
||||
var event Message = nil
|
||||
switch m := message.(type) { // TODO: All Timestamp messages
|
||||
case *IOSPerformanceEvent:
|
||||
if h.pa.TimestampStart == 0 {
|
||||
h.pa.TimestampStart = m.Timestamp
|
||||
}
|
||||
if h.pa.TimestampStart+AGGR_TIME <= m.Timestamp {
|
||||
h.build(m.Timestamp)
|
||||
event = h.Build()
|
||||
}
|
||||
switch m.Name {
|
||||
case "fps":
|
||||
|
|
@ -97,6 +89,28 @@ func (h *performanceAggregator) HandleMessage(msg Message) {
|
|||
}
|
||||
}
|
||||
case *IOSSessionEnd:
|
||||
h.build(m.Timestamp)
|
||||
event = h.Build()
|
||||
}
|
||||
return event
|
||||
}
|
||||
|
||||
func (h *PerformanceAggregator) Build() Message {
|
||||
if h.pa == nil {
|
||||
return nil
|
||||
}
|
||||
|
||||
h.pa.TimestampEnd = h.lastTimestamp
|
||||
h.pa.AvgFPS = h.fps.aggregate()
|
||||
h.pa.AvgCPU = h.cpu.aggregate()
|
||||
h.pa.AvgMemory = h.memory.aggregate()
|
||||
h.pa.AvgBattery = h.battery.aggregate()
|
||||
|
||||
event := h.pa
|
||||
|
||||
h.pa = &IOSPerformanceAggregated{}
|
||||
for _, agg := range []valueAggregator{h.fps, h.cpu, h.memory, h.battery} {
|
||||
agg.sum = 0
|
||||
agg.count = 0
|
||||
}
|
||||
return event
|
||||
}
|
||||
11
backend/internal/handlers/messageProcessor.go
Normal file
11
backend/internal/handlers/messageProcessor.go
Normal file
|
|
@ -0,0 +1,11 @@
|
|||
package handlers
|
||||
|
||||
import . "openreplay/backend/pkg/messages"
|
||||
|
||||
// Heuristic interface - common interface for user's realisations
|
||||
// U can create your own message handler and easily connect to heuristics service
|
||||
|
||||
type MessageProcessor interface {
|
||||
Handle(message Message, messageID uint64, timestamp uint64) Message
|
||||
Build() Message
|
||||
}
|
||||
|
|
@ -1,18 +1,18 @@
|
|||
package heuristics
|
||||
package handlers
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
type readyMessageStore struct {
|
||||
type ReadyMessageStore struct {
|
||||
store []Message
|
||||
}
|
||||
|
||||
func (s *readyMessageStore) append(msg Message) {
|
||||
func (s *ReadyMessageStore) Append(msg Message) {
|
||||
s.store = append(s.store, msg)
|
||||
}
|
||||
|
||||
func (s *readyMessageStore) IterateReadyMessages(cb func(msg Message)) {
|
||||
func (s *ReadyMessageStore) IterateReadyMessages(cb func(msg Message)) {
|
||||
for _, msg := range s.store {
|
||||
cb(msg)
|
||||
}
|
||||
75
backend/internal/handlers/web/clickRage.go
Normal file
75
backend/internal/handlers/web/clickRage.go
Normal file
|
|
@ -0,0 +1,75 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: ClickRage
|
||||
Input event: MouseClick
|
||||
Output event: IssueEvent
|
||||
*/
|
||||
|
||||
const MAX_TIME_DIFF = 300
|
||||
const MIN_CLICKS_IN_A_ROW = 3
|
||||
|
||||
type ClickRageDetector struct {
|
||||
lastTimestamp uint64
|
||||
lastLabel string
|
||||
firstInARawTimestamp uint64
|
||||
firstInARawMessageId uint64
|
||||
countsInARow int
|
||||
}
|
||||
|
||||
func (crd *ClickRageDetector) reset() {
|
||||
crd.lastTimestamp = 0
|
||||
crd.lastLabel = ""
|
||||
crd.firstInARawTimestamp = 0
|
||||
crd.firstInARawMessageId = 0
|
||||
crd.countsInARow = 0
|
||||
}
|
||||
|
||||
func (crd *ClickRageDetector) Build() Message {
|
||||
defer crd.reset()
|
||||
if crd.countsInARow >= MIN_CLICKS_IN_A_ROW {
|
||||
payload, err := json.Marshal(struct{ Count int }{crd.countsInARow})
|
||||
if err != nil {
|
||||
log.Printf("can't marshal ClickRage payload to json: %s", err)
|
||||
}
|
||||
event := &IssueEvent{
|
||||
Type: "click_rage",
|
||||
ContextString: crd.lastLabel,
|
||||
Payload: string(payload),
|
||||
Timestamp: crd.firstInARawTimestamp,
|
||||
MessageID: crd.firstInARawMessageId,
|
||||
}
|
||||
return event
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (crd *ClickRageDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
switch msg := message.(type) {
|
||||
case *MouseClick:
|
||||
// TODO: check if we it is ok to capture clickRage event without the connected ClickEvent in db.
|
||||
if msg.Label == "" {
|
||||
return crd.Build()
|
||||
}
|
||||
if crd.lastLabel == msg.Label && timestamp-crd.lastTimestamp < MAX_TIME_DIFF {
|
||||
crd.lastTimestamp = timestamp
|
||||
crd.countsInARow += 1
|
||||
return nil
|
||||
}
|
||||
event := crd.Build()
|
||||
crd.lastTimestamp = timestamp
|
||||
crd.lastLabel = msg.Label
|
||||
crd.firstInARawTimestamp = timestamp
|
||||
crd.firstInARawMessageId = messageID
|
||||
crd.countsInARow = 1
|
||||
return event
|
||||
}
|
||||
return nil
|
||||
}
|
||||
93
backend/internal/handlers/web/cpuIssue.go
Normal file
93
backend/internal/handlers/web/cpuIssue.go
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/messages/performance"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: CpuIssue
|
||||
Input events: PerformanceTrack,
|
||||
SetPageLocation
|
||||
Output event: IssueEvent
|
||||
*/
|
||||
|
||||
const CPU_THRESHOLD = 70 // % out of 100
|
||||
const CPU_MIN_DURATION_TRIGGER = 6 * 1000
|
||||
|
||||
type CpuIssueDetector struct {
|
||||
startTimestamp uint64
|
||||
startMessageID uint64
|
||||
lastTimestamp uint64
|
||||
maxRate uint64
|
||||
contextString string
|
||||
}
|
||||
|
||||
func (f *CpuIssueDetector) Build() Message {
|
||||
if f.startTimestamp == 0 {
|
||||
return nil
|
||||
}
|
||||
duration := f.lastTimestamp - f.startTimestamp
|
||||
timestamp := f.startTimestamp
|
||||
messageID := f.startMessageID
|
||||
maxRate := f.maxRate
|
||||
|
||||
f.startTimestamp = 0
|
||||
f.startMessageID = 0
|
||||
f.maxRate = 0
|
||||
if duration < CPU_MIN_DURATION_TRIGGER {
|
||||
return nil
|
||||
}
|
||||
|
||||
payload, err := json.Marshal(struct {
|
||||
Duration uint64
|
||||
Rate uint64
|
||||
}{duration, maxRate})
|
||||
if err != nil {
|
||||
log.Printf("can't marshal CpuIssue payload to json: %s", err)
|
||||
}
|
||||
|
||||
return &IssueEvent{
|
||||
Type: "cpu",
|
||||
Timestamp: timestamp,
|
||||
MessageID: messageID,
|
||||
ContextString: f.contextString,
|
||||
Payload: string(payload),
|
||||
}
|
||||
}
|
||||
|
||||
func (f *CpuIssueDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
switch msg := message.(type) {
|
||||
case *PerformanceTrack:
|
||||
dt := performance.TimeDiff(timestamp, f.lastTimestamp)
|
||||
if dt == 0 {
|
||||
return nil // TODO: handle error
|
||||
}
|
||||
|
||||
f.lastTimestamp = timestamp
|
||||
|
||||
if msg.Frames == -1 || msg.Ticks == -1 {
|
||||
return f.Build()
|
||||
}
|
||||
|
||||
cpuRate := performance.CPURate(msg.Ticks, dt)
|
||||
|
||||
if cpuRate >= CPU_THRESHOLD {
|
||||
if f.startTimestamp == 0 {
|
||||
f.startTimestamp = timestamp
|
||||
f.startMessageID = messageID
|
||||
}
|
||||
if f.maxRate < cpuRate {
|
||||
f.maxRate = cpuRate
|
||||
}
|
||||
} else {
|
||||
return f.Build()
|
||||
}
|
||||
case *SetPageLocation:
|
||||
f.contextString = msg.URL
|
||||
}
|
||||
return nil
|
||||
}
|
||||
93
backend/internal/handlers/web/deadClick.go
Normal file
93
backend/internal/handlers/web/deadClick.go
Normal file
|
|
@ -0,0 +1,93 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: DeadClick
|
||||
Input events: SetInputTarget,
|
||||
CreateDocument,
|
||||
MouseClick,
|
||||
SetNodeAttribute,
|
||||
RemoveNodeAttribute,
|
||||
CreateElementNode,
|
||||
CreateTextNode,
|
||||
MoveNode,
|
||||
RemoveNode,
|
||||
SetCSSData,
|
||||
CSSInsertRule,
|
||||
CSSDeleteRule
|
||||
Output event: IssueEvent
|
||||
*/
|
||||
|
||||
const CLICK_RELATION_TIME = 1400
|
||||
|
||||
type DeadClickDetector struct {
|
||||
lastTimestamp uint64
|
||||
lastMouseClick *MouseClick
|
||||
lastClickTimestamp uint64
|
||||
lastMessageID uint64
|
||||
inputIDSet map[uint64]bool
|
||||
}
|
||||
|
||||
func (d *DeadClickDetector) reset() {
|
||||
d.inputIDSet = nil
|
||||
d.lastMouseClick = nil
|
||||
d.lastClickTimestamp = 0
|
||||
d.lastMessageID = 0
|
||||
}
|
||||
|
||||
func (d *DeadClickDetector) build(timestamp uint64) Message {
|
||||
defer d.reset()
|
||||
if d.lastMouseClick == nil || d.lastClickTimestamp+CLICK_RELATION_TIME > timestamp { // reaction is instant
|
||||
return nil
|
||||
}
|
||||
event := &IssueEvent{
|
||||
Type: "dead_click",
|
||||
ContextString: d.lastMouseClick.Label,
|
||||
Timestamp: d.lastClickTimestamp,
|
||||
MessageID: d.lastMessageID,
|
||||
}
|
||||
return event
|
||||
}
|
||||
|
||||
func (d *DeadClickDetector) Build() Message {
|
||||
return d.build(d.lastTimestamp)
|
||||
}
|
||||
|
||||
func (d *DeadClickDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
d.lastTimestamp = timestamp
|
||||
switch msg := message.(type) {
|
||||
case *SetInputTarget:
|
||||
if d.inputIDSet == nil {
|
||||
d.inputIDSet = make(map[uint64]bool)
|
||||
}
|
||||
d.inputIDSet[msg.ID] = true
|
||||
case *CreateDocument:
|
||||
d.inputIDSet = nil
|
||||
case *MouseClick:
|
||||
if msg.Label == "" {
|
||||
return nil
|
||||
}
|
||||
event := d.build(timestamp)
|
||||
if d.inputIDSet[msg.ID] { // ignore if input
|
||||
return event
|
||||
}
|
||||
d.lastMouseClick = msg
|
||||
d.lastClickTimestamp = timestamp
|
||||
d.lastMessageID = messageID
|
||||
return event
|
||||
case *SetNodeAttribute,
|
||||
*RemoveNodeAttribute,
|
||||
*CreateElementNode,
|
||||
*CreateTextNode,
|
||||
*MoveNode,
|
||||
*RemoveNode,
|
||||
*SetCSSData,
|
||||
*CSSInsertRule,
|
||||
*CSSDeleteRule:
|
||||
return d.build(timestamp)
|
||||
}
|
||||
return nil
|
||||
}
|
||||
55
backend/internal/handlers/web/domDrop.go
Normal file
55
backend/internal/handlers/web/domDrop.go
Normal file
|
|
@ -0,0 +1,55 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: DomDrop
|
||||
Input events: CreateElementNode,
|
||||
CreateTextNode,
|
||||
RemoveNode
|
||||
Output event: DOMDrop
|
||||
*/
|
||||
|
||||
const DROP_WINDOW = 200 //ms
|
||||
const CRITICAL_COUNT = 1 // Our login page contains 20. But on crush it removes only roots (1-3 nodes).
|
||||
// TODO: smart detection (making whole DOM tree would eat all memory)
|
||||
|
||||
type domDropDetector struct {
|
||||
removedCount int
|
||||
lastDropTimestamp uint64
|
||||
}
|
||||
|
||||
func (dd *domDropDetector) reset() {
|
||||
dd.removedCount = 0
|
||||
dd.lastDropTimestamp = 0
|
||||
}
|
||||
|
||||
func (dd *domDropDetector) Handle(message Message, _ uint64, timestamp uint64) Message {
|
||||
switch message.(type) {
|
||||
case *CreateElementNode,
|
||||
*CreateTextNode:
|
||||
dd.removedCount = 0
|
||||
dd.lastDropTimestamp = 0
|
||||
case *RemoveNode:
|
||||
if dd.lastDropTimestamp+DROP_WINDOW > timestamp {
|
||||
dd.removedCount += 1
|
||||
} else {
|
||||
dd.removedCount = 1
|
||||
}
|
||||
dd.lastDropTimestamp = timestamp
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (dd *domDropDetector) Build() Message {
|
||||
defer dd.reset()
|
||||
if dd.removedCount >= CRITICAL_COUNT {
|
||||
domDrop := &DOMDrop{
|
||||
Timestamp: dd.lastDropTimestamp,
|
||||
}
|
||||
return domDrop
|
||||
}
|
||||
return nil
|
||||
}
|
||||
85
backend/internal/handlers/web/memoryIssue.go
Normal file
85
backend/internal/handlers/web/memoryIssue.go
Normal file
|
|
@ -0,0 +1,85 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"log"
|
||||
"math"
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: MemoryIssue
|
||||
Input events: PerformanceTrack,
|
||||
SetPageLocation
|
||||
Output event: IssueEvent
|
||||
*/
|
||||
|
||||
const MIN_COUNT = 3
|
||||
const MEM_RATE_THRESHOLD = 300 // % to average
|
||||
|
||||
type MemoryIssueDetector struct {
|
||||
startMessageID uint64
|
||||
startTimestamp uint64
|
||||
rate int
|
||||
count float64
|
||||
sum float64
|
||||
contextString string
|
||||
}
|
||||
|
||||
func (f *MemoryIssueDetector) reset() {
|
||||
f.startTimestamp = 0
|
||||
f.startMessageID = 0
|
||||
f.rate = 0
|
||||
}
|
||||
|
||||
func (f *MemoryIssueDetector) Build() Message {
|
||||
if f.startTimestamp == 0 {
|
||||
return nil
|
||||
}
|
||||
payload, err := json.Marshal(struct{ Rate int }{f.rate - 100})
|
||||
if err != nil {
|
||||
log.Printf("can't marshal MemoryIssue payload to json: %s", err)
|
||||
}
|
||||
event := &IssueEvent{
|
||||
Type: "memory",
|
||||
Timestamp: f.startTimestamp,
|
||||
MessageID: f.startMessageID,
|
||||
ContextString: f.contextString,
|
||||
Payload: string(payload),
|
||||
}
|
||||
f.reset()
|
||||
return event
|
||||
}
|
||||
|
||||
func (f *MemoryIssueDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
switch msg := message.(type) {
|
||||
case *PerformanceTrack:
|
||||
if f.count < MIN_COUNT {
|
||||
f.sum += float64(msg.UsedJSHeapSize)
|
||||
f.count++
|
||||
return nil
|
||||
}
|
||||
|
||||
average := f.sum / f.count
|
||||
rate := int(math.Round(float64(msg.UsedJSHeapSize) / average * 100))
|
||||
|
||||
f.sum += float64(msg.UsedJSHeapSize)
|
||||
f.count++
|
||||
|
||||
if rate >= MEM_RATE_THRESHOLD {
|
||||
if f.startTimestamp == 0 {
|
||||
f.startTimestamp = timestamp
|
||||
f.startMessageID = messageID
|
||||
}
|
||||
if f.rate < rate {
|
||||
f.rate = rate
|
||||
}
|
||||
} else {
|
||||
return f.Build()
|
||||
}
|
||||
case *SetPageLocation:
|
||||
f.contextString = msg.URL
|
||||
}
|
||||
return nil
|
||||
}
|
||||
47
backend/internal/handlers/web/networkIssue.go
Normal file
47
backend/internal/handlers/web/networkIssue.go
Normal file
|
|
@ -0,0 +1,47 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
. "openreplay/backend/pkg/messages"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: NetworkIssue
|
||||
Input events: ResourceTiming,
|
||||
Fetch
|
||||
Output event: IssueEvent
|
||||
*/
|
||||
|
||||
type NetworkIssueDetector struct{}
|
||||
|
||||
func (f *NetworkIssueDetector) Build() Message {
|
||||
return nil
|
||||
}
|
||||
|
||||
func (f *NetworkIssueDetector) Handle(message Message, messageID uint64, timestamp uint64) Message {
|
||||
switch msg := message.(type) {
|
||||
case *ResourceTiming:
|
||||
success := msg.Duration != 0 // The only available way here
|
||||
if !success {
|
||||
issueType := "missing_resource"
|
||||
if msg.Initiator == "fetch" || msg.Initiator == "xmlhttprequest" {
|
||||
issueType = "bad_request"
|
||||
}
|
||||
return &IssueEvent{
|
||||
Type: issueType,
|
||||
MessageID: messageID,
|
||||
Timestamp: msg.Timestamp,
|
||||
ContextString: msg.URL,
|
||||
}
|
||||
}
|
||||
case *Fetch:
|
||||
if msg.Status >= 400 {
|
||||
return &IssueEvent{
|
||||
Type: "bad_request",
|
||||
MessageID: messageID,
|
||||
Timestamp: msg.Timestamp,
|
||||
ContextString: msg.URL,
|
||||
}
|
||||
}
|
||||
}
|
||||
return nil
|
||||
}
|
||||
118
backend/internal/handlers/web/performanceAggregator.go
Normal file
118
backend/internal/handlers/web/performanceAggregator.go
Normal file
|
|
@ -0,0 +1,118 @@
|
|||
package web
|
||||
|
||||
import (
|
||||
"math"
|
||||
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/messages/performance"
|
||||
)
|
||||
|
||||
/*
|
||||
Handler name: PerformanceAggregator
|
||||
Input event: PerformanceTrack
|
||||
Output event: PerformanceTrackAggr
|
||||
*/
|
||||
|
||||
const AGGREGATION_WINDOW = 2 * 60 * 1000
|
||||
|
||||
type PerformanceAggregator struct {
|
||||
*PerformanceTrackAggr
|
||||
lastTimestamp uint64
|
||||
count float64
|
||||
sumFrameRate float64
|
||||
sumTickRate float64
|
||||
sumTotalJSHeapSize float64
|
||||
sumUsedJSHeapSize float64
|
||||
}
|
||||
|
||||
func (b *PerformanceAggregator) start(timestamp uint64) {
|
||||
b.PerformanceTrackAggr = &PerformanceTrackAggr{
|
||||
TimestampStart: timestamp,
|
||||
}
|
||||
b.lastTimestamp = timestamp
|
||||
}
|
||||
|
||||
func (b *PerformanceAggregator) reset() {
|
||||
b.PerformanceTrackAggr = nil
|
||||
b.count = 0
|
||||
b.sumFrameRate = 0
|
||||
b.sumTickRate = 0
|
||||
b.sumTotalJSHeapSize = 0
|
||||
b.sumUsedJSHeapSize = 0
|
||||
b.lastTimestamp = 0
|
||||
}
|
||||
|
||||
func (b *PerformanceAggregator) Handle(message Message, _ uint64, timestamp uint64) Message {
|
||||
switch msg := message.(type) {
|
||||
case *PerformanceTrack:
|
||||
if b.PerformanceTrackAggr == nil || msg.Frames == -1 || msg.Ticks == -1 {
|
||||
pta := b.Build()
|
||||
b.start(timestamp)
|
||||
return pta
|
||||
}
|
||||
|
||||
dt := performance.TimeDiff(timestamp, b.lastTimestamp)
|
||||
if dt == 0 {
|
||||
return nil // shouldn't happen
|
||||
}
|
||||
|
||||
frameRate := performance.FrameRate(msg.Frames, dt)
|
||||
tickRate := performance.TickRate(msg.Ticks, dt)
|
||||
|
||||
fps := uint64(math.Round(frameRate))
|
||||
cpu := performance.CPURateFromTickRate(tickRate)
|
||||
if fps < b.MinFPS || b.MinFPS == 0 {
|
||||
b.MinFPS = fps
|
||||
}
|
||||
if fps > b.MaxFPS {
|
||||
b.MaxFPS = fps
|
||||
}
|
||||
if cpu < b.MinCPU || b.MinCPU == 0 {
|
||||
b.MinCPU = cpu
|
||||
}
|
||||
if cpu > b.MaxCPU {
|
||||
b.MaxCPU = cpu
|
||||
}
|
||||
if msg.TotalJSHeapSize < b.MinTotalJSHeapSize || b.MinTotalJSHeapSize == 0 {
|
||||
b.MinTotalJSHeapSize = msg.TotalJSHeapSize
|
||||
}
|
||||
if msg.TotalJSHeapSize > b.MaxTotalJSHeapSize {
|
||||
b.MaxTotalJSHeapSize = msg.TotalJSHeapSize
|
||||
}
|
||||
if msg.UsedJSHeapSize < b.MinUsedJSHeapSize || b.MinUsedJSHeapSize == 0 {
|
||||
b.MinUsedJSHeapSize = msg.UsedJSHeapSize
|
||||
}
|
||||
if msg.UsedJSHeapSize > b.MaxUsedJSHeapSize {
|
||||
b.MaxUsedJSHeapSize = msg.UsedJSHeapSize
|
||||
}
|
||||
b.sumFrameRate += frameRate
|
||||
b.sumTickRate += tickRate
|
||||
b.sumTotalJSHeapSize += float64(msg.TotalJSHeapSize)
|
||||
b.sumUsedJSHeapSize += float64(msg.UsedJSHeapSize)
|
||||
b.count += 1
|
||||
b.lastTimestamp = timestamp
|
||||
}
|
||||
if b.PerformanceTrackAggr != nil &&
|
||||
timestamp-b.PerformanceTrackAggr.TimestampStart >= AGGREGATION_WINDOW {
|
||||
return b.Build()
|
||||
}
|
||||
return nil
|
||||
}
|
||||
|
||||
func (b *PerformanceAggregator) Build() Message {
|
||||
if b.PerformanceTrackAggr == nil {
|
||||
return nil
|
||||
}
|
||||
if b.count != 0 && b.PerformanceTrackAggr.TimestampStart < b.lastTimestamp { // the last one shouldn't happen
|
||||
b.PerformanceTrackAggr.TimestampEnd = b.lastTimestamp
|
||||
b.PerformanceTrackAggr.AvgFPS = uint64(math.Round(b.sumFrameRate / b.count))
|
||||
b.PerformanceTrackAggr.AvgCPU = 100 - uint64(math.Round(b.sumTickRate*100/b.count))
|
||||
b.PerformanceTrackAggr.AvgTotalJSHeapSize = uint64(math.Round(b.sumTotalJSHeapSize / b.count))
|
||||
b.PerformanceTrackAggr.AvgUsedJSHeapSize = uint64(math.Round(b.sumUsedJSHeapSize / b.count))
|
||||
msg := b.PerformanceTrackAggr
|
||||
b.reset()
|
||||
return msg
|
||||
}
|
||||
b.reset()
|
||||
return nil
|
||||
}
|
||||
|
|
@ -1,10 +1,10 @@
|
|||
package clientManager
|
||||
|
||||
import (
|
||||
"openreplay/backend/internal/integrations/integration"
|
||||
"strconv"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
"openreplay/backend/services/integrations/integration"
|
||||
)
|
||||
|
||||
type manager struct {
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package ios
|
||||
|
||||
import (
|
||||
"strings"
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package oswriter
|
||||
|
||||
import (
|
||||
"math"
|
||||
172
backend/internal/router/handlers-ios.go
Normal file
172
backend/internal/router/handlers-ios.go
Normal file
|
|
@ -0,0 +1,172 @@
|
|||
package router
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"openreplay/backend/internal/ios"
|
||||
"openreplay/backend/internal/uuid"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/token"
|
||||
)
|
||||
|
||||
func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
startTime := time.Now()
|
||||
req := &StartIOSSessionRequest{}
|
||||
|
||||
if r.Body == nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
|
||||
return
|
||||
}
|
||||
body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
|
||||
defer body.Close()
|
||||
|
||||
if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
|
||||
if req.ProjectKey == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
|
||||
return
|
||||
}
|
||||
|
||||
p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
|
||||
if err != nil {
|
||||
if postgres.IsNoRowsErr(err) {
|
||||
ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"))
|
||||
} else {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
}
|
||||
return
|
||||
}
|
||||
userUUID := uuid.GetUUID(req.UserUUID)
|
||||
tokenData, err := e.services.Tokenizer.Parse(req.Token)
|
||||
|
||||
if err != nil { // Starting the new one
|
||||
dice := byte(rand.Intn(100)) // [0, 100)
|
||||
if dice >= p.SampleRate {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
|
||||
return
|
||||
}
|
||||
|
||||
ua := e.services.UaParser.ParseFromHTTPRequest(r)
|
||||
if ua == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
|
||||
return
|
||||
}
|
||||
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
// TODO: if EXPIRED => send message for two sessions association
|
||||
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
tokenData = &token.TokenData{sessionID, expTime.UnixMilli()}
|
||||
|
||||
country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
|
||||
|
||||
// The difference with web is mostly here:
|
||||
e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, Encode(&IOSSessionStart{
|
||||
Timestamp: req.Timestamp,
|
||||
ProjectID: uint64(p.ProjectID),
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
RevID: req.RevID,
|
||||
UserUUID: userUUID,
|
||||
UserOS: "IOS",
|
||||
UserOSVersion: req.UserOSVersion,
|
||||
UserDevice: ios.MapIOSDevice(req.UserDevice),
|
||||
UserDeviceType: ios.GetIOSDeviceType(req.UserDevice),
|
||||
UserCountry: country,
|
||||
}))
|
||||
}
|
||||
|
||||
ResponseWithJSON(w, &StartIOSSessionResponse{
|
||||
Token: e.services.Tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
|
||||
})
|
||||
}
|
||||
|
||||
func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err)
|
||||
return
|
||||
}
|
||||
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
|
||||
}
|
||||
|
||||
func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil && err != token.EXPIRED {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err)
|
||||
return
|
||||
}
|
||||
// Check timestamps here?
|
||||
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
|
||||
}
|
||||
|
||||
func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
|
||||
log.Printf("recieved imagerequest")
|
||||
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil { // Should accept expired token?
|
||||
ResponseWithError(w, http.StatusUnauthorized, err)
|
||||
return
|
||||
}
|
||||
|
||||
if r.Body == nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
|
||||
return
|
||||
}
|
||||
r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
|
||||
defer r.Body.Close()
|
||||
|
||||
err = r.ParseMultipartForm(1e6) // ~1Mb
|
||||
if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
|
||||
ResponseWithError(w, http.StatusUnsupportedMediaType, err)
|
||||
return
|
||||
// } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
|
||||
} else if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
return
|
||||
}
|
||||
|
||||
if r.MultipartForm == nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"))
|
||||
return
|
||||
}
|
||||
|
||||
if len(r.MultipartForm.Value["projectKey"]) == 0 {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing")) // status for missing/wrong parameter?
|
||||
return
|
||||
}
|
||||
|
||||
prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/"
|
||||
|
||||
for _, fileHeaderList := range r.MultipartForm.File {
|
||||
for _, fileHeader := range fileHeaderList {
|
||||
file, err := fileHeader.Open()
|
||||
if err != nil {
|
||||
continue // TODO: send server error or accumulate successful files
|
||||
}
|
||||
key := prefix + fileHeader.Filename
|
||||
log.Printf("Uploading image... %v", key)
|
||||
go func() { //TODO: mime type from header
|
||||
if err := e.services.Storage.Upload(file, key, "image/jpeg", false); err != nil {
|
||||
log.Printf("Upload ios screen error. %v", err)
|
||||
}
|
||||
}()
|
||||
}
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
182
backend/internal/router/handlers-web.go
Normal file
182
backend/internal/router/handlers-web.go
Normal file
|
|
@ -0,0 +1,182 @@
|
|||
package router
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
"errors"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"math/rand"
|
||||
"net/http"
|
||||
"openreplay/backend/internal/uuid"
|
||||
"strconv"
|
||||
"time"
|
||||
|
||||
"openreplay/backend/pkg/db/postgres"
|
||||
. "openreplay/backend/pkg/messages"
|
||||
"openreplay/backend/pkg/token"
|
||||
)
|
||||
|
||||
func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
||||
startTime := time.Now()
|
||||
|
||||
// Check request body
|
||||
if r.Body == nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
|
||||
return
|
||||
}
|
||||
body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
|
||||
defer body.Close()
|
||||
|
||||
// Parse request body
|
||||
req := &StartSessionRequest{}
|
||||
if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Handler's logic
|
||||
if req.ProjectKey == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
|
||||
return
|
||||
}
|
||||
|
||||
p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
|
||||
if err != nil {
|
||||
if postgres.IsNoRowsErr(err) {
|
||||
ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or capture limit has been reached"))
|
||||
} else {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
}
|
||||
return
|
||||
}
|
||||
|
||||
userUUID := uuid.GetUUID(req.UserUUID)
|
||||
tokenData, err := e.services.Tokenizer.Parse(req.Token)
|
||||
if err != nil || req.Reset { // Starting the new one
|
||||
dice := byte(rand.Intn(100)) // [0, 100)
|
||||
if dice >= p.SampleRate {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
|
||||
return
|
||||
}
|
||||
|
||||
ua := e.services.UaParser.ParseFromHTTPRequest(r)
|
||||
if ua == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
|
||||
return
|
||||
}
|
||||
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err)
|
||||
return
|
||||
}
|
||||
// TODO: if EXPIRED => send message for two sessions association
|
||||
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
|
||||
tokenData = &token.TokenData{ID: sessionID, ExpTime: expTime.UnixMilli()}
|
||||
|
||||
e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, Encode(&SessionStart{
|
||||
Timestamp: req.Timestamp,
|
||||
ProjectID: uint64(p.ProjectID),
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
RevID: req.RevID,
|
||||
UserUUID: userUUID,
|
||||
UserAgent: r.Header.Get("User-Agent"),
|
||||
UserOS: ua.OS,
|
||||
UserOSVersion: ua.OSVersion,
|
||||
UserBrowser: ua.Browser,
|
||||
UserBrowserVersion: ua.BrowserVersion,
|
||||
UserDevice: ua.Device,
|
||||
UserDeviceType: ua.DeviceType,
|
||||
UserCountry: e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r),
|
||||
UserDeviceMemorySize: req.DeviceMemory,
|
||||
UserDeviceHeapSize: req.JsHeapSizeLimit,
|
||||
UserID: req.UserID,
|
||||
}))
|
||||
}
|
||||
|
||||
ResponseWithJSON(w, &StartSessionResponse{
|
||||
Token: e.services.Tokenizer.Compose(*tokenData),
|
||||
UserUUID: userUUID,
|
||||
SessionID: strconv.FormatUint(tokenData.ID, 10),
|
||||
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
|
||||
})
|
||||
}
|
||||
|
||||
func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
||||
// Check authorization
|
||||
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusUnauthorized, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Check request body
|
||||
if r.Body == nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
|
||||
return
|
||||
}
|
||||
body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit)
|
||||
defer body.Close()
|
||||
|
||||
bytes, err := ioutil.ReadAll(body)
|
||||
if err != nil {
|
||||
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: Split environments; send error here only on staging
|
||||
return
|
||||
}
|
||||
|
||||
// Send processed messages to queue as array of bytes
|
||||
// TODO: check bytes for nonsense crap
|
||||
err = e.services.Producer.Produce(e.cfg.TopicRawWeb, sessionData.ID, bytes)
|
||||
if err != nil {
|
||||
log.Printf("can't send processed messages to queue: %s", err)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
|
||||
// Check request body
|
||||
if r.Body == nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
|
||||
return
|
||||
}
|
||||
body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
|
||||
defer body.Close()
|
||||
|
||||
// Parse request body
|
||||
req := &NotStartedRequest{}
|
||||
if err := json.NewDecoder(body).Decode(req); err != nil {
|
||||
ResponseWithError(w, http.StatusBadRequest, err)
|
||||
return
|
||||
}
|
||||
|
||||
// Handler's logic
|
||||
if req.ProjectKey == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
|
||||
return
|
||||
}
|
||||
ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway
|
||||
if ua == nil {
|
||||
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
|
||||
return
|
||||
}
|
||||
country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
|
||||
err := e.services.Database.InsertUnstartedSession(postgres.UnstartedSession{
|
||||
ProjectKey: *req.ProjectKey,
|
||||
TrackerVersion: req.TrackerVersion,
|
||||
DoNotTrack: req.DoNotTrack,
|
||||
Platform: "web",
|
||||
UserAgent: r.Header.Get("User-Agent"),
|
||||
UserOS: ua.OS,
|
||||
UserOSVersion: ua.OSVersion,
|
||||
UserBrowser: ua.Browser,
|
||||
UserBrowserVersion: ua.BrowserVersion,
|
||||
UserDevice: ua.Device,
|
||||
UserDeviceType: ua.DeviceType,
|
||||
UserCountry: country,
|
||||
})
|
||||
if err != nil {
|
||||
log.Printf("Unable to insert Unstarted Session: %v\n", err)
|
||||
}
|
||||
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
|
@ -1,28 +1,27 @@
|
|||
package main
|
||||
package router
|
||||
|
||||
import (
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
"io"
|
||||
"io/ioutil"
|
||||
"log"
|
||||
"net/http"
|
||||
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
)
|
||||
|
||||
const JSON_SIZE_LIMIT int64 = 1e3 // 1Kb
|
||||
|
||||
func pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
|
||||
body := http.MaxBytesReader(w, r.Body, BEACON_SIZE_LIMIT)
|
||||
func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
|
||||
body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit)
|
||||
defer body.Close()
|
||||
|
||||
var reader io.ReadCloser
|
||||
var err error
|
||||
|
||||
switch r.Header.Get("Content-Encoding") {
|
||||
case "gzip":
|
||||
log.Println("Gzip", reader)
|
||||
|
||||
reader, err = gzip.NewReader(body)
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent responce
|
||||
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent response
|
||||
return
|
||||
}
|
||||
log.Println("Gzip reader init", reader)
|
||||
|
|
@ -33,9 +32,9 @@ func pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topi
|
|||
log.Println("Reader after switch:", reader)
|
||||
buf, err := ioutil.ReadAll(reader)
|
||||
if err != nil {
|
||||
responseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
|
||||
return
|
||||
}
|
||||
producer.Produce(topicName, sessionID, buf) // What if not able to send?
|
||||
e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send?
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
49
backend/internal/router/model.go
Normal file
49
backend/internal/router/model.go
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
package router
|
||||
|
||||
type StartSessionRequest struct {
|
||||
Token string `json:"token"`
|
||||
UserUUID *string `json:"userUUID"`
|
||||
RevID string `json:"revID"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
TrackerVersion string `json:"trackerVersion"`
|
||||
IsSnippet bool `json:"isSnippet"`
|
||||
DeviceMemory uint64 `json:"deviceMemory"`
|
||||
JsHeapSizeLimit uint64 `json:"jsHeapSizeLimit"`
|
||||
ProjectKey *string `json:"projectKey"`
|
||||
Reset bool `json:"reset"`
|
||||
UserID string `json:"userID"`
|
||||
}
|
||||
|
||||
type StartSessionResponse struct {
|
||||
Timestamp int64 `json:"timestamp"`
|
||||
Delay int64 `json:"delay"`
|
||||
Token string `json:"token"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
SessionID string `json:"sessionID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
}
|
||||
|
||||
type NotStartedRequest struct {
|
||||
ProjectKey *string `json:"projectKey"`
|
||||
TrackerVersion string `json:"trackerVersion"`
|
||||
DoNotTrack bool `json:"DoNotTrack"`
|
||||
}
|
||||
|
||||
type StartIOSSessionRequest struct {
|
||||
Token string `json:"token"`
|
||||
ProjectKey *string `json:"projectKey"`
|
||||
TrackerVersion string `json:"trackerVersion"`
|
||||
RevID string `json:"revID"`
|
||||
UserUUID *string `json:"userUUID"`
|
||||
UserOSVersion string `json:"userOSVersion"`
|
||||
UserDevice string `json:"userDevice"`
|
||||
Timestamp uint64 `json:"timestamp"`
|
||||
}
|
||||
|
||||
type StartIOSSessionResponse struct {
|
||||
Token string `json:"token"`
|
||||
ImagesHashList []string `json:"imagesHashList"`
|
||||
UserUUID string `json:"userUUID"`
|
||||
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
|
||||
SessionID string `json:"sessionID"`
|
||||
}
|
||||
|
|
@ -1,4 +1,4 @@
|
|||
package main
|
||||
package router
|
||||
|
||||
import (
|
||||
"encoding/json"
|
||||
|
|
@ -6,7 +6,7 @@ import (
|
|||
"net/http"
|
||||
)
|
||||
|
||||
func responseWithJSON(w http.ResponseWriter, res interface{}) {
|
||||
func ResponseWithJSON(w http.ResponseWriter, res interface{}) {
|
||||
body, err := json.Marshal(res)
|
||||
if err != nil {
|
||||
log.Println(err)
|
||||
|
|
@ -15,10 +15,10 @@ func responseWithJSON(w http.ResponseWriter, res interface{}) {
|
|||
w.Write(body)
|
||||
}
|
||||
|
||||
func responseWithError(w http.ResponseWriter, code int, err error) {
|
||||
func ResponseWithError(w http.ResponseWriter, code int, err error) {
|
||||
type response struct {
|
||||
Error string `json:"error"`
|
||||
}
|
||||
w.WriteHeader(code)
|
||||
responseWithJSON(w, &response{err.Error()})
|
||||
ResponseWithJSON(w, &response{err.Error()})
|
||||
}
|
||||
76
backend/internal/router/router.go
Normal file
76
backend/internal/router/router.go
Normal file
|
|
@ -0,0 +1,76 @@
|
|||
package router
|
||||
|
||||
import (
|
||||
"github.com/gorilla/mux"
|
||||
"log"
|
||||
"net/http"
|
||||
"openreplay/backend/internal/config"
|
||||
http2 "openreplay/backend/internal/services"
|
||||
)
|
||||
|
||||
type Router struct {
|
||||
router *mux.Router
|
||||
cfg *config.Config
|
||||
services *http2.ServicesBuilder
|
||||
}
|
||||
|
||||
func NewRouter(cfg *config.Config, services *http2.ServicesBuilder) (*Router, error) {
|
||||
e := &Router{
|
||||
cfg: cfg,
|
||||
services: services,
|
||||
}
|
||||
e.init()
|
||||
return e, nil
|
||||
}
|
||||
|
||||
func (e *Router) init() {
|
||||
e.router = mux.NewRouter()
|
||||
|
||||
// Root path
|
||||
e.router.HandleFunc("/", e.root)
|
||||
|
||||
handlers := map[string]func(http.ResponseWriter, *http.Request){
|
||||
"/v1/web/not-started": e.notStartedHandlerWeb,
|
||||
"/v1/web/start": e.startSessionHandlerWeb,
|
||||
"/v1/web/i": e.pushMessagesHandlerWeb,
|
||||
"/v1/ios/start": e.startSessionHandlerIOS,
|
||||
"/v1/ios/i": e.pushMessagesHandlerIOS,
|
||||
"/v1/ios/late": e.pushLateMessagesHandlerIOS,
|
||||
"/v1/ios/images": e.imagesUploadHandlerIOS,
|
||||
}
|
||||
prefix := "/ingest"
|
||||
|
||||
for path, handler := range handlers {
|
||||
e.router.HandleFunc(path, handler).Methods("POST", "OPTIONS")
|
||||
e.router.HandleFunc(prefix+path, handler).Methods("POST", "OPTIONS")
|
||||
}
|
||||
|
||||
// CORS middleware
|
||||
e.router.Use(e.corsMiddleware)
|
||||
}
|
||||
|
||||
func (e *Router) root(w http.ResponseWriter, r *http.Request) {
|
||||
w.WriteHeader(http.StatusOK)
|
||||
}
|
||||
|
||||
func (e *Router) corsMiddleware(next http.Handler) http.Handler {
|
||||
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
|
||||
// Prepare headers for preflight requests
|
||||
w.Header().Set("Access-Control-Allow-Origin", "*")
|
||||
w.Header().Set("Access-Control-Allow-Methods", "POST")
|
||||
w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization")
|
||||
if r.Method == http.MethodOptions {
|
||||
w.Header().Set("Cache-Control", "max-age=86400")
|
||||
w.WriteHeader(http.StatusOK)
|
||||
return
|
||||
}
|
||||
log.Printf("Request: %v - %v ", r.Method, r.URL.Path)
|
||||
|
||||
// Serve request
|
||||
next.ServeHTTP(w, r)
|
||||
})
|
||||
}
|
||||
|
||||
func (e *Router) GetHandler() http.Handler {
|
||||
return e.router
|
||||
}
|
||||
46
backend/internal/server/server.go
Normal file
46
backend/internal/server/server.go
Normal file
|
|
@ -0,0 +1,46 @@
|
|||
package server
|
||||
|
||||
import (
|
||||
"context"
|
||||
"errors"
|
||||
"fmt"
|
||||
"golang.org/x/net/http2"
|
||||
"log"
|
||||
"net/http"
|
||||
"time"
|
||||
)
|
||||
|
||||
type Server struct {
|
||||
server *http.Server
|
||||
}
|
||||
|
||||
func New(handler http.Handler, host, port string, timeout time.Duration) (*Server, error) {
|
||||
switch {
|
||||
case port == "":
|
||||
return nil, errors.New("empty server port")
|
||||
case handler == nil:
|
||||
return nil, errors.New("empty handler")
|
||||
case timeout < 1:
|
||||
return nil, fmt.Errorf("invalid timeout %d", timeout)
|
||||
}
|
||||
server := &http.Server{
|
||||
Addr: fmt.Sprintf("%s:%s", host, port),
|
||||
Handler: handler,
|
||||
ReadTimeout: timeout,
|
||||
WriteTimeout: timeout,
|
||||
}
|
||||
if err := http2.ConfigureServer(server, nil); err != nil {
|
||||
log.Printf("can't configure http2 server: %s", err)
|
||||
}
|
||||
return &Server{
|
||||
server: server,
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Server) Start() error {
|
||||
return s.server.ListenAndServe()
|
||||
}
|
||||
|
||||
func (s *Server) Stop() {
|
||||
s.server.Shutdown(context.Background())
|
||||
}
|
||||
34
backend/internal/services/services.go
Normal file
34
backend/internal/services/services.go
Normal file
|
|
@ -0,0 +1,34 @@
|
|||
package services
|
||||
|
||||
import (
|
||||
"openreplay/backend/internal/config"
|
||||
"openreplay/backend/internal/geoip"
|
||||
"openreplay/backend/internal/uaparser"
|
||||
"openreplay/backend/pkg/db/cache"
|
||||
"openreplay/backend/pkg/flakeid"
|
||||
"openreplay/backend/pkg/queue/types"
|
||||
"openreplay/backend/pkg/storage"
|
||||
"openreplay/backend/pkg/token"
|
||||
)
|
||||
|
||||
// ServicesBuilder bundles the shared infrastructure clients used by the
// ingestion service: Postgres cache, queue producer, session-ID generator,
// user-agent parser, GeoIP resolver, token codec and S3 storage.
type ServicesBuilder struct {
	Database  *cache.PGCache
	Producer  types.Producer
	Flaker    *flakeid.Flaker
	UaParser  *uaparser.UAParser
	GeoIP     *geoip.GeoIP
	Tokenizer *token.Tokenizer
	Storage   *storage.S3
}
|
||||
|
||||
func New(cfg *config.Config, producer types.Producer, pgconn *cache.PGCache) *ServicesBuilder {
|
||||
return &ServicesBuilder{
|
||||
Database: pgconn,
|
||||
Producer: producer,
|
||||
Storage: storage.NewS3(cfg.AWSRegion, cfg.S3BucketIOSImages),
|
||||
Tokenizer: token.NewTokenizer(cfg.TokenSecret),
|
||||
UaParser: uaparser.NewUAParser(cfg.UAParserFile),
|
||||
GeoIP: geoip.NewGeoIP(cfg.MaxMinDBFile),
|
||||
Flaker: flakeid.NewFlaker(cfg.WorkerID),
|
||||
}
|
||||
}
|
||||
81
backend/internal/sessionender/ender.go
Normal file
81
backend/internal/sessionender/ender.go
Normal file
|
|
@ -0,0 +1,81 @@
|
|||
package sessionender
|
||||
|
||||
import (
|
||||
"context"
|
||||
"fmt"
|
||||
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
|
||||
"log"
|
||||
"openreplay/backend/pkg/monitoring"
|
||||
"time"
|
||||
)
|
||||
|
||||
// EndedSessionHandler handler for ended sessions; it returns true when the
// session was handled successfully and may be forgotten.
type EndedSessionHandler func(sessionID uint64, timestamp int64) bool

// session holds information about user's session live status
type session struct {
	lastTimestamp int64 // timestamp (ms) of the newest message seen for this session
	isEnded       bool  // set once the session crossed the timeout deadline
}

// SessionEnder updates timestamp of last message for each session
// NOTE(review): the sessions map is unsynchronized — this assumes
// single-goroutine use; confirm with callers.
type SessionEnder struct {
	timeout        int64
	sessions       map[uint64]*session // map[sessionID]session
	activeSessions syncfloat64.UpDownCounter
}
|
||||
|
||||
func New(metrics *monitoring.Metrics, timeout int64) (*SessionEnder, error) {
|
||||
if metrics == nil {
|
||||
return nil, fmt.Errorf("metrics module is empty")
|
||||
}
|
||||
activeSessions, err := metrics.RegisterUpDownCounter("active_sessions")
|
||||
if err != nil {
|
||||
return nil, fmt.Errorf("can't register active_session metric: %s", err)
|
||||
}
|
||||
|
||||
return &SessionEnder{
|
||||
timeout: timeout,
|
||||
sessions: make(map[uint64]*session),
|
||||
activeSessions: activeSessions,
|
||||
}, nil
|
||||
}
|
||||
|
||||
// UpdateSession save timestamp for new sessions and update for existing sessions
|
||||
func (se *SessionEnder) UpdateSession(sessionID, timestamp uint64) {
|
||||
currTS := int64(timestamp)
|
||||
if currTS == 0 {
|
||||
log.Printf("got empty timestamp for sessionID: %d", sessionID)
|
||||
return
|
||||
}
|
||||
sess, ok := se.sessions[sessionID]
|
||||
if !ok {
|
||||
se.sessions[sessionID] = &session{
|
||||
lastTimestamp: currTS,
|
||||
isEnded: false,
|
||||
}
|
||||
se.activeSessions.Add(context.Background(), 1)
|
||||
return
|
||||
}
|
||||
if currTS > sess.lastTimestamp {
|
||||
sess.lastTimestamp = currTS
|
||||
sess.isEnded = false
|
||||
}
|
||||
}
|
||||
|
||||
// HandleEndedSessions runs handler for each ended session and delete information about session in successful case
|
||||
func (se *SessionEnder) HandleEndedSessions(handler EndedSessionHandler) {
|
||||
deadLine := time.Now().UnixMilli() - se.timeout
|
||||
allSessions, removedSessions := len(se.sessions), 0
|
||||
for sessID, sess := range se.sessions {
|
||||
if sess.isEnded || sess.lastTimestamp < deadLine {
|
||||
sess.isEnded = true
|
||||
if handler(sessID, sess.lastTimestamp) {
|
||||
delete(se.sessions, sessID)
|
||||
se.activeSessions.Add(context.Background(), -1)
|
||||
removedSessions++
|
||||
}
|
||||
}
|
||||
}
|
||||
log.Printf("Removed %d of %d sessions", removedSessions, allSessions)
|
||||
}
|
||||
49
backend/internal/storage/counter.go
Normal file
49
backend/internal/storage/counter.go
Normal file
|
|
@ -0,0 +1,49 @@
|
|||
package storage
|
||||
|
||||
import (
|
||||
"log"
|
||||
"sync"
|
||||
"time"
|
||||
)
|
||||
|
||||
// logCounter accumulates a count of processed items together with the most
// recent session ID and message timestamp, and periodically dumps those
// stats to the log. All methods are safe for concurrent use.
type logCounter struct {
	mu         sync.Mutex
	counter    int       // items seen since the last Print/init
	timestamp  time.Time // start of the current counting window
	lastTS     time.Time // timestamp of the most recent item
	lastSessID uint64    // session ID of the most recent item
}

// NewLogCounter returns a counter with a freshly started window.
func NewLogCounter() *logCounter {
	nlc := &logCounter{}
	nlc.init()
	return nlc
}

// init resets the window: zero count, window start = now.
func (c *logCounter) init() {
	c.mu.Lock()
	c.counter = 0
	c.timestamp = time.Now()
	c.mu.Unlock()
}

// Update records one more item for session sessID observed at ts.
func (c *logCounter) Update(sessID uint64, ts time.Time) {
	c.mu.Lock()
	c.counter++
	c.lastTS = ts
	c.lastSessID = sessID
	c.mu.Unlock()
}

// Print logs the current window's stats (count, window duration, last
// message timestamp, last session ID and sessID%16) and starts a new
// window.
func (c *logCounter) Print() {
	c.mu.Lock()
	log.Printf("count: %d, dur: %ds, msgTS: %s, sessID: %d, part: %d",
		c.counter,
		// time.Since replaces time.Now().Sub(...) (staticcheck S1012).
		int(time.Since(c.timestamp).Seconds()),
		c.lastTS.String(),
		c.lastSessID,
		c.lastSessID%16,
	)
	c.mu.Unlock()
	c.init()
}
|
||||
|
|
@ -1,11 +1,11 @@
|
|||
package main
|
||||
package storage
|
||||
|
||||
import (
|
||||
gzip "github.com/klauspost/pgzip"
|
||||
"io"
|
||||
)
|
||||
|
||||
func gzipFile(file io.Reader) io.Reader {
|
||||
func (s *Storage) gzipFile(file io.Reader) io.Reader {
|
||||
reader, writer := io.Pipe()
|
||||
go func() {
|
||||
gw, _ := gzip.NewWriterLevel(writer, gzip.BestSpeed)
|
||||
80
backend/internal/storage/storage.go
Normal file
80
backend/internal/storage/storage.go
Normal file
|
|
@ -0,0 +1,80 @@
|
|||
package storage
|
||||
|
||||
import (
	"bytes"
	"fmt"
	"io"
	"log"
	"os"
	"strconv"
	"time"

	config "openreplay/backend/internal/config/storage"
	"openreplay/backend/pkg/flakeid"
	"openreplay/backend/pkg/storage"
)
|
||||
|
||||
// Storage uploads recorded session files from the local FS to S3,
// splitting each file into a "start" part (the first FileSplitSize bytes)
// and an optional "end" part for the remainder.
type Storage struct {
	cfg *config.Config
	s3  *storage.S3
	// startBytes is a reusable read buffer of cfg.FileSplitSize bytes.
	// NOTE(review): sharing it makes UploadKey unsafe for concurrent
	// calls — confirm callers are single-goroutine.
	startBytes []byte
}
|
||||
|
||||
func New(cfg *config.Config, s3 *storage.S3) (*Storage, error) {
|
||||
switch {
|
||||
case cfg == nil:
|
||||
return nil, fmt.Errorf("config is empty")
|
||||
case s3 == nil:
|
||||
return nil, fmt.Errorf("s3 storage is empty")
|
||||
}
|
||||
return &Storage{
|
||||
cfg: cfg,
|
||||
s3: s3,
|
||||
startBytes: make([]byte, cfg.FileSplitSize),
|
||||
}, nil
|
||||
}
|
||||
|
||||
func (s *Storage) UploadKey(key string, retryCount int) {
|
||||
if retryCount <= 0 {
|
||||
return
|
||||
}
|
||||
|
||||
file, err := os.Open(s.cfg.FSDir + "/" + key)
|
||||
if err != nil {
|
||||
sessID, _ := strconv.ParseUint(key, 10, 64)
|
||||
log.Printf("File error: %v; Will retry %v more time(s); sessID: %s, part: %d, sessStart: %s\n",
|
||||
err,
|
||||
retryCount,
|
||||
key,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
time.AfterFunc(s.cfg.RetryTimeout, func() {
|
||||
s.UploadKey(key, retryCount-1)
|
||||
})
|
||||
return
|
||||
}
|
||||
defer file.Close()
|
||||
|
||||
nRead, err := file.Read(s.startBytes)
|
||||
if err != nil {
|
||||
sessID, _ := strconv.ParseUint(key, 10, 64)
|
||||
log.Printf("File read error: %s; sessID: %s, part: %d, sessStart: %s",
|
||||
err,
|
||||
key,
|
||||
sessID%16,
|
||||
time.UnixMilli(int64(flakeid.ExtractTimestamp(sessID))),
|
||||
)
|
||||
time.AfterFunc(s.cfg.RetryTimeout, func() {
|
||||
s.UploadKey(key, retryCount-1)
|
||||
})
|
||||
return
|
||||
}
|
||||
startReader := bytes.NewBuffer(s.startBytes[:nRead])
|
||||
if err := s.s3.Upload(s.gzipFile(startReader), key, "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: start upload failed. %v\n", err)
|
||||
}
|
||||
if nRead == s.cfg.FileSplitSize {
|
||||
if err := s.s3.Upload(s.gzipFile(file), key+"e", "application/octet-stream", true); err != nil {
|
||||
log.Fatalf("Storage: end upload failed. %v\n", err)
|
||||
}
|
||||
}
|
||||
}
|
||||
|
|
@ -1,10 +1,10 @@
|
|||
package main
|
||||
package uuid
|
||||
|
||||
import (
|
||||
"github.com/google/uuid"
|
||||
)
|
||||
|
||||
func getUUID(u *string) string {
|
||||
func GetUUID(u *string) string {
|
||||
if u != nil {
|
||||
_, err := uuid.Parse(*u)
|
||||
if err == nil {
|
||||
5
backend/pkg/db/cache/session.go
vendored
5
backend/pkg/db/cache/session.go
vendored
|
|
@ -8,10 +8,7 @@ import (
|
|||
|
||||
func (c *PGCache) GetSession(sessionID uint64) (*Session, error) {
|
||||
if s, inCache := c.sessions[sessionID]; inCache {
|
||||
// TODO: review. Might cause bugs in case of multiple instances
|
||||
if s == nil {
|
||||
return nil, pgx.ErrNoRows
|
||||
}
|
||||
// TODO: review. Might cause bugs in case of multiple PG instances
|
||||
return s, nil
|
||||
}
|
||||
s, err := c.Conn.GetSession(sessionID)
|
||||
|
|
|
|||
|
|
@ -1,11 +1,8 @@
|
|||
package intervals
|
||||
|
||||
const EVENTS_COMMIT_INTERVAL = 30 * 1000
|
||||
const HEARTBEAT_INTERVAL = 2 * 60 * 1000
|
||||
const INTEGRATIONS_REQUEST_INTERVAL = 1 * 60 * 1000
|
||||
const EVENTS_PAGE_EVENT_TIMEOUT = 2 * 60 * 1000
|
||||
const EVENTS_INPUT_EVENT_TIMEOUT = 2 * 60 * 1000
|
||||
const EVENTS_PERFORMANCE_AGGREGATION_TIMEOUT = 2 * 60 * 1000
|
||||
const EVENTS_COMMIT_INTERVAL = 30 * 1000 // как часто комитим сообщения в кафке (ender)
|
||||
const HEARTBEAT_INTERVAL = 2 * 60 * 1000 // максимальный таймаут от трекера в рамках сессии
|
||||
const INTEGRATIONS_REQUEST_INTERVAL = 1 * 60 * 1000 // интеграции
|
||||
const EVENTS_SESSION_END_TIMEOUT = HEARTBEAT_INTERVAL + 30*1000
|
||||
const EVENTS_SESSION_END_TIMEOUT_WITH_INTEGRATIONS = HEARTBEAT_INTERVAL + 3*60*1000
|
||||
const EVENTS_BACK_COMMIT_GAP = EVENTS_SESSION_END_TIMEOUT_WITH_INTEGRATIONS + 1*60*1000
|
||||
const EVENTS_BACK_COMMIT_GAP = EVENTS_SESSION_END_TIMEOUT_WITH_INTEGRATIONS + 1*60*1000 // для бэк коммита
|
||||
|
|
|
|||
Some files were not shown because too many files have changed in this diff Show more
Loading…
Add table
Reference in a new issue