Chore(release): v1.7.0 (#578)

* change(ui) - redirect to the landing url on SSO login

* fix(ui): fix share popup styles

* change(ui) - non-admin user preference restrictions

* fix(ui) - redirect fix

* change(ui) - show installation btn without mouse hover

* feat(api): api-v1 handle wrong projectKey
feat(api): api-v1 get live sessions

* change(ui) - show role edit on hover

* change(ui) - audit trail count with comma

* fix(ui) - audit trail date range custom picker alignment

* change(ui) - show a message when mob file not found

* feat(api): api-v1 fixed live sessions search

* feat(api): api-v1 handle wrong projectKey

* feat(api): fixed assist error response

* fix(tracker): check node scrolls only on start

* fixup! fix(tracker): check node scrolls only on start

* feat(ui/player): scroll view in click map

* feat(ui/player): rm unused check

* New configuration module (#558)

* ci(dbmigrate): Create db migrate when there is change

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui): fix login error/button margins

* fix(ui) - checkbox click

* fix(ui) - search rename and save fixes

* change(ui) - text changes

* fix(ui) - button text nowrap

* fix(ui): fix slowestdomains widget height

* change(ui) - ignore clicks while annotating

* change(ui) - if block with braces

* change(ui) - capitalize first letter in breadcrumb

* feat(db): remove errors from permissions
feat(api): remove errors from permissions

* feat(api): changed reset password response

* fix(ui) - assist active tab list, broken after the new api changes (pagination)

* fix(ui) - assist active tab list, broken after the new api changes (pagination)

* change(ui) - search compare

* fix(ui): last fixes for 1.7

* fix(ui): fix timeline

* fix(ui): small code fixes

* fix(ui): remove unused

* feat(frontend/assist): show when client tab is inactive + fix reconnection status update

* fix(ui) - visibility settings

* feat(assist): refactored extractSessionInfo
feat(assist): hardcoded session's attributes

* Added snabbdom (JS)

* fix(tracker): version check works with x.x.x-beta versions

* fix(backend): keep the highest user's timestamp instead of the latest message timestamp for correct session duration value

* feat(backend/s3): added file tag RETENTION (#561)

* change(ui) - search optimization and autocomplete improvements

* feat(backend/assets): added new metrics assets_downloaded

* change(ui) - show the date range again in bookmarks since the api is filtering by date range

* feat(backend-assets): custom headers for cacher requests

* chore(backend): no tidy in dockerfile (local build speed up)

* feat(backend/assets): added proxy support for cacher module

* feat(backend/storage): set retention env variable as not required

* fix(ui): fix jira issues

* ci(helm): use kubectl for deployment

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(tracker):3.5.13: performance improvements for a case of extensive dom

* fix(backend): added missed err var and continue statement

* ci(helm): forcing namespace

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(api): fixed slowest_domains query

* ci(helm): update helm deployment method

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* change(ui) - filter dropdown colors

* fix(ui) - speed index location avg attribute changed to value

* ci(api): enable kubectl apply

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui) - widget y axis label

* feat(api): fixed slowest_domains query

* chore(helm): Adding namespaces to all templates (#565)

* feat(api): assist type-autocomplete

* feat(api): assist global-autocomplete

* feat(sourcemaps): include wasm file in build

* feat(sourcemaps-reader): refactored

* fix(ui): fix data for funnels

* fix(ui): fix all sessions section margin

* fix(ui) - assist loader flag

* fix(ui) - assist loader flag

* fix(ui): fix weird check

* feat(api): autocomplete accept unsupported types

* feat(ui): migrate to yarn v3

* feat(ui): minor fixes for installation

* feat(ui): add typescript plugin to yarn

* chore(helm): Ability to override image registry

* chore(helm): Overriding openreplay docker registry

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui): fix control arrows on firefox

* feat(crons): EE crons

* feat(api): fixed build script

* feat(alerts): fixed build script

* feat(crons): fixed build script

* chore(helm): Updating cron version

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(crons): changes

* chore(helm): optional minio ingress

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(crons): fix build script
feat(alerts): fix build script

* Revert "chore(helm): Updating cron version"

This reverts commit 3ca190ea2f.

* feat(crons): fix build script

* feat(crons): fix Dockerfile

* feat(api): fixed metadata change-case

* change(ui) - remove capitalize for the meta value

* change(ui) - autocomplete improvements with custom textfield

* fix(tracker):3.5.13+:reuse metadata on internal-caused restarts

* fix(tracker-assist):3.5.13:send active:true on start; scroll behavior fix

* change(ui) - filters autocomplete blur on pressing Enter key

* fix(tracker): fix node v to lower

* fix(tracker): fix deps

* fix(tracker): fix deps

* fix(ui) - dashboard modal width

* change(ui) - filter dropdown overflow

* chore(helm): clickhouse reclaim policy to retain

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(tracker): fix engine max v

* fix(ui): load metadata in assist tab for sorting

* fix(ui): rm unneeded api call

* fix(tracker): build script to cjs

* change(ui) - removed sample data

* chore(tracker): remove upper node version limit

* Updating Beacon size

Beacon size should be <= QUEUE_MESSAGE_SIZE_LIMIT

* feat(crons): run 24/7
feat(alerts): support env-file override

* feat(api): changed EE env handler

* fix(ui): fix sessions search modal

* change(ui) - margin for error message

* change(ui) - disable assist sort when there are no meta options to choose

* chore(helm): Adding utilities service namespace

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui) - dashboard date range selection reload, metric not found message

* change(ui) - disable clearsearch in assist when there are no filters

* feat(api): fixed EE env handler

* chore(helm): Adding migration namespaces

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui) - report logo path

* chore(helm): Removing unnecessary SA

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(api): changed EE env handler

* feat(api): changed EE env handler

* feat(api): changed EE env handler

* feat(api): changed EE env handler

* feat(crons): changed crons

* feat(api): accept wrong metric_id

* feat(crons): changed env handler
feat(api): changed env handler
feat(alerts): changed env handler

* feat(utilities): support old version of nodejs

* feat(crons): changed env handler
feat(api): changed env handler
feat(alerts): changed env handler

* fix(tracker): fix srcset tracking

* chore(build): Adding frontend

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(assist): changed general helper

* feat(assist): changed general helper

* fix(ui): fix widget pagination (#570)

* feat(crons): changed entrypoint

* feat(player): dev-log on skipping message

* fix(tracker): removeNode mutation priority over attributes

* fix(tracker): capture relative img timing;use startsWith instead of substr; codestyle fix

* chore(build): fixing api build script

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* chore(ci): faster deployment

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* change(ui) - assist list show active status

* chore(actions): option to build all/specific services in GH

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui) - slowest domain metric data as per the api changes

* ci(helm): updated variable name

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(backend): cherrypick changes to ee

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(backend): disabled pprof in http service

* fix(ui) - TimeToRender avg value as per the API change

* fix(ui) - ResponseTimeDistribution avg value as per the API change

* fix(ui) - MemoryConsumption avg value as per the API change

* fix(ui) - ResponseTime avg value as per the API change

* fix(ui) - DomBuildTime avg value as per the API change

* fix(ui) - FrameRate avg value as per the API change

* chore(helm): proper default tag

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(backend): removed sensitive information from http logs

* ci(backend): adding default parameter value for workflow dispatch

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(backend): deleted empty file

* fix(actions): creating image source file prior

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(helm): variable substitution

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* change(ui) - project list item installation button text change

* fix(ui) - project create validation

* fix(backend): removed unsafe string logs in http service

* chore(kafka): Adding new topic

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(efs-cron): variable name

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui) - developer tools - hint links

* fix(ui) - session filters - country and platform dropdown values

* chore(helm): updating version

* chore(kafka): Update kafka default message size while provisioning

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(tracker): fix dependency security

* change(ui) - webhook delete confirmation

* change(ui) - handle empty assist url

* feat(api): autocomplete replace console with errors
feat(DB): clean extra files

* chore(helm): Adding cron jobs

* change(ui) - set changed flag to false after the metric delete to avoid prompt

* chore(helm): enabling cron only for ee

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(api): autocomplete remove console

* change(ui) - removed Console filter type

* fix(ui) - timeline position

* fix(helm): RFC naming

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui): let user change project in dashboards and select default dashboard

* chore(helm): update registry url

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(DB): return pages_count to DB

* fix(ui) - account settings opt out checkbox

* fix(ui): fix modal width

* fix(ui) - explore circle bg

* fix(ui) - user name overlap

* fix(ui) - empty dashboards create button

* fix(ui): fix timeline position cursor for safari

* fix(ui) - custom metrics errors modal url reset on close

* fix(ui) - onboarding check for siteId

* change(ui) - tracker version

* Update local_deploy.sh

* fix(ui) - drilldown timestamp

* fix(tracker): fix deps for assist

* fix(tracker): update peerjs library

* fix(tracker): update assist v

* fix(tracker): fix type error

* fix(backend): no missing resource relying on resource zero-timing

* Update tracker to v3.5.15

* chore(helm): Adding CSP override variable.

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(backend): added pem file support for kafka ssl setup

* feat(backend): added useBatch setup for kafka producer

* ci(backend): set verbose logging

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(backend): using setKey instead of direct writes

* ci(backend): fix error code

* ci(deploy): Updating the image registry

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* feat(api): changed get user id alias

* ci(frontend): removing deprecated steps

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(fix): variable replace

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* ci(helm): creating image_override

Signed-off-by: rjshrjndrn <rjshrjndrn@gmail.com>

* fix(ui): fix timezone settings

* Added failover mechanism for storage service (#576)

* fix(ui): fix typescript config to remove array iterator error

* fix(ui): refactor timezone settings store/comp

* feat(snippet): opensource snippet

* feat(assist): support multiple IPs

* fix(ui): fix type errors in select /timezones fix

* feat(backend): set size of first part of sessions to 500kb

* change(ui) - removed logs

* fix(ui) - custom metric errors reset url on modal close

* feat(DB): no funnel migration

* fix(ui): fix screensize bug

* feat(DB): migrate super old funnels support

* changed db-migration workflow

Co-authored-by: Shekar Siri <sshekarsiri@gmail.com>
Co-authored-by: sylenien <nikita@openreplay.com>
Co-authored-by: Alex Kaminskii <alex@openreplay.com>
Co-authored-by: Alexander <zavorotynskiy@pm.me>
Co-authored-by: rjshrjndrn <rjshrjndrn@gmail.com>
Co-authored-by: Mehdi Osman <estradino@users.noreply.github.com>
Co-authored-by: Alexander <alexander@openreplay.com>
Co-authored-by: Rajesh Rajendran <rjshrjndrn@users.noreply.github.com>
Co-authored-by: Delirium <sylenien@gmail.com>
Kraiem Taha Yassine authored on 2022-07-07 18:44:43 +02:00; committed by GitHub
parent 2a6ca31691
commit 16c70044fb
1848 changed files with 38096 additions and 94967 deletions


@@ -33,27 +33,65 @@ jobs:
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing api image
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ee-${{ github.sha }}
IMAGE_TAG: ${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
cd api
PUSH_IMAGE=1 bash build.sh ee
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
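# i.e. the pipeline above leaves one "<service>:<tag>" line per distinct running /foss/ image in /tmp/image_tag.txt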
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
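# Illustration with a hypothetical running image registry.example.com/foss/chalice:abc123-ee,
# the loop above would append to /tmp/image_override.yaml:
#   chalice:
#     image:
#       tag: abc123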
- name: Deploy to kubernetes
run: |
cd scripts/helm/
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
bash kube-install.sh --app chalice
cd scripts/helmcharts/
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
# Update changed image tag
sed -i "/chalice/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set skipMigration=true --no-hooks
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ee-${{ github.sha }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
@@ -61,6 +99,6 @@ jobs:
# uses: mxschmitt/action-tmate@v3
# env:
# DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# IMAGE_TAG: ee-${{ github.sha }}
# IMAGE_TAG: ${{ github.sha }}-ee
# ENVIRONMENT: staging
#


@@ -3,7 +3,7 @@ on:
workflow_dispatch:
push:
branches:
- api-v1.6.1
- dev
paths:
- api/**
@@ -32,6 +32,12 @@ jobs:
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# Caching docker images
- uses: satackey/action-docker-layer-caching@v0.0.11
# Ignore the failure of a step and avoid terminating the job.
continue-on-error: true
- name: Building and Pushing api image
id: build-image
env:
@@ -41,15 +47,47 @@
run: |
cd api
PUSH_IMAGE=1 bash build.sh
- name: Creating old image input
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done
- name: Deploy to kubernetes
run: |
cd scripts/helm/
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
bash kube-install.sh --app chalice
cd scripts/helmcharts/
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
# Update changed image tag
sed -i "/chalice/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,chalice} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,chalice} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}

.github/workflows/db-migrate.yaml (new file, +151 lines)

@@ -0,0 +1,151 @@
name: Database migration Deployment
on:
workflow_dispatch:
push:
branches:
- dev
paths:
- ee/scripts/helm/db/init_dbs/**
- scripts/helm/db/init_dbs/**
# Disable previous workflows for this action.
concurrency:
group: ${{ github.workflow }} #-${{ github.ref }}
cancel-in-progress: false
jobs:
db-migration:
runs-on: ubuntu-latest
steps:
- name: Checkout
uses: actions/checkout@v2
with:
# We need to diff with old commit
# to see which workers got changed.
fetch-depth: 2
- name: Checking whether migration is needed for OSS
id: check-migration
run: |-
[[ `git --no-pager diff --name-only HEAD HEAD~1 | grep -E "scripts/helm/db/init_dbs" | grep -vE ^ee/` ]] || echo "::set-output name=skip_migration_oss::true"
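# i.e. when no non-EE files under init_dbs changed, skip_migration_oss=true and the OSS steps guarded by that output below are skipped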
- uses: azure/k8s-set-context@v1
if: ${{ steps.check-migration.outputs.skip_migration_oss != 'true' }}
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Creating old image input
if: ${{ steps.check-migration.outputs.skip_migration_oss != 'true' }}
run: |
set -x
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
tag: ${image_array[1]}
EOF
done
- name: Deploy to kubernetes foss
if: ${{ steps.check-migration.outputs.skip_migration_oss != 'true' }}
run: |
cd scripts/helmcharts/
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
cat /tmp/image_override.yaml
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --atomic --set forceMigration=true --set dbMigrationUpstreamBranch=${IMAGE_TAG}
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
### Enterprise code deployment
- name: cleaning old assets
run: |
rm -rf /tmp/image_*
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontextee
- name: Creating old image input
env:
IMAGE_TAG: ${{ github.sha }}
run: |
#
# Create yaml with existing image tags
#
kubectl get pods -n app -o jsonpath="{.items[*].spec.containers[*].image}" |\
tr -s '[[:space:]]' '\n' | sort | uniq -c | grep '/foss/' | cut -d '/' -f3 > /tmp/image_tag.txt
echo > /tmp/image_override.yaml
for line in `cat /tmp/image_tag.txt`;
do
image_array=($(echo "$line" | tr ':' '\n'))
cat <<EOF >> /tmp/image_override.yaml
${image_array[0]}:
image:
# We've to strip off the -ee, as helm will append it.
tag: `echo ${image_array[1]} | cut -d '-' -f 1`
EOF
done
- name: Resetting vars file
run: |
git checkout -- scripts/helmcharts/vars.yaml
- name: Deploy to kubernetes ee
run: |
cd scripts/helmcharts/
## Update secrets
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
cat /tmp/image_override.yaml
# Deploy command
helm upgrade --install openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --atomic --set forceMigration=true --set dbMigrationUpstreamBranch=${IMAGE_TAG}
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}
# uses: mxschmitt/action-tmate@v3
# env:
# AWS_ACCESS_KEY_ID: ${{ secrets.AWS_ACCESS_KEY_ID }}
# AWS_SECRET_ACCESS_KEY: ${{ secrets.AWS_SECRET_ACCESS_KEY }}
# AWS_REGION: eu-central-1
# AWS_S3_BUCKET_NAME: ${{ secrets.AWS_S3_BUCKET_NAME }}


@@ -6,6 +6,10 @@ on:
- dev
paths:
- frontend/**
# Disable previous workflows for this action.
concurrency:
group: ${{ github.workflow }} #-${{ github.ref }}
cancel-in-progress: true
jobs:
build:
@@ -23,24 +27,111 @@ jobs:
${{ runner.OS }}-build-
${{ runner.OS }}-
- name: Docker login
run: |
docker login ${{ secrets.EE_REGISTRY_URL }} -u ${{ secrets.EE_DOCKER_USERNAME }} -p "${{ secrets.EE_REGISTRY_TOKEN }}"
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
# - name: Install
# run: npm install
- name: Build and deploy
- name: Building and Pushing frontend image
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
run: |
set -x
cd frontend
bash build.sh
cp -arl public frontend
minio_pod=$(kubectl get po -n db -l app.kubernetes.io/name=minio -n db --output custom-columns=name:.metadata.name | tail -n+2)
echo $minio_pod
echo copying frontend to container.
kubectl -n db cp frontend $minio_pod:/data/
rm -rf frontend
mv .env.sample .env
docker run --rm -v /etc/passwd:/etc/passwd -u `id -u`:`id -g` -v $(pwd):/home/${USER} -w /home/${USER} --name node_build node:14-stretch-slim /bin/bash -c "yarn && yarn build"
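# Note: mounting /etc/passwd and running as the invoking user keeps the yarn build artifacts from ending up root-owned on the runner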
# https://github.com/docker/cli/issues/1134#issuecomment-613516912
DOCKER_BUILDKIT=1 docker build --target=cicd -t $DOCKER_REPO/frontend:${IMAGE_TAG} .
docker tag $DOCKER_REPO/frontend:${IMAGE_TAG} $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}
docker push $DOCKER_REPO/frontend:${IMAGE_TAG}-ee
- name: Deploy to kubernetes foss
run: |
cd scripts/helmcharts/
set -x
cat <<EOF>>/tmp/image_override.yaml
frontend:
image:
tag: ${IMAGE_TAG}
EOF
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
# Update changed image tag
sed -i "/frontend/{n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,frontend} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,frontend} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
### Enterprise code deployment
- uses: azure/k8s-set-context@v1
with:
method: kubeconfig
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontextee
- name: Resetting vars file
run: |
git checkout -- scripts/helmcharts/vars.yaml
- name: Deploy to kubernetes ee
run: |
cd scripts/helmcharts/
cat <<EOF>/tmp/image_override.yaml
frontend:
image:
# We've to strip off the -ee, as helm will append it.
tag: ${IMAGE_TAG}
EOF
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
# Update changed image tag
sed -i "/frontend/{n;n;n;s/.*/ tag: ${IMAGE_TAG}/}" /tmp/image_override.yaml
cat /tmp/image_override.yaml
# Deploy command
mv openreplay/charts/{ingress-nginx,frontend} /tmp
rm -rf openreplay/charts/*
mv /tmp/{ingress-nginx,frontend} openreplay/charts/
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true --no-hooks | kubectl apply -n app -f -
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.sha }}
ENVIRONMENT: staging
# - name: Debug Job
# if: ${{ failure() }}


@@ -44,6 +44,7 @@ jobs:
- name: Deploy to kubernetes
run: |
cd scripts/helm/
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml


@@ -2,6 +2,11 @@
on:
workflow_dispatch:
inputs:
build_service:
description: 'Name of a single service to build (in small letters). "all" to build everything'
required: false
default: 'false'
push:
branches:
- dev
@@ -35,11 +40,16 @@ jobs:
kubeconfig: ${{ secrets.EE_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Build, tag, and Deploy to k8s
# # Caching docker images
# - uses: satackey/action-docker-layer-caching@v0.0.11
# # Ignore the failure of a step and avoid terminating the job.
# continue-on-error: true
- name: Build, tag
id: build-image
env:
DOCKER_REPO: ${{ secrets.EE_REGISTRY_URL }}
IMAGE_TAG: ee-${{ github.sha }}
IMAGE_TAG: ${{ github.sha }}-ee
ENVIRONMENT: staging
run: |
#
@@ -49,34 +59,85 @@ jobs:
#
# Getting the images to build
#
git diff --name-only HEAD HEAD~1 | grep backend/services | cut -d '/' -f3 | uniq > backend/images_to_build.txt
[[ $(cat backend/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
set -x
touch /tmp/images_to_build.txt
tmp_param=${{ github.event.inputs.build_service }}
build_param=${tmp_param:-'false'}
case ${build_param} in
false)
{
git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
done
} | uniq > /tmp/images_to_build.txt
;;
all)
ls backend/cmd > /tmp/images_to_build.txt
;;
*)
echo ${{github.event.inputs.build_service }} > /tmp/images_to_build.txt
;;
esac
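# Build selection, in short: 'false' (default) builds only services whose
# sources changed in the last commit, 'all' builds everything under
# backend/cmd, and any other value builds just that named service.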
[[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 1)
#
# Pushing image to registry
#
cd backend
for image in $(cat images_to_build.txt);
for image in $(cat /tmp/images_to_build.txt);
do
echo "Bulding $image"
PUSH_IMAGE=1 bash -x ./build.sh ee $image
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
done
- name: Deploying to kubernetes
env:
# We're not passing -ee flag, because helm will add that.
IMAGE_TAG: ${{ github.sha }}
run: |
#
# Deploying image to environment.
#
cd ../scripts/helm/
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.EE_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#jwt_secret_key.*#jwt_secret_key: \"${{ secrets.EE_JWT_SECRET }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
for image in $(cat ../../backend/images_to_build.txt);
set -x
cd scripts/helmcharts/
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.EE_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.EE_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.EE_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.EE_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.EE_DOMAIN_NAME }}\"/g" vars.yaml
sed -i "s/enterpriseEditionLicense: \"\"/enterpriseEditionLicense: \"${{ secrets.EE_LICENSE_KEY }}\"/g" vars.yaml
set -x
echo > /tmp/image_override.yaml
mkdir /tmp/helmcharts
mv openreplay/charts/ingress-nginx /tmp/helmcharts/
## Update images
for image in $(cat /tmp/images_to_build.txt);
do
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
# Deploy command
bash openreplay-cli --install $image
mv openreplay/charts/$image /tmp/helmcharts/
cat <<EOF>>/tmp/image_override.yaml
${image}:
image:
# We've to strip off the -ee, as helm will append it.
tag: ${IMAGE_TAG}
EOF
done
ls /tmp/helmcharts
rm -rf openreplay/charts/*
ls openreplay/charts
mv /tmp/helmcharts/* openreplay/charts/
ls openreplay/charts
cat /tmp/image_override.yaml
# Deploy command
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
# - name: Debug Job
# if: ${{ failure() }}


@@ -2,6 +2,11 @@
on:
workflow_dispatch:
inputs:
build_service:
description: 'Name of a single service to build (in small letters). "all" to build everything'
required: false
default: 'false'
push:
branches:
- dev
@@ -34,7 +39,13 @@ jobs:
kubeconfig: ${{ secrets.OSS_KUBECONFIG }} # Use content of kubeconfig in secret.
id: setcontext
- name: Build, tag, and Deploy to k8s
# Caching docker images
# - uses: satackey/action-docker-layer-caching@v0.0.11
# # Ignore the failure of a step and avoid terminating the job.
# continue-on-error: true
- name: Build, tag
id: build-image
env:
DOCKER_REPO: ${{ secrets.OSS_REGISTRY_URL }}
@@ -48,41 +59,82 @@ jobs:
#
# Getting the images to build
#
set -x
touch /tmp/images_to_build.txt
tmp_param=${{ github.event.inputs.build_service }}
build_param=${tmp_param:-'false'}
case ${build_param} in
false)
{
git diff --name-only HEAD HEAD~1 | grep backend/services | grep -vE ^ee/ | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep -E "backend/cmd|backend/services" | grep -vE ^ee/ | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep backend/pkg | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
grep -rl "pkg/$pkg_name" backend/services | cut -d '/' -f3
git diff --name-only HEAD HEAD~1 | grep -E "backend/pkg|backend/internal" | grep -vE ^ee/ | cut -d '/' -f3 | uniq | while read -r pkg_name ; do
grep -rl "pkg/$pkg_name" backend/services backend/cmd | cut -d '/' -f3
done
} | uniq > backend/images_to_build.txt
} | uniq > /tmp/images_to_build.txt
;;
all)
ls backend/cmd > /tmp/images_to_build.txt
;;
*)
echo ${{github.event.inputs.build_service }} > /tmp/images_to_build.txt
;;
esac
[[ $(cat backend/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 0)
[[ $(cat /tmp/images_to_build.txt) != "" ]] || (echo "Nothing to build here"; exit 1)
#
# Pushing image to registry
#
cd backend
for image in $(cat images_to_build.txt);
for image in $(cat /tmp/images_to_build.txt);
do
echo "Bulding $image"
PUSH_IMAGE=1 bash -x ./build.sh skip $image
echo "::set-output name=image::$DOCKER_REPO/$image:$IMAGE_TAG"
done
- name: Deploying to kubernetes
env:
IMAGE_TAG: ${{ github.sha }}
run: |
#
# Deploying image to environment.
#
cd ../scripts/helm/
sed -i "s#minio_access_key.*#minio_access_key: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\" #g" vars.yaml
sed -i "s#minio_secret_key.*#minio_secret_key: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\" #g" vars.yaml
sed -i "s#domain_name.*#domain_name: \"foss.openreplay.com\" #g" vars.yaml
sed -i "s#kubeconfig.*#kubeconfig_path: ${KUBECONFIG}#g" vars.yaml
for image in $(cat ../../backend/images_to_build.txt);
cd scripts/helmcharts/
## Update secrets
sed -i "s#openReplayContainerRegistry.*#openReplayContainerRegistry: \"${{ secrets.OSS_REGISTRY_URL }}\"#g" vars.yaml
sed -i "s/postgresqlPassword: \"changeMePassword\"/postgresqlPassword: \"${{ secrets.OSS_PG_PASSWORD }}\"/g" vars.yaml
sed -i "s/accessKey: \"changeMeMinioAccessKey\"/accessKey: \"${{ secrets.OSS_MINIO_ACCESS_KEY }}\"/g" vars.yaml
sed -i "s/secretKey: \"changeMeMinioPassword\"/secretKey: \"${{ secrets.OSS_MINIO_SECRET_KEY }}\"/g" vars.yaml
sed -i "s/jwt_secret: \"SetARandomStringHere\"/jwt_secret: \"${{ secrets.OSS_JWT_SECRET }}\"/g" vars.yaml
sed -i "s/domainName: \"\"/domainName: \"${{ secrets.OSS_DOMAIN_NAME }}\"/g" vars.yaml
set -x
echo > /tmp/image_override.yaml
mkdir /tmp/helmcharts
mv openreplay/charts/ingress-nginx /tmp/helmcharts/
## Update images
for image in $(cat /tmp/images_to_build.txt);
do
sed -i "s/image_tag:.*/image_tag: \"$IMAGE_TAG\"/g" vars.yaml
# Deploy command
bash kube-install.sh --app $image
mv openreplay/charts/$image /tmp/helmcharts/
cat <<EOF>>/tmp/image_override.yaml
${image}:
image:
# We've to strip off the -ee, as helm will append it.
tag: ${IMAGE_TAG}
EOF
done
ls /tmp/helmcharts
rm -rf openreplay/charts/*
ls openreplay/charts
mv /tmp/helmcharts/* openreplay/charts/
ls openreplay/charts
cat /tmp/image_override.yaml
# Deploy command
helm template openreplay -n app openreplay -f vars.yaml -f /tmp/image_override.yaml --set ingress-nginx.enabled=false --set skipMigration=true | kubectl apply -f -
# - name: Debug Job
# if: ${{ failure() }}

api/.dockerignore (new file, +6 lines)

@@ -0,0 +1,6 @@
# ignore .git and .cache folders
.git
.cache
**/build.sh
**/build_*.sh
**/*deploy.sh


@@ -1,26 +1,33 @@
FROM python:3.9.10-slim
FROM python:3.9.12-slim
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
WORKDIR /work
COPY . .
RUN pip install -r requirements.txt
RUN mv .env.default .env
ENV APP_NAME chalice
# Add Tini
# Startup daemon
ENV TINI_VERSION=v0.19.0 \
SOURCE_MAP_VERSION=0.7.4
ARG envarg
ENV ENTERPRISE_BUILD ${envarg}
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
ADD https://unpkg.com/source-map@${SOURCE_MAP_VERSION}/lib/mappings.wasm /mappings.wasm
RUN chmod +x /tini
# Installing Nodejs
RUN apt update && apt install -y curl && \
curl -fsSL https://deb.nodesource.com/setup_12.x | bash - && \
apt install -y nodejs && \
apt remove --purge -y curl && \
rm -rf /var/lib/apt/lists/* && \
cd sourcemap-reader && \
npm install
rm -rf /var/lib/apt/lists/*
WORKDIR /work_tmp
COPY requirements.txt /work_tmp/requirements.txt
RUN pip install -r /work_tmp/requirements.txt
COPY sourcemap-reader/*.json /work_tmp/
RUN cd /work_tmp && npm install
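# Copying the dependency manifests and installing them before COPY . . lets
# Docker reuse the pip/npm layers when only application code changes.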
WORKDIR /work
COPY . .
RUN mv env.default .env && mv /work_tmp/node_modules sourcemap-reader/.
# Add Tini
# Startup daemon
ENV TINI_VERSION v0.19.0
ARG envarg
ENV ENTERPRISE_BUILD ${envarg}
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
RUN chmod +x /tini
ENTRYPOINT ["/tini", "--"]
CMD ./entrypoint.sh


@@ -1,14 +1,9 @@
FROM python:3.9.10-slim
FROM python:3.9.12-slim
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
LABEL Maintainer="KRAIEM Taha Yassine<tahayk2@gmail.com>"
WORKDIR /work
COPY . .
RUN pip install -r requirements.txt
RUN mv .env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh
ENV APP_NAME alerts
ENV pg_minconn 2
ENV pg_maxconn 10
ENV APP_NAME alerts
# Add Tini
# Startup daemon
ENV TINI_VERSION v0.19.0
@@ -16,5 +11,13 @@ ARG envarg
ENV ENTERPRISE_BUILD ${envarg}
ADD https://github.com/krallin/tini/releases/download/${TINI_VERSION}/tini /tini
RUN chmod +x /tini
COPY requirements.txt /work_tmp/requirements.txt
RUN pip install -r /work_tmp/requirements.txt
WORKDIR /work
COPY . .
RUN mv env.default .env && mv app_alerts.py app.py && mv entrypoint_alerts.sh entrypoint.sh
ENTRYPOINT ["/tini", "--"]
CMD ./entrypoint.sh


@@ -1,4 +1,4 @@
FROM python:3.9.10-slim
FROM python:3.9.12-slim
LABEL Maintainer="Rajesh Rajendran<rjshrjndrn@gmail.com>"
WORKDIR /work
COPY . .


@@ -12,9 +12,9 @@ envarg="default-foss"
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit=1
exit 1
}
[[ exit -eq 1 ]] && exit 1
return
}
function build_api(){
@@ -33,8 +33,15 @@ function build_api(){
docker tag ${DOCKER_REPO:-'local'}/chalice:${git_sha1} ${DOCKER_REPO:-'local'}/chalice:${tag}latest
docker push ${DOCKER_REPO:-'local'}/chalice:${tag}latest
}
echo "api docker build completed"
}
check_prereq
build_api $1
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_alerts.sh $1
echo build_complete
source build_alerts.sh $1
[[ $1 == "ee" ]] && {
IMAGE_TAG=$IMAGE_TAG PUSH_IMAGE=$PUSH_IMAGE DOCKER_REPO=$DOCKER_REPO bash build_crons.sh $1
}
echo "api done"


@@ -27,12 +27,12 @@ function make_submodule() {
mkdir -p ./alerts/chalicelib/
cp -R ./chalicelib/__init__.py ./alerts/chalicelib/
mkdir -p ./alerts/chalicelib/core/
cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,roles,assist,events_ios,sessions_mobs,errors,dashboard,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/
cp -R ./chalicelib/core/{__init__,alerts_processor,alerts_listener,sessions,events,issues,sessions_metas,metadata,projects,users,authorizers,tenants,roles,assist,events_ios,sessions_mobs,errors,metrics,sourcemaps,sourcemaps_parser,resources,performance_event,alerts,notifications,slack,collaboration_slack,webhook}.py ./alerts/chalicelib/core/
mkdir -p ./alerts/chalicelib/utils/
cp -R ./chalicelib/utils/{__init__,TimeUTC,pg_client,helper,event_filter_definition,dev,SAML2_helper,email_helper,email_handler,smtp,s3,args_transformer,ch_client,metrics_helper}.py ./alerts/chalicelib/utils/
# -- end of generated part
}
cp -R ./{Dockerfile.alerts,requirements.txt,.env.default,entrypoint_alerts.sh} ./alerts/
cp -R ./{Dockerfile.alerts,requirements.txt,env.default,entrypoint_alerts.sh} ./alerts/
cp -R ./chalicelib/utils/html ./alerts/chalicelib/utils/html
}
@@ -41,9 +41,8 @@ envarg="default-foss"
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
exit=1
exit 1
}
[[ exit -eq 1 ]] && exit 1
}
function build_api(){
@@ -64,6 +63,7 @@ function build_api(){
docker tag ${DOCKER_REPO:-'local'}/alerts:${git_sha1} ${DOCKER_REPO:-'local'}/alerts:${tag}latest
docker push ${DOCKER_REPO:-'local'}/alerts:${tag}latest
}
echo "completed alerts build"
}
check_prereq


@@ -99,7 +99,7 @@ def Build(a):
j_s = True
if a["seriesId"] is not None:
a["filter"]["sort"] = "session_id"
a["filter"]["order"] = "DESC"
a["filter"]["order"] = schemas.SortOrderType.desc
a["filter"]["startDate"] = -1
a["filter"]["endDate"] = TimeUTC.now()
full_args, query_part = sessions.search_query_parts(


@@ -1,6 +1,7 @@
import requests
from decouple import config
import schemas
from chalicelib.core import projects
SESSION_PROJECTION_COLS = """s.project_id,
@@ -19,22 +20,40 @@ SESSION_PROJECTION_COLS = """s.project_id,
"""
def get_live_sessions_ws(project_id, user_id=None):
def get_live_sessions_ws_user_id(project_id, user_id):
data = {
"filter": {"userId": user_id} if user_id else {}
}
return __get_live_sessions_ws(project_id=project_id, data=data)
def get_live_sessions_ws(project_id, body: schemas.LiveSessionsSearchPayloadSchema):
data = {
"filter": {},
"pagination": {"limit": body.limit, "page": body.page},
"sort": {"key": body.sort, "order": body.order}
}
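# Illustrative resulting payload, with hypothetical values, once the
# filters below are folded in:
# {"filter": {"userId": "u1"}, "pagination": {"limit": 10, "page": 1},
#  "sort": {"key": "timestamp", "order": "DESC"}}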
for f in body.filters:
if f.type == schemas.LiveFilterType.metadata:
data["filter"][f.source] = f.value
else:
data["filter"][f.type.value] = f.value
return __get_live_sessions_ws(project_id=project_id, data=data)
def __get_live_sessions_ws(project_id, data):
project_key = projects.get_project_key(project_id)
params = {}
if user_id and len(user_id) > 0:
params["userId"] = user_id
try:
connected_peers = requests.get(config("assist") % config("S3_KEY") + f"/{project_key}", params,
connected_peers = requests.post(config("assist") % config("S3_KEY") + f"/{project_key}", json=data,
timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
print(connected_peers.text)
return []
return {"total": 0, "sessions": []}
live_peers = connected_peers.json().get("data", [])
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
live_peers = []
live_peers = {"total": 0, "sessions": []}
except Exception as e:
print("issue getting Live-Assist response")
print(str(e))
@@ -43,34 +62,55 @@ def get_live_sessions_ws(project_id, user_id=None):
print(connected_peers.text)
except:
print("couldn't get response")
live_peers = []
for s in live_peers:
live_peers = {"total": 0, "sessions": []}
_live_peers = live_peers
if "sessions" in live_peers:
_live_peers = live_peers["sessions"]
for s in _live_peers:
s["live"] = True
s["projectId"] = project_id
live_peers = sorted(live_peers, key=lambda l: l.get("timestamp", 0), reverse=True)
return live_peers
def get_live_session_by_id(project_id, session_id):
all_live = get_live_sessions_ws(project_id)
for l in all_live:
if str(l.get("sessionID")) == str(session_id):
return l
project_key = projects.get_project_key(project_id)
try:
connected_peers = requests.get(config("assist") % config("S3_KEY") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
print(connected_peers.text)
return False
connected_peers = connected_peers.json().get("data")
if connected_peers is None:
return None
connected_peers["live"] = True
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
return None
except Exception as e:
print("issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
print(connected_peers.text)
except:
print("couldn't get response")
return None
return connected_peers
def is_live(project_id, session_id, project_key=None):
if project_key is None:
project_key = projects.get_project_key(project_id)
try:
connected_peers = requests.get(config("assistList") % config("S3_KEY") + f"/{project_key}",
connected_peers = requests.get(config("assistList") % config("S3_KEY") + f"/{project_key}/{session_id}",
timeout=config("assistTimeout", cast=int, default=5))
if connected_peers.status_code != 200:
print("!! issue with the peer-server")
print(connected_peers.text)
return False
connected_peers = connected_peers.json().get("data", [])
connected_peers = connected_peers.json().get("data")
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
return False
@@ -83,7 +123,35 @@ def is_live(project_id, session_id, project_key=None):
except:
print("couldn't get response")
return False
return str(session_id) in connected_peers
return str(session_id) == connected_peers
def autocomplete(project_id, q: str, key: str = None):
project_key = projects.get_project_key(project_id)
params = {"q": q}
if key:
params["key"] = key
try:
results = requests.get(config("assistList") % config("S3_KEY") + f"/{project_key}/autocomplete",
params=params, timeout=config("assistTimeout", cast=int, default=5))
if results.status_code != 200:
print("!! issue with the peer-server")
print(results.text)
return {"errors": [f"Something went wrong wile calling assist:{results.text}"]}
results = results.json().get("data", [])
except requests.exceptions.Timeout:
print("Timeout getting Assist response")
return {"errors": ["Assist request timeout"]}
except Exception as e:
print("issue getting Assist response")
print(str(e))
print("expected JSON, received:")
try:
print(results.text)
except:
print("couldn't get response")
return {"errors": ["Something went wrong wile calling assist"]}
return {"data": results}
def get_ice_servers():

View file

@@ -2,7 +2,7 @@ import json
from typing import Union
import schemas
from chalicelib.core import sessions
from chalicelib.core import sessions, funnels, errors, issues
from chalicelib.utils import helper, pg_client
from chalicelib.utils.TimeUTC import TimeUTC
@@ -42,7 +42,66 @@ def __try_live(project_id, data: schemas.TryCustomMetricsPayloadSchema):
return results
def merged_live(project_id, data: schemas.TryCustomMetricsPayloadSchema):
def __is_funnel_chart(data: schemas.TryCustomMetricsPayloadSchema):
return data.metric_type == schemas.MetricType.funnel
def __get_funnel_chart(project_id, data: schemas.TryCustomMetricsPayloadSchema):
if len(data.series) == 0:
return {
"stages": [],
"totalDropDueToIssues": 0
}
data.series[0].filter.startDate = data.startTimestamp
data.series[0].filter.endDate = data.endTimestamp
return funnels.get_top_insights_on_the_fly_widget(project_id=project_id, data=data.series[0].filter)
def __is_errors_list(data):
return data.metric_type == schemas.MetricType.table \
and data.metric_of == schemas.TableMetricOfType.errors
def __get_errors_list(project_id, user_id, data):
if len(data.series) == 0:
return {
"total": 0,
"errors": []
}
data.series[0].filter.startDate = data.startTimestamp
data.series[0].filter.endDate = data.endTimestamp
data.series[0].filter.page = data.page
data.series[0].filter.limit = data.limit
return errors.search(data.series[0].filter, project_id=project_id, user_id=user_id)
def __is_sessions_list(data):
return data.metric_type == schemas.MetricType.table \
and data.metric_of == schemas.TableMetricOfType.sessions
def __get_sessions_list(project_id, user_id, data):
if len(data.series) == 0:
print("empty series")
return {
"total": 0,
"sessions": []
}
data.series[0].filter.startDate = data.startTimestamp
data.series[0].filter.endDate = data.endTimestamp
data.series[0].filter.page = data.page
data.series[0].filter.limit = data.limit
return sessions.search2_pg(data=data.series[0].filter, project_id=project_id, user_id=user_id)
def merged_live(project_id, data: schemas.TryCustomMetricsPayloadSchema, user_id=None):
if __is_funnel_chart(data):
return __get_funnel_chart(project_id=project_id, data=data)
elif __is_errors_list(data):
return __get_errors_list(project_id=project_id, user_id=user_id, data=data)
elif __is_sessions_list(data):
return __get_sessions_list(project_id=project_id, user_id=user_id, data=data)
series_charts = __try_live(project_id=project_id, data=data)
if data.view_type == schemas.MetricTimeseriesViewType.progress or data.metric_type == schemas.MetricType.table:
return series_charts
@@ -75,15 +134,22 @@ def make_chart(project_id, user_id, metric_id, data: schemas.CustomMetricChartPa
if metric is None:
return None
metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
series_charts = __try_live(project_id=project_id, data=metric)
if metric.view_type == schemas.MetricTimeseriesViewType.progress or metric.metric_type == schemas.MetricType.table:
return series_charts
results = [{}] * len(series_charts[0])
for i in range(len(results)):
for j, series_chart in enumerate(series_charts):
results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
metric.series[j].name: series_chart[i]["count"]}
return results
return merged_live(project_id=project_id, data=metric, user_id=user_id)
# if __is_funnel_chart(metric):
# return __get_funnel_chart(project_id=project_id, data=metric)
# elif __is_errors_list(metric):
# return __get_errors_list(project_id=project_id, user_id=user_id, data=metric)
#
# series_charts = __try_live(project_id=project_id, data=metric)
# if metric.view_type == schemas.MetricTimeseriesViewType.progress or metric.metric_type == schemas.MetricType.table:
# return series_charts
# results = [{}] * len(series_charts[0])
# for i in range(len(results)):
# for j, series_chart in enumerate(series_charts):
# results[i] = {**results[i], "timestamp": series_chart[i]["timestamp"],
# metric.series[j].name: series_chart[i]["count"]}
# return results
def get_sessions(project_id, user_id, metric_id, data: schemas.CustomMetricSessionsPayloadSchema):
@@ -105,6 +171,38 @@ def get_sessions(project_id, user_id, metric_id, data: schemas.CustomMetricSessi
return results
def get_funnel_issues(project_id, user_id, metric_id, data: schemas.CustomMetricSessionsPayloadSchema):
metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
if metric is None:
return None
metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
if metric is None:
return None
for s in metric.series:
s.filter.startDate = data.startTimestamp
s.filter.endDate = data.endTimestamp
s.filter.limit = data.limit
s.filter.page = data.page
return {"seriesId": s.series_id, "seriesName": s.name,
**funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter)}
def get_errors_list(project_id, user_id, metric_id, data: schemas.CustomMetricSessionsPayloadSchema):
metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
if metric is None:
return None
metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
if metric is None:
return None
for s in metric.series:
s.filter.startDate = data.startTimestamp
s.filter.endDate = data.endTimestamp
s.filter.limit = data.limit
s.filter.page = data.page
return {"seriesId": s.series_id, "seriesName": s.name,
**errors.search(data=s.filter, project_id=project_id, user_id=user_id)}
def try_sessions(project_id, user_id, data: schemas.CustomMetricSessionsPayloadSchema):
results = []
if data.series is None:
@@ -130,12 +228,16 @@ def create(project_id, user_id, data: schemas.CreateCustomMetricsSchema, dashboa
_data[f"filter_{i}"] = s.filter.json()
series_len = len(data.series)
data.series = None
params = {"user_id": user_id, "project_id": project_id, **data.dict(), **_data}
params = {"user_id": user_id, "project_id": project_id,
"default_config": json.dumps(data.config.dict()),
**data.dict(), **_data}
query = cur.mogrify(f"""\
WITH m AS (INSERT INTO metrics (project_id, user_id, name, is_public,
view_type, metric_type, metric_of, metric_value, metric_format)
view_type, metric_type, metric_of, metric_value,
metric_format, default_config)
VALUES (%(project_id)s, %(user_id)s, %(name)s, %(is_public)s,
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s, %(metric_format)s)
%(view_type)s, %(metric_type)s, %(metric_of)s, %(metric_value)s,
%(metric_format)s, %(default_config)s)
RETURNING *)
INSERT
INTO metric_series(metric_id, index, name, filter)
@@ -396,3 +498,41 @@ def change_state(project_id, metric_id, user_id, status):
{"metric_id": metric_id, "status": status, "user_id": user_id})
)
return get(metric_id=metric_id, project_id=project_id, user_id=user_id)
def get_funnel_sessions_by_issue(user_id, project_id, metric_id, issue_id,
data: schemas.CustomMetricSessionsPayloadSchema
# , range_value=None, start_date=None, end_date=None
):
metric = get(metric_id=metric_id, project_id=project_id, user_id=user_id, flatten=False)
if metric is None:
return None
metric: schemas.CreateCustomMetricsSchema = __merge_metric_with_data(metric=metric, data=data)
if metric is None:
return None
for s in metric.series:
s.filter.startDate = data.startTimestamp
s.filter.endDate = data.endTimestamp
s.filter.limit = data.limit
s.filter.page = data.page
issues_list = funnels.get_issues_on_the_fly_widget(project_id=project_id, data=s.filter).get("issues", {})
issues_list = issues_list.get("significant", []) + issues_list.get("insignificant", [])
issue = None
for i in issues_list:
if i.get("issueId", "") == issue_id:
issue = i
break
if issue is None:
issue = issues.get(project_id=project_id, issue_id=issue_id)
if issue is not None:
issue = {**issue,
"affectedSessions": 0,
"affectedUsers": 0,
"conversionImpact": 0,
"lostConversions": 0,
"unaffectedSessions": 0}
return {"seriesId": s.series_id, "seriesName": s.name,
"sessions": sessions.search2_pg(user_id=user_id, project_id=project_id,
issue=issue, data=s.filter)
if issue is not None else {"total": 0, "sessions": []},
"issue": issue}


@@ -6,8 +6,9 @@ from chalicelib.utils import helper
from chalicelib.utils import pg_client
from chalicelib.utils.TimeUTC import TimeUTC
# category name should be lower cased
CATEGORY_DESCRIPTION = {
'overview': 'High-level metrics and web vitals.',
'web vitals': 'A set of metrics that assess app performance on criteria such as load time, load performance, and stability.',
'custom': 'Previously created custom metrics by me and my team.',
'errors': 'Keep a closer eye on errors and track their type, origin and domain.',
'performance': "Optimize your app's performance by tracking slow domains, page response times, memory consumption, CPU usage and more.",
@ -33,20 +34,20 @@ def get_templates(project_id, user_id):
cur.execute(pg_query)
rows = cur.fetchall()
for r in rows:
r["description"] = CATEGORY_DESCRIPTION.get(r["category"], "")
r["description"] = CATEGORY_DESCRIPTION.get(r["category"].lower(), "")
for w in r["widgets"]:
w["created_at"] = TimeUTC.datetime_to_timestamp(w["created_at"])
w["edited_at"] = TimeUTC.datetime_to_timestamp(w["edited_at"])
for s in w["series"]:
s["created_at"] = TimeUTC.datetime_to_timestamp(s["created_at"])
s["filter"] = helper.old_search_payload_to_flat(s["filter"])
return helper.list_to_camel_case(rows)
def create_dashboard(project_id, user_id, data: schemas.CreateDashboardSchema):
with pg_client.PostgresClient() as cur:
pg_query = f"""INSERT INTO dashboards(project_id, user_id, name, is_public, is_pinned)
VALUES(%(projectId)s, %(userId)s, %(name)s, %(is_public)s, %(is_pinned)s)
pg_query = f"""INSERT INTO dashboards(project_id, user_id, name, is_public, is_pinned, description)
VALUES(%(projectId)s, %(userId)s, %(name)s, %(is_public)s, %(is_pinned)s, %(description)s)
RETURNING *"""
params = {"userId": user_id, "projectId": project_id, **data.dict()}
if data.metrics is not None and len(data.metrics) > 0:
@ -137,7 +138,8 @@ def update_dashboard(project_id, user_id, dashboard_id, data: schemas.EditDashbo
row = cur.fetchone()
offset = row["count"]
pg_query = f"""UPDATE dashboards
SET name = %(name)s
SET name = %(name)s,
description= %(description)s
{", is_public = %(is_public)s" if data.is_public is not None else ""}
{", is_pinned = %(is_pinned)s" if data.is_pinned is not None else ""}
WHERE dashboards.project_id = %(projectId)s
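The update above builds its SET list conditionally, appending the is_public and is_pinned fragments only when the payload provides them. A minimal standalone sketch of the same pattern, assuming psycopg2-style %(name)s placeholders and a plain dict payload:

def build_dashboard_update(data: dict) -> str:
    # name and description are always updated; optional flags join only if provided.
    fragments = ["name = %(name)s", "description = %(description)s"]
    if data.get("is_public") is not None:
        fragments.append("is_public = %(is_public)s")
    if data.get("is_pinned") is not None:
        fragments.append("is_pinned = %(is_pinned)s")
    return "UPDATE dashboards SET " + ", ".join(fragments) + \
           " WHERE dashboards.project_id = %(projectId)s;"

print(build_dashboard_update({"name": "n", "description": "d", "is_public": True}))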


@ -425,10 +425,9 @@ def __get_sort_key(key):
def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
empty_response = {"data": {
'total': 0,
empty_response = {'total': 0,
'errors': []
}}
}
platform = None
for f in data.filters:
@ -437,6 +436,8 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
pg_sub_query = __get_basic_constraints(platform, project_key="sessions.project_id")
pg_sub_query += ["sessions.start_ts>=%(startDate)s", "sessions.start_ts<%(endDate)s", "source ='js_exception'",
"pe.project_id=%(project_id)s"]
# Ignore the generic 'Script error.' message
pg_sub_query.append("pe.message!='Script error.'")
pg_sub_query_chart = __get_basic_constraints(platform, time_constraint=False, chart=True, project_key=None)
# pg_sub_query_chart.append("source ='js_exception'")
pg_sub_query_chart.append("errors.error_id =details.error_id")
@ -463,7 +464,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
sort = __get_sort_key('datetime')
if data.sort is not None:
sort = __get_sort_key(data.sort)
order = "DESC"
order = schemas.SortOrderType.desc
if data.order is not None:
order = data.order
extra_join = ""
@ -544,7 +545,7 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
rows = cur.fetchall()
total = 0 if len(rows) == 0 else rows[0]["full_count"]
if flows:
return {"data": {"count": total}}
return {"count": total}
if total == 0:
rows = []
@ -592,11 +593,9 @@ def search(data: schemas.SearchErrorsSchema, project_id, user_id, flows=False):
and (r["message"].lower() != "script error." or len(r["stack"][0]["absPath"]) > 0))]
offset -= len(rows)
return {
"data": {
'total': total - offset,
'errors': helper.list_to_camel_case(rows)
}
}
def __save_stacktrace(error_id, data):


@ -28,8 +28,8 @@ def __merge_cells(rows, start, count, replacement):
return rows
def __get_grouped_clickrage(rows, session_id):
click_rage_issues = issues.get_by_session_id(session_id=session_id, issue_type="click_rage")
def __get_grouped_clickrage(rows, session_id, project_id):
click_rage_issues = issues.get_by_session_id(session_id=session_id, issue_type="click_rage", project_id=project_id)
if len(click_rage_issues) == 0:
return rows
@ -63,7 +63,7 @@ def get_by_sessionId2_pg(session_id, project_id, group_clickrage=False):
)
rows = cur.fetchall()
if group_clickrage:
rows = __get_grouped_clickrage(rows=rows, session_id=session_id)
rows = __get_grouped_clickrage(rows=rows, session_id=session_id, project_id=project_id)
cur.execute(cur.mogrify("""
SELECT
@ -435,7 +435,15 @@ def __get_autocomplete_table(value, project_id):
query = cur.mogrify(" UNION ".join(sub_queries) + ";",
{"project_id": project_id, "value": helper.string_to_sql_like(value),
"svalue": helper.string_to_sql_like("^" + value)})
try:
cur.execute(query)
except Exception as err:
print("--------- AUTOCOMPLETE SEARCH QUERY EXCEPTION -----------")
print(query.decode('UTF-8'))
print("--------- VALUE -----------")
print(value)
print("--------------------")
raise err
results = helper.list_to_camel_case(cur.fetchall())
return results


@ -251,6 +251,26 @@ def get_top_insights_on_the_fly(funnel_id, user_id, project_id, data: schemas.Fu
"totalDropDueToIssues": total_drop_due_to_issues}}
# def get_top_insights_on_the_fly_widget(project_id, data: schemas.FunnelInsightsPayloadSchema):
def get_top_insights_on_the_fly_widget(project_id, data: schemas.CustomMetricSeriesFilterSchema):
data.events = filter_stages(__parse_events(data.events))
data.events = __fix_stages(data.events)
if len(data.events) == 0:
return {"stages": [], "totalDropDueToIssues": 0}
insights, total_drop_due_to_issues = significance.get_top_insights(filter_d=data.dict(), project_id=project_id)
insights = helper.list_to_camel_case(insights)
if len(insights) > 0:
# TODO: check if this is correct
if total_drop_due_to_issues > insights[0]["sessionsCount"]:
if len(insights) == 0:
total_drop_due_to_issues = 0
else:
total_drop_due_to_issues = insights[0]["sessionsCount"] - insights[-1]["sessionsCount"]
insights[-1]["dropDueToIssues"] = total_drop_due_to_issues
return {"stages": insights,
"totalDropDueToIssues": total_drop_due_to_issues}
def get_issues(project_id, user_id, funnel_id, range_value=None, start_date=None, end_date=None):
f = get(funnel_id=funnel_id, project_id=project_id, user_id=user_id, flatten=False)
if f is None:
@ -280,6 +300,19 @@ def get_issues_on_the_fly(funnel_id, user_id, project_id, data: schemas.FunnelSe
last_stage=len(data.events)))}
# def get_issues_on_the_fly_widget(project_id, data: schemas.FunnelSearchPayloadSchema):
def get_issues_on_the_fly_widget(project_id, data: schemas.CustomMetricSeriesFilterSchema):
data.events = filter_stages(data.events)
data.events = __fix_stages(data.events)
if len(data.events) == 0:
return {"issues": []}
return {
"issues": helper.dict_to_camel_case(
significance.get_issues_list(filter_d=data.dict(), project_id=project_id, first_stage=1,
last_stage=len(data.events)))}
def get(funnel_id, project_id, user_id, flatten=True, fix_stages=True):
with pg_client.PostgresClient() as cur:
cur.execute(


@ -41,19 +41,23 @@ def get(project_id, issue_id):
)
cur.execute(query=query)
data = cur.fetchone()
if data is not None:
data["title"] = helper.get_issue_title(data["type"])
return helper.dict_to_camel_case(data)
def get_by_session_id(session_id, issue_type=None):
def get_by_session_id(session_id, project_id, issue_type=None):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify(f"""\
SELECT *
FROM events_common.issues
INNER JOIN public.issues USING (issue_id)
WHERE session_id = %(session_id)s {"AND type = %(type)s" if issue_type is not None else ""}
WHERE session_id = %(session_id)s
AND project_id= %(project_id)s
{"AND type = %(type)s" if issue_type is not None else ""}
ORDER BY timestamp;""",
{"session_id": session_id, "type": issue_type})
{"session_id": session_id, "project_id": project_id, "type": issue_type})
)
return helper.list_to_camel_case(cur.fetchall())


@ -1,21 +1,9 @@
from chalicelib.utils import pg_client
EDITION = 'foss'
def get_status(tenant_id=None):
with pg_client.PostgresClient() as cur:
cur.execute("SELECT * FROM public.tenants;")
r = cur.fetchone()
return {
"hasActivePlan": True,
"current": {
"edition": r.get("edition", "").upper(),
"versionNumber": r.get("version_number", ""),
"license": "",
"edition": EDITION,
"expirationDate": -1
},
"count": {
"teamMember": r.get("t_users"),
"projects": r.get("t_projects"),
"capturedSessions": r.get("t_sessions")
}
}


@ -1,4 +1,5 @@
from elasticsearch import Elasticsearch, RequestsHttpConnection
# from elasticsearch import Elasticsearch, RequestsHttpConnection
from elasticsearch import Elasticsearch
from chalicelib.core import log_tools
import base64
import logging
@ -58,20 +59,21 @@ def add_edit(tenant_id, project_id, data):
def __get_es_client(host, port, api_key_id, api_key, use_ssl=False, timeout=15):
scheme = "http" if host.startswith("http") else "https"
host = host.replace("http://", "").replace("https://", "")
try:
args = {
"hosts": [{"host": host, "port": port}],
"use_ssl": use_ssl,
"hosts": [{"host": host, "port": port, "scheme": scheme}],
"verify_certs": False,
"ca_certs": False,
"connection_class": RequestsHttpConnection,
"timeout": timeout
# "ca_certs": False,
# "connection_class": RequestsHttpConnection,
"request_timeout": timeout,
"api_key": (api_key_id, api_key)
}
if api_key_id is not None and len(api_key_id) > 0:
# args["http_auth"] = (username, password)
token = "ApiKey " + base64.b64encode(f"{api_key_id}:{api_key}".encode("utf-8")).decode("utf-8")
args["headers"] = {"Authorization": token}
# if api_key_id is not None and len(api_key_id) > 0:
# # args["http_auth"] = (username, password)
# token = "ApiKey " + base64.b64encode(f"{api_key_id}:{api_key}".encode("utf-8")).decode("utf-8")
# args["headers"] = {"Authorization": token}
es = Elasticsearch(
**args
)
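The rewrite targets the elasticsearch 8.x client pinned in requirements.txt: the scheme moves into each host entry and the API key is passed directly instead of a hand-built Authorization header. A minimal connection sketch with placeholder values (host, port, and key are assumptions):

from elasticsearch import Elasticsearch

es = Elasticsearch(
    hosts=[{"host": "localhost", "port": 9200, "scheme": "https"}],
    api_key=("my_key_id", "my_key_secret"),  # replaces the manual base64 ApiKey header
    verify_certs=False,
    request_timeout=15,
)
print(es.ping())  # True if the cluster is reachable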


@ -83,7 +83,7 @@ def __edit(project_id, col_index, colname, new_name):
return {"errors": ["custom field not found"]}
with pg_client.PostgresClient() as cur:
if old_metas[col_index]["key"].lower() != new_name:
if old_metas[col_index]["key"] != new_name:
cur.execute(cur.mogrify(f"""UPDATE public.projects
SET {colname} = %(value)s
WHERE project_id = %(project_id)s AND deleted_at ISNULL


@ -967,7 +967,7 @@ def get_pages_dom_build_time(project_id, startTimestamp=TimeUTC.now(delta_days=-
cur.execute(cur.mogrify(pg_query, params))
row = cur.fetchone()
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return row
@ -1069,11 +1069,11 @@ def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-
pg_sub_query.append("pages.speed_index>0")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT sessions.user_country, AVG(pages.speed_index) AS avg
pg_query = f"""SELECT sessions.user_country, AVG(pages.speed_index) AS value
FROM events.pages INNER JOIN public.sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY sessions.user_country
ORDER BY avg,sessions.user_country;"""
ORDER BY value, sessions.user_country;"""
params = {"project_id": project_id,
"startTimestamp": startTimestamp,
"endTimestamp": endTimestamp, **__get_constraint_values(args)}
@ -1087,7 +1087,7 @@ def get_speed_index_location(project_id, startTimestamp=TimeUTC.now(delta_days=-
avg = cur.fetchone()["avg"]
else:
avg = 0
return {"avg": avg, "chart": helper.list_to_camel_case(rows)}
return {"value": avg, "chart": helper.list_to_camel_case(rows), "unit": schemas.TemplatePredefinedUnits.millisecond}
def get_pages_response_time(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
@ -1126,7 +1126,9 @@ def get_pages_response_time(project_id, startTimestamp=TimeUTC.now(delta_days=-1
WHERE {" AND ".join(pg_sub_query)};"""
cur.execute(cur.mogrify(pg_query, params))
avg = cur.fetchone()["avg"]
return {"value": avg, "chart": rows, "unit": schemas.TemplatePredefinedUnits.millisecond}
result = {"value": avg, "chart": rows}
helper.__time_value(result)
return result
def get_pages_response_time_distribution(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
@ -1169,7 +1171,7 @@ def get_pages_response_time_distribution(project_id, startTimestamp=TimeUTC.now(
else:
quantiles = [0 for i in range(len(quantiles_keys))]
result = {
"avg": avg,
"value": avg,
"total": sum(r["count"] for r in rows),
"chart": [],
"percentiles": [{
@ -1177,7 +1179,8 @@ def get_pages_response_time_distribution(project_id, startTimestamp=TimeUTC.now(
"responseTime": int(quantiles[i])
} for i, v in enumerate(quantiles_keys)
],
"extremeValues": [{"count": 0}]
"extremeValues": [{"count": 0}],
"unit": schemas.TemplatePredefinedUnits.millisecond
}
rows = helper.list_to_camel_case(rows)
_99 = result["percentiles"][-1]["responseTime"]
@ -1348,7 +1351,7 @@ def get_time_to_render(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
"endTimestamp": endTimestamp, "value": url, **__get_constraint_values(args)}
cur.execute(cur.mogrify(pg_query, params))
row = cur.fetchone()
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return row
@ -1498,7 +1501,7 @@ def get_crashes(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
pg_sub_query_chart.append("m_issues.type = 'crash'")
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT generated_timestamp AS timestamp,
COUNT(sessions) AS count
COUNT(sessions) AS value
FROM generate_series(%(startTimestamp)s, %(endTimestamp)s, %(step_size)s) AS generated_timestamp
LEFT JOIN LATERAL (
SELECT sessions.session_id
@ -1556,7 +1559,7 @@ def get_crashes(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
versions.append({v["version"]: v["count"] / (r["total"] / 100)})
r["versions"] = versions
return {"chart": rows, "browsers": browsers}
return {"chart": rows, "browsers": browsers, "unit": schemas.TemplatePredefinedUnits.count}
def __get_neutral(rows, add_All_if_empty=True):
@ -1719,11 +1722,11 @@ def get_slowest_domains(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
with pg_client.PostgresClient() as cur:
pg_query = f"""SELECT
resources.url_host AS domain,
AVG(resources.duration) AS avg
AVG(resources.duration) AS value
FROM events.resources INNER JOIN sessions USING (session_id)
WHERE {" AND ".join(pg_sub_query)}
GROUP BY resources.url_host
ORDER BY avg DESC
ORDER BY value DESC
LIMIT 5;"""
params = {"project_id": project_id,
"startTimestamp": startTimestamp,
@ -1738,7 +1741,7 @@ def get_slowest_domains(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
avg = cur.fetchone()["avg"]
else:
avg = 0
return {"avg": avg, "partition": rows}
return {"value": avg, "chart": rows, "unit": schemas.TemplatePredefinedUnits.millisecond}
def get_errors_per_domains(project_id, startTimestamp=TimeUTC.now(delta_days=-1),
@ -2241,7 +2244,7 @@ def get_application_activity_avg_image_load_time(project_id, startTimestamp=Time
row = __get_application_activity_avg_image_load_time(cur, project_id, startTimestamp, endTimestamp, **args)
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2300,7 +2303,7 @@ def __get_application_activity_avg_page_load_time(cur, project_id, startTimestam
cur.execute(cur.mogrify(pg_query, params))
row = cur.fetchone()
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return row
@ -2316,7 +2319,7 @@ def get_application_activity_avg_page_load_time(project_id, startTimestamp=TimeU
row = __get_application_activity_avg_page_load_time(cur, project_id, startTimestamp, endTimestamp, **args)
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2369,7 +2372,7 @@ def __get_application_activity_avg_request_load_time(cur, project_id, startTimes
"endTimestamp": endTimestamp, **__get_constraint_values(args)}))
row = cur.fetchone()
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return row
@ -2385,7 +2388,7 @@ def get_application_activity_avg_request_load_time(project_id, startTimestamp=Ti
row = __get_application_activity_avg_request_load_time(cur, project_id, startTimestamp, endTimestamp, **args)
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2442,7 +2445,7 @@ def get_page_metrics_avg_dom_content_load_start(project_id, startTimestamp=TimeU
row = __get_page_metrics_avg_dom_content_load_start(cur, project_id, startTimestamp, endTimestamp, **args)
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2512,7 +2515,7 @@ def get_page_metrics_avg_first_contentful_pixel(project_id, startTimestamp=TimeU
if len(rows) > 0:
previous = helper.dict_to_camel_case(rows[0])
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2645,7 +2648,7 @@ def get_user_activity_avg_session_duration(project_id, startTimestamp=TimeUTC.no
previous = helper.dict_to_camel_case(row)
results["progress"] = helper.__progress(old_val=previous["value"], new_val=results["value"])
results["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(results)
return results
@ -2731,7 +2734,7 @@ def get_top_metrics_avg_response_time(project_id, startTimestamp=TimeUTC.now(del
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)
@ -2772,7 +2775,7 @@ def get_top_metrics_avg_first_paint(project_id, startTimestamp=TimeUTC.now(delta
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)
@ -2816,7 +2819,7 @@ def get_top_metrics_avg_dom_content_loaded(project_id, startTimestamp=TimeUTC.no
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)
@ -2857,7 +2860,7 @@ def get_top_metrics_avg_till_first_bit(project_id, startTimestamp=TimeUTC.now(de
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)
@ -2899,7 +2902,7 @@ def get_top_metrics_avg_time_to_interactive(project_id, startTimestamp=TimeUTC.n
cur.execute(cur.mogrify(pg_query, params))
rows = cur.fetchall()
row["chart"] = helper.list_to_camel_case(rows)
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
helper.__time_value(row)
return helper.dict_to_camel_case(row)


@ -25,6 +25,22 @@ def get_all(tenant_id, user_id):
return rows
def get_all_count(tenant_id, user_id):
with pg_client.PostgresClient() as cur:
cur.execute(
cur.mogrify("""\
SELECT COALESCE(COUNT(notifications.*),0) AS count
FROM public.notifications
LEFT JOIN (SELECT notification_id
FROM public.user_viewed_notifications
WHERE user_viewed_notifications.user_id = %(user_id)s) AS user_viewed_notifications USING (notification_id)
WHERE (notifications.user_id IS NULL OR notifications.user_id =%(user_id)s) AND user_viewed_notifications.notification_id IS NULL;""",
{"user_id": user_id})
)
row = cur.fetchone()
return row
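The LEFT JOIN above acts as an anti-join: it counts notifications addressed to everyone (user_id IS NULL) or to this user that have no matching row in user_viewed_notifications. An equivalent in-memory sketch with hypothetical data:

def unread_count(notifications, viewed_ids, user_id):
    # Broadcast or user-targeted notifications the user has not viewed yet.
    return sum(1 for n in notifications
               if (n["user_id"] is None or n["user_id"] == user_id)
               and n["notification_id"] not in viewed_ids)

rows = [{"notification_id": 1, "user_id": None},
        {"notification_id": 2, "user_id": 7},
        {"notification_id": 3, "user_id": 9}]
print(unread_count(rows, viewed_ids={1}, user_id=7))  # 1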
def view_notification(user_id, notification_ids=[], tenant_id=None, startTimestamp=None, endTimestamp=None):
if (notification_ids is None or len(notification_ids) == 0) and endTimestamp is None:
return False


@ -20,6 +20,5 @@ def reset(data: schemas.ForgetPasswordPayloadSchema):
invitation_link = users.generate_new_invitation(user_id=a_users["id"])
email_helper.send_forgot_password(recipient=data.email, invitation_link=invitation_link)
else:
print(f"invalid email address [{data.email}]")
return {"errors": ["invalid email address"]}
return {"data": {"state": "success"}}
print(f"!!!invalid email address [{data.email}]")
return {"data": {"state": "A reset link will be sent if this email exists in our system."}}


@ -99,14 +99,16 @@ def get_by_id2_pg(project_id, session_id, user_id, full_data=False, include_fav_
duration=data["duration"])
data['metadata'] = __group_metadata(project_metadata=data.pop("projectMetadata"), session=data)
data['issues'] = issues.get_by_session_id(session_id=session_id)
data['issues'] = issues.get_by_session_id(session_id=session_id, project_id=project_id)
data['live'] = live and assist.is_live(project_id=project_id,
session_id=session_id,
project_key=data["projectKey"])
data["inDB"] = True
return data
else:
elif live:
return assist.get_live_session_by_id(project_id=project_id, session_id=session_id)
else:
return None
def __get_sql_operator(op: schemas.SearchEventOperator):
@ -203,12 +205,12 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
elif data.group_by_user:
g_sort = "count(full_sessions)"
if data.order is None:
data.order = "DESC"
data.order = schemas.SortOrderType.desc
else:
data.order = data.order.upper()
if data.sort is not None and data.sort != 'sessionsCount':
sort = helper.key_to_snake_case(data.sort)
g_sort = f"{'MIN' if data.order == 'DESC' else 'MAX'}({sort})"
g_sort = f"{'MIN' if data.order == schemas.SortOrderType.desc else 'MAX'}({sort})"
else:
sort = 'start_ts'
@ -232,7 +234,7 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
full_args)
else:
if data.order is None:
data.order = "DESC"
data.order = schemas.SortOrderType.desc
sort = 'session_id'
if data.sort is not None and data.sort != "session_id":
# sort += " " + data.order + "," + helper.key_to_snake_case(data.sort)
@ -256,9 +258,9 @@ def search2_pg(data: schemas.SessionsSearchPayloadSchema, project_id, user_id, e
cur.execute(main_query)
except Exception as err:
print("--------- SESSIONS SEARCH QUERY EXCEPTION -----------")
print(main_query)
print(main_query.decode('UTF-8'))
print("--------- PAYLOAD -----------")
print(data.dict())
print(data.json())
print("--------------------")
raise err
if errors_only:


@ -5,14 +5,23 @@ from chalicelib.utils.s3 import client
def get_web(sessionId):
return client.generate_presigned_url(
return [
client.generate_presigned_url(
'get_object',
Params={
'Bucket': config("sessions_bucket"),
'Key': str(sessionId)
},
ExpiresIn=100000
)
),
client.generate_presigned_url(
'get_object',
Params={
'Bucket': config("sessions_bucket"),
'Key': str(sessionId) + "e"
},
ExpiresIn=100000
)]
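get_web now returns two presigned URLs: the session's primary mob file and a companion object stored under the same key with an "e" suffix. A hedged caller-side sketch, assuming the requests package and that a missing second part should simply be skipped:

import requests

def download_session_parts(urls, timeout=30):
    # Fetch each presigned URL; non-200 responses (e.g. the optional "e" file) are dropped.
    parts = []
    for url in urls:
        resp = requests.get(url, timeout=timeout)
        if resp.status_code == 200:
            parts.append(resp.content)
    return parts

# Usage sketch: parts = download_session_parts(get_web(session_id))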
def get_ios(sessionId):


@ -24,7 +24,6 @@ T_VALUES = {1: 12.706, 2: 4.303, 3: 3.182, 4: 2.776, 5: 2.571, 6: 2.447, 7: 2.36
21: 2.080, 22: 2.074, 23: 2.069, 25: 2.064, 26: 2.060, 27: 2.056, 28: 2.052, 29: 2.045, 30: 2.042}
def get_stages_and_events(filter_d, project_id) -> List[RealDictRow]:
"""
Add minimal timestamp
@ -293,7 +292,6 @@ def pearson_corr(x: list, y: list):
return r, confidence, False
def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_with_context, first_stage, last_stage):
"""
Returns two lists with binary values 0/1:
@ -363,7 +361,6 @@ def get_transitions_and_issues_of_each_type(rows: List[RealDictRow], all_issues_
return transitions, errors, all_errors, n_sess_affected
def get_affected_users_for_all_issues(rows, first_stage, last_stage):
"""
@ -415,7 +412,6 @@ def get_affected_users_for_all_issues(rows, first_stage, last_stage):
return all_issues_with_context, n_issues_dict, n_affected_users_dict, n_affected_sessions_dict, contexts
def count_sessions(rows, n_stages):
session_counts = {i: set() for i in range(1, n_stages + 1)}
for ind, row in enumerate(rows):
@ -467,7 +463,6 @@ def get_stages(stages, rows):
return stages_list
def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False):
"""
@ -544,7 +539,6 @@ def get_issues(stages, rows, first_stage=None, last_stage=None, drop_only=False)
return n_critical_issues, issues_dict, total_drop_due_to_issues
def get_top_insights(filter_d, project_id):
output = []
stages = filter_d.get("events", [])
@ -582,9 +576,8 @@ def get_top_insights(filter_d, project_id):
return stages_list, total_drop_due_to_issues
def get_issues_list(filter_d, project_id, first_stage=None, last_stage=None):
output = dict({'critical_issues_count': 0})
output = dict({"total_drop_due_to_issues": 0, "critical_issues_count": 0, "significant": [], "insignificant": []})
stages = filter_d.get("events", [])
# The result of the multi-stage query
rows = get_stages_and_events(filter_d=filter_d, project_id=project_id)


@ -67,8 +67,8 @@ def create_step1(data: schemas.UserSignupSchema):
}
query = f"""\
WITH t AS (
INSERT INTO public.tenants (name, version_number, edition)
VALUES (%(organizationName)s, (SELECT openreplay_version()), 'fos')
INSERT INTO public.tenants (name, version_number)
VALUES (%(organizationName)s, (SELECT openreplay_version()))
RETURNING api_key
),
u AS (
@ -77,8 +77,8 @@ def create_step1(data: schemas.UserSignupSchema):
RETURNING user_id,email,role,name
),
au AS (INSERT
INTO public.basic_authentication (user_id, password, generated_password)
VALUES ((SELECT user_id FROM u), crypt(%(password)s, gen_salt('bf', 12)), FALSE)
INTO public.basic_authentication (user_id, password)
VALUES ((SELECT user_id FROM u), crypt(%(password)s, gen_salt('bf', 12)))
)
INSERT INTO public.projects (name, active)
VALUES (%(projectName)s, TRUE)


@ -1,13 +1,15 @@
from chalicelib.utils import pg_client
import requests
from chalicelib.core import license
def process_data(data, edition='fos'):
def process_data(data):
return {
'edition': edition,
'edition': license.EDITION,
'tracking': data["opt_out"],
'version': data["version_number"],
'user_id': data["user_id"],
'user_id': data["tenant_key"],
'tenant_key': data["tenant_key"],
'owner_email': None if data["opt_out"] else data["email"],
'organization_name': None if data["opt_out"] else data["name"],
'users_count': data["t_users"],
@ -27,7 +29,7 @@ def compute():
t_projects=COALESCE((SELECT COUNT(*) FROM public.projects WHERE deleted_at ISNULL), 0),
t_sessions=COALESCE((SELECT COUNT(*) FROM public.sessions), 0),
t_users=COALESCE((SELECT COUNT(*) FROM public.users WHERE deleted_at ISNULL), 0)
RETURNING name,t_integrations,t_projects,t_sessions,t_users,user_id,opt_out,
RETURNING name,t_integrations,t_projects,t_sessions,t_users,tenant_key,opt_out,
(SELECT openreplay_version()) AS version_number,(SELECT email FROM public.users WHERE role = 'owner' LIMIT 1);"""
)
data = cur.fetchone()
@ -39,6 +41,7 @@ def new_client():
cur.execute(
f"""SELECT *,
(SELECT email FROM public.users WHERE role='owner' LIMIT 1) AS email
FROM public.tenants;""")
FROM public.tenants
LIMIT 1;""")
data = cur.fetchone()
requests.post('https://api.openreplay.com/os/signup', json=process_data(data))


@ -1,7 +1,7 @@
import schemas
from chalicelib.utils import pg_client
from chalicelib.utils import helper
from chalicelib.core import users
from chalicelib.core import users, license
def get_by_tenant_id(tenant_id):
@ -13,7 +13,7 @@ def get_by_tenant_id(tenant_id):
name,
api_key,
created_at,
edition,
'{license.EDITION}' AS edition,
version_number,
opt_out
FROM public.tenants
@ -67,7 +67,7 @@ def update(tenant_id, user_id, data: schemas.UpdateTenantSchema):
admin = users.get(user_id=user_id, tenant_id=tenant_id)
if not admin["admin"] and not admin["superAdmin"]:
return {"error": "unauthorized"}
return {"errors": ["unauthorized, needs admin or owner"]}
if data.name is None and data.opt_out is None:
return {"errors": ["please provide 'name' of 'optOut' attribute for update"]}
changes = {}


@ -4,6 +4,7 @@ import secrets
from decouple import config
from fastapi import BackgroundTasks
import schemas
from chalicelib.core import authorizers, metadata, projects
from chalicelib.core import tenants, assist
from chalicelib.utils import dev, email_helper
@ -21,10 +22,10 @@ def create_new_member(email, invitation_token, admin, name, owner=False):
query = cur.mogrify(f"""\
WITH u AS (INSERT INTO public.users (email, role, name, data)
VALUES (%(email)s, %(role)s, %(name)s, %(data)s)
RETURNING user_id,email,role,name,appearance
RETURNING user_id,email,role,name
),
au AS (INSERT INTO public.basic_authentication (user_id, generated_password, invitation_token, invited_at)
VALUES ((SELECT user_id FROM u), TRUE, %(invitation_token)s, timezone('utc'::text, now()))
au AS (INSERT INTO public.basic_authentication (user_id, invitation_token, invited_at)
VALUES ((SELECT user_id FROM u), %(invitation_token)s, timezone('utc'::text, now()))
RETURNING invitation_token
)
SELECT u.user_id,
@ -32,7 +33,6 @@ def create_new_member(email, invitation_token, admin, name, owner=False):
u.email,
u.role,
u.name,
TRUE AS change_password,
(CASE WHEN u.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN u.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN u.role = 'member' THEN TRUE ELSE FALSE END) AS member,
@ -61,7 +61,6 @@ def restore_member(user_id, email, invitation_token, admin, name, owner=False):
email,
role,
name,
TRUE AS change_password,
(CASE WHEN role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN role = 'member' THEN TRUE ELSE FALSE END) AS member;""",
@ -73,8 +72,7 @@ def restore_member(user_id, email, invitation_token, admin, name, owner=False):
result = cur.fetchone()
query = cur.mogrify("""\
UPDATE public.basic_authentication
SET generated_password = TRUE,
invitation_token = %(invitation_token)s,
SET invitation_token = %(invitation_token)s,
invited_at = timezone('utc'::text, now()),
change_pwd_expire_at = NULL,
change_pwd_token = NULL
@ -118,7 +116,7 @@ def reset_member(tenant_id, editor_id, user_id_to_update):
def update(tenant_id, user_id, changes):
AUTH_KEYS = ["password", "generatedPassword", "invitationToken", "invitedAt", "changePwdExpireAt", "changePwdToken"]
AUTH_KEYS = ["password", "invitationToken", "invitedAt", "changePwdExpireAt", "changePwdToken"]
if len(changes.keys()) == 0:
return None
@ -131,10 +129,6 @@ def update(tenant_id, user_id, changes):
sub_query_bauth.append("changed_at = timezone('utc'::text, now())")
else:
sub_query_bauth.append(f"{helper.key_to_snake_case(key)} = %({key})s")
else:
if key == "appearance":
sub_query_users.append(f"appearance = %(appearance)s::jsonb")
changes["appearance"] = json.dumps(changes[key])
else:
sub_query_users.append(f"{helper.key_to_snake_case(key)} = %({key})s")
@ -151,11 +145,9 @@ def update(tenant_id, user_id, changes):
users.email,
users.role,
users.name,
basic_authentication.generated_password AS change_password,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
users.appearance;""",
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member;""",
{"user_id": user_id, **changes})
)
if len(sub_query_bauth) > 0:
@ -170,11 +162,9 @@ def update(tenant_id, user_id, changes):
users.email,
users.role,
users.name,
basic_authentication.generated_password AS change_password,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
users.appearance;""",
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member;""",
{"user_id": user_id, **changes})
)
@ -244,16 +234,15 @@ def get(user_id, tenant_id):
cur.execute(
cur.mogrify(
f"""SELECT
users.user_id AS id,
users.user_id,
email,
role,
name,
basic_authentication.generated_password,
(CASE WHEN role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN role = 'member' THEN TRUE ELSE FALSE END) AS member,
appearance,
api_key
api_key,
TRUE AS has_password
FROM public.users LEFT JOIN public.basic_authentication ON users.user_id=basic_authentication.user_id
WHERE
users.user_id = %(userId)s
@ -262,7 +251,7 @@ def get(user_id, tenant_id):
{"userId": user_id})
)
r = cur.fetchone()
return helper.dict_to_camel_case(r, ignore_keys=["appearance"])
return helper.dict_to_camel_case(r)
def generate_new_api_key(user_id):
@ -281,45 +270,39 @@ def generate_new_api_key(user_id):
return helper.dict_to_camel_case(r)
def edit(user_id_to_update, tenant_id, changes, editor_id):
ALLOW_EDIT = ["name", "email", "admin", "appearance"]
def edit(user_id_to_update, tenant_id, changes: schemas.EditUserSchema, editor_id):
user = get(user_id=user_id_to_update, tenant_id=tenant_id)
if editor_id != user_id_to_update or "admin" in changes and changes["admin"] != user["admin"]:
if editor_id != user_id_to_update or changes.admin is not None and changes.admin != user["admin"]:
admin = get(tenant_id=tenant_id, user_id=editor_id)
if not admin["superAdmin"] and not admin["admin"]:
return {"errors": ["unauthorized"]}
_changes = {}
if editor_id == user_id_to_update:
if changes.admin is not None:
if user["superAdmin"]:
changes.pop("admin")
elif user["admin"] != changes["admin"]:
changes.admin = None
elif changes.admin != user["admin"]:
return {"errors": ["cannot change your own role"]}
keys = list(changes.keys())
for k in keys:
if k not in ALLOW_EDIT or changes[k] is None:
changes.pop(k)
keys = list(changes.keys())
if len(keys) > 0:
if "email" in keys and changes["email"] != user["email"]:
if email_exists(changes["email"]):
if changes.email is not None and changes.email != user["email"]:
if email_exists(changes.email):
return {"errors": ["email already exists."]}
if get_deleted_user_by_email(changes["email"]) is not None:
if get_deleted_user_by_email(changes.email) is not None:
return {"errors": ["email previously deleted."]}
if "admin" in keys:
changes["role"] = "admin" if changes.pop("admin") else "member"
if len(changes.keys()) > 0:
updated_user = update(tenant_id=tenant_id, user_id=user_id_to_update, changes=changes)
_changes["email"] = changes.email
if changes.name is not None and len(changes.name) > 0:
_changes["name"] = changes.name
if changes.admin is not None:
_changes["role"] = "admin" if changes.admin else "member"
if len(_changes.keys()) > 0:
updated_user = update(tenant_id=tenant_id, user_id=user_id_to_update, changes=_changes)
return {"data": updated_user}
return {"data": user}
def edit_appearance(user_id, tenant_id, changes):
updated_user = update(tenant_id=tenant_id, user_id=user_id, changes=changes)
return {"data": updated_user}
def get_by_email_only(email):
with pg_client.PostgresClient() as cur:
cur.execute(
@ -330,7 +313,6 @@ def get_by_email_only(email):
users.email,
users.role,
users.name,
basic_authentication.generated_password,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member
@ -354,7 +336,6 @@ def get_by_email_reset(email, reset_token):
users.email,
users.role,
users.name,
basic_authentication.generated_password,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member
@ -377,7 +358,7 @@ def get_members(tenant_id):
users.email,
users.role,
users.name,
basic_authentication.generated_password,
users.created_at,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
@ -393,6 +374,7 @@ def get_members(tenant_id):
if len(r):
r = helper.list_to_camel_case(r)
for u in r:
u["createdAt"] = TimeUTC.datetime_to_timestamp(u["createdAt"])
if u["invitationToken"]:
u["invitationLink"] = __get_invitation_link(u.pop("invitationToken"))
else:
@ -440,7 +422,7 @@ def change_password(tenant_id, user_id, email, old_password, new_password):
auth = authenticate(email, old_password, for_change_password=True)
if auth is None:
return {"errors": ["wrong password"]}
changes = {"password": new_password, "generatedPassword": False}
changes = {"password": new_password}
user = update(tenant_id=tenant_id, user_id=user_id, changes=changes)
r = authenticate(user['email'], new_password)
tenant_id = r.pop("tenantId")
@ -466,7 +448,7 @@ def change_password(tenant_id, user_id, email, old_password, new_password):
def set_password_invitation(user_id, new_password):
changes = {"password": new_password, "generatedPassword": False,
changes = {"password": new_password,
"invitationToken": None, "invitedAt": None,
"changePwdExpireAt": None, "changePwdToken": None}
user = update(tenant_id=-1, user_id=user_id, changes=changes)
@ -575,15 +557,13 @@ def authenticate(email, password, for_change_password=False, for_plugin=False):
with pg_client.PostgresClient() as cur:
query = cur.mogrify(
f"""SELECT
users.user_id AS id,
users.user_id,
1 AS tenant_id,
users.role,
users.name,
basic_authentication.generated_password AS change_password,
(CASE WHEN users.role = 'owner' THEN TRUE ELSE FALSE END) AS super_admin,
(CASE WHEN users.role = 'admin' THEN TRUE ELSE FALSE END) AS admin,
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member,
users.appearance
(CASE WHEN users.role = 'member' THEN TRUE ELSE FALSE END) AS member
FROM public.users INNER JOIN public.basic_authentication USING(user_id)
WHERE users.email = %(email)s
AND basic_authentication.password = crypt(%(password)s, basic_authentication.password)
@ -597,16 +577,16 @@ def authenticate(email, password, for_change_password=False, for_plugin=False):
if r is not None:
if for_change_password:
return True
r = helper.dict_to_camel_case(r, ignore_keys=["appearance"])
r = helper.dict_to_camel_case(r)
query = cur.mogrify(
f"""UPDATE public.users
SET jwt_iat = timezone('utc'::text, now())
WHERE user_id = %(user_id)s
RETURNING jwt_iat;""",
{"user_id": r["id"]})
{"user_id": r["userId"]})
cur.execute(query)
return {
"jwt": authorizers.generate_jwt(r['id'], r['tenantId'],
"jwt": authorizers.generate_jwt(r['userId'], r['tenantId'],
TimeUTC.datetime_to_timestamp(cur.fetchone()["jwt_iat"]),
aud=f"plugin:{helper.get_stage_name()}" if for_plugin else f"front:{helper.get_stage_name()}"),
"email": email,


@ -29,8 +29,12 @@ def edit_config(user_id, weekly_report):
def cron():
if not helper.has_smtp():
print("!!! No SMTP configuration found, ignoring weekly report")
return
with pg_client.PostgresClient(long_query=True) as cur:
params = {"3_days_ago": TimeUTC.midnight(delta_days=-3),
params = {"tomorrow": TimeUTC.midnight(delta_days=1),
"3_days_ago": TimeUTC.midnight(delta_days=-3),
"1_week_ago": TimeUTC.midnight(delta_days=-7),
"2_week_ago": TimeUTC.midnight(delta_days=-14),
"5_week_ago": TimeUTC.midnight(delta_days=-35)}
@ -43,18 +47,18 @@ def cron():
COALESCE(week_0_issues.count, 0) AS this_week_issues_count,
COALESCE(week_1_issues.count, 0) AS past_week_issues_count,
COALESCE(month_1_issues.count, 0) AS past_month_issues_count
FROM public.projects
FROM (SELECT project_id, name FROM public.projects WHERE projects.deleted_at ISNULL) AS projects
INNER JOIN LATERAL (
SELECT sessions.project_id
FROM public.sessions
WHERE sessions.project_id = projects.project_id
AND start_ts >= %(3_days_ago)s
AND start_ts < %(tomorrow)s
LIMIT 1) AS recently_active USING (project_id)
INNER JOIN LATERAL (
SELECT COALESCE(ARRAY_AGG(email), '{}') AS emails
FROM public.users
WHERE users.tenant_id = projects.tenant_id
AND users.deleted_at ISNULL
WHERE users.deleted_at ISNULL
AND users.weekly_report
) AS users ON (TRUE)
LEFT JOIN LATERAL (
@ -62,25 +66,25 @@ def cron():
FROM events_common.issues
INNER JOIN public.sessions USING (session_id)
WHERE sessions.project_id = projects.project_id
AND issues.timestamp >= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '1 week') * 1000)::BIGINT
AND issues.timestamp >= %(1_week_ago)s
AND issues.timestamp < %(tomorrow)s
) AS week_0_issues ON (TRUE)
LEFT JOIN LATERAL (
SELECT COUNT(1) AS count
FROM events_common.issues
INNER JOIN public.sessions USING (session_id)
WHERE sessions.project_id = projects.project_id
AND issues.timestamp <= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '1 week') * 1000)::BIGINT
AND issues.timestamp >= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '2 week') * 1000)::BIGINT
AND issues.timestamp <= %(1_week_ago)s
AND issues.timestamp >= %(2_week_ago)s
) AS week_1_issues ON (TRUE)
LEFT JOIN LATERAL (
SELECT COUNT(1) AS count
FROM events_common.issues
INNER JOIN public.sessions USING (session_id)
WHERE sessions.project_id = projects.project_id
AND issues.timestamp <= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '1 week') * 1000)::BIGINT
AND issues.timestamp >= (EXTRACT(EPOCH FROM DATE_TRUNC('day', now()) - INTERVAL '5 week') * 1000)::BIGINT
) AS month_1_issues ON (TRUE)
WHERE projects.deleted_at ISNULL;"""), params)
AND issues.timestamp <= %(1_week_ago)s
AND issues.timestamp >= %(5_week_ago)s
) AS month_1_issues ON (TRUE);"""), params)
projects_data = cur.fetchall()
emails_to_send = []
for p in projects_data:


@ -1,12 +1,13 @@
import math
import random
import re
import string
from typing import Union
import math
import requests
import schemas
from chalicelib.utils.TimeUTC import TimeUTC
local_prefix = 'local-'
from decouple import config
@ -364,10 +365,6 @@ def has_smtp():
return config("EMAIL_HOST") is not None and len(config("EMAIL_HOST")) > 0
def get_edition():
return "ee" if "ee" in config("ENTERPRISE_BUILD", default="").lower() else "foss"
def old_search_payload_to_flat(values):
# in case the old search body was passed
if values.get("events") is not None:
@ -384,3 +381,20 @@ def custom_alert_to_front(values):
if values.get("seriesId") is not None and values["query"]["left"] == schemas.AlertColumn.custom:
values["query"]["left"] = values["seriesId"]
return values
def __time_value(row):
row["unit"] = schemas.TemplatePredefinedUnits.millisecond
factor = 1
if row["value"] > TimeUTC.MS_MINUTE:
row["value"] = row["value"] / TimeUTC.MS_MINUTE
row["unit"] = schemas.TemplatePredefinedUnits.minute
factor = TimeUTC.MS_MINUTE
elif row["value"] > 1 * 1000:
row["value"] = row["value"] / 1000
row["unit"] = schemas.TemplatePredefinedUnits.second
factor = 1000
if "chart" in row and factor > 1:
for r in row["chart"]:
r["value"] /= factor


@ -5,6 +5,9 @@ import boto3
import botocore
from botocore.client import Config
if not config("S3_HOST", default=False):
client = boto3.client('s3')
else:
client = boto3.client('s3', endpoint_url=config("S3_HOST"),
aws_access_key_id=config("S3_KEY"),
aws_secret_access_key=config("S3_SECRET"),


@ -33,7 +33,9 @@ class ORRoute(APIRoute):
if isinstance(response, JSONResponse):
response: JSONResponse = response
body = json.loads(response.body.decode('utf8'))
if response.status_code == 200 and body is not None and body.get("errors") is not None:
if response.status_code == 200 \
and body is not None and isinstance(body, dict) \
and body.get("errors") is not None:
if "not found" in body["errors"][0]:
response.status_code = status.HTTP_404_NOT_FOUND
else:

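The route wrapper promotes application-level errors in otherwise-200 JSON bodies to real HTTP status codes. A standalone sketch of the mapping rule, assuming (as the truncated else branch suggests) that other errors map to 400:

def status_for(body, status_code=200):
    # A 200 whose body carries an errors list becomes 404 or 400.
    if status_code == 200 and isinstance(body, dict) and body.get("errors"):
        return 404 if "not found" in body["errors"][0] else 400
    return status_code

print(status_for({"errors": ["custom metric not found"]}))  # 404
print(status_for({"data": []}))                             # 200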

@ -1,15 +1,15 @@
requests==2.26.0
urllib3==1.26.6
boto3==1.16.1
pyjwt==1.7.1
psycopg2-binary==2.8.6
elasticsearch==7.9.1
jira==3.1.1
requests==2.28.0
urllib3==1.26.9
boto3==1.24.11
pyjwt==2.4.0
psycopg2-binary==2.9.3
elasticsearch==8.2.3
jira==3.2.0
fastapi==0.75.0
uvicorn[standard]==0.17.5
fastapi==0.78.0
uvicorn[standard]==0.17.6
python-decouple==3.6
pydantic[email]==1.8.2
apscheduler==3.8.1
pydantic[email]==1.9.1
apscheduler==3.9.1


@ -1,7 +1,8 @@
from typing import Union
from typing import Union, Optional
from decouple import config
from fastapi import Depends, Body, BackgroundTasks
from fastapi import Depends, Body, BackgroundTasks, HTTPException
from starlette import status
import schemas
from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assignments, projects, \
@ -13,7 +14,7 @@ from chalicelib.core import log_tool_rollbar, sourcemaps, events, sessions_assig
assist, heatmaps, mobile, signup, tenants, errors_favorite_viewed, boarding, notifications, webhook, users, \
custom_metrics, saved_search
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import email_helper
from chalicelib.utils import email_helper, helper, captcha
from chalicelib.utils.TimeUTC import TimeUTC
from or_dependencies import OR_context
from routers.base import get_routers
@ -21,6 +22,34 @@ from routers.base import get_routers
public_app, app, app_apikey = get_routers()
@public_app.post('/login', tags=["authentication"])
def login(data: schemas.UserLoginSchema = Body(...)):
if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid captcha."
)
r = users.authenticate(data.email, data.password, for_plugin=False)
if r is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Youve entered invalid Email or Password."
)
if "errors" in r:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail=r["errors"][0]
)
r["smtp"] = helper.has_smtp()
return {
'jwt': r.pop('jwt'),
'data': {
"user": r
}
}
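A hedged client-side sketch of the relocated login endpoint, using the requests pin from requirements.txt; the base URL and credentials are placeholders:

import requests

resp = requests.post("http://localhost:8000/login",
                     json={"email": "user@example.com",
                           "password": "secret"})
if resp.status_code == 200:
    payload = resp.json()
    jwt, user = payload["jwt"], payload["data"]["user"]
    print(user.get("smtp"))  # whether the server reports SMTP as configured
else:
    print(resp.status_code, resp.json().get("detail"))  # e.g. 401 with a detail message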
@app.get('/{projectId}/sessions/{sessionId}', tags=["sessions"])
@app.get('/{projectId}/sessions2/{sessionId}', tags=["sessions"])
def get_session2(projectId: int, sessionId: Union[int, str], background_tasks: BackgroundTasks,
@ -106,11 +135,13 @@ def comment_assignment(projectId: int, sessionId: int, issueId: str, data: schem
def events_search(projectId: int, q: str,
type: Union[schemas.FilterType, schemas.EventType,
schemas.PerformanceEventType, schemas.FetchFilterType,
schemas.GraphqlFilterType] = None,
key: str = None,
source: str = None, context: schemas.CurrentContext = Depends(OR_context)):
schemas.GraphqlFilterType, str] = None,
key: str = None, source: str = None, live: bool = False,
context: schemas.CurrentContext = Depends(OR_context)):
if len(q) == 0:
return {"data": []}
if live:
return assist.autocomplete(project_id=projectId, q=q,
key=(type if isinstance(type, str) else type.value) if type is not None else None)
if type in [schemas.FetchFilterType._url]:
type = schemas.EventType.request
elif type in [schemas.GraphqlFilterType._name]:
@ -549,7 +580,7 @@ def add_metadata(projectId: int, data: schemas.MetadataBasicSchema = Body(...),
@app.put('/{projectId}/metadata/{index}', tags=["metadata"])
def edit_metadata(projectId: int, index: int, data: schemas.MetadataBasicSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return metadata.edit(tenant_id=context.tenant_id, project_id=projectId, index=int(index),
return metadata.edit(tenant_id=context.tenant_id, project_id=projectId, index=index,
new_name=data.key)
@ -743,7 +774,7 @@ def get_funnel_sessions_on_the_fly(projectId: int, funnelId: int, data: schemas.
@app.get('/{projectId}/funnels/issues/{issueId}/sessions', tags=["funnels"])
def get_issue_sessions(projectId: int, issueId: str, startDate: int = None, endDate: int = None,
def get_funnel_issue_sessions(projectId: int, issueId: str, startDate: int = None, endDate: int = None,
context: schemas.CurrentContext = Depends(OR_context)):
issue = issues.get(project_id=projectId, issue_id=issueId)
if issue is None:
@ -829,8 +860,15 @@ def all_issue_types(context: schemas.CurrentContext = Depends(OR_context)):
@app.get('/{projectId}/assist/sessions', tags=["assist"])
def sessions_live(projectId: int, userId: str = None, context: schemas.CurrentContext = Depends(OR_context)):
data = assist.get_live_sessions_ws(projectId, user_id=userId)
def get_sessions_live(projectId: int, userId: str = None, context: schemas.CurrentContext = Depends(OR_context)):
data = assist.get_live_sessions_ws_user_id(projectId, user_id=userId)
return {'data': data}
@app.post('/{projectId}/assist/sessions', tags=["assist"])
def sessions_live(projectId: int, data: schemas.LiveSessionsSearchPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = assist.get_live_sessions_ws(projectId, body=data)
return {'data': data}
@ -903,7 +941,7 @@ def edit_client(data: schemas.UpdateTenantSchema = Body(...),
@app.post('/{projectId}/errors/search', tags=['errors'])
def errors_search(projectId: int, data: schemas.SearchErrorsSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return errors.search(data, projectId, user_id=context.user_id)
return {"data": errors.search(data, projectId, user_id=context.user_id)}
@app.get('/{projectId}/errors/stats', tags=['errors'])
@ -966,6 +1004,11 @@ def get_notifications(context: schemas.CurrentContext = Depends(OR_context)):
return {"data": notifications.get_all(tenant_id=context.tenant_id, user_id=context.user_id)}
@app.get('/notifications/count', tags=['notifications'])
def get_notifications_count(context: schemas.CurrentContext = Depends(OR_context)):
return {"data": notifications.get_all_count(tenant_id=context.tenant_id, user_id=context.user_id)}
@app.get('/notifications/{notificationId}/view', tags=['notifications'])
def view_notifications(notificationId: int, context: schemas.CurrentContext = Depends(OR_context)):
return {"data": notifications.view_notification(notification_ids=[notificationId], user_id=context.user_id)}
@ -1071,17 +1114,10 @@ def generate_new_user_token(context: schemas.CurrentContext = Depends(OR_context
@app.put('/account', tags=["account"])
def edit_account(data: schemas.EditUserSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return users.edit(tenant_id=context.tenant_id, user_id_to_update=context.user_id, changes=data.dict(),
return users.edit(tenant_id=context.tenant_id, user_id_to_update=context.user_id, changes=data,
editor_id=context.user_id)
@app.post('/account/appearance', tags=["account"])
@app.put('/account/appearance', tags=["account"])
def edit_account_appearance(data: schemas.EditUserAppearanceSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return users.edit_appearance(tenant_id=context.tenant_id, user_id=context.user_id, changes=data.dict())
@app.post('/account/password', tags=["account"])
@app.put('/account/password', tags=["account"])
def change_client_password(data: schemas.EditUserPasswordSchema = Body(...),
@ -1120,6 +1156,16 @@ def delete_saved_search(projectId: int, search_id: int, context: schemas.Current
return {"data": saved_search.delete(project_id=projectId, user_id=context.user_id, search_id=search_id)}
@app.get('/limits', tags=['accounts'])
def get_limits(context: schemas.CurrentContext = Depends(OR_context)):
return {
'data': {
"teamMember": -1,
"projects": -1,
}
}
@public_app.get('/', tags=["health"])
@public_app.post('/', tags=["health"])
@public_app.put('/', tags=["health"])


@ -1,17 +1,15 @@
from typing import Optional
from decouple import config
from fastapi import Body, Depends, HTTPException, status, BackgroundTasks
from fastapi import Body, Depends, BackgroundTasks
from starlette.responses import RedirectResponse
import schemas
from chalicelib.core import assist
from chalicelib.core import integrations_manager
from chalicelib.core import sessions
from chalicelib.core import tenants, users, metadata, projects, license
from chalicelib.core import webhook
from chalicelib.core.collaboration_slack import Slack
from chalicelib.utils import captcha
from chalicelib.utils import helper
from or_dependencies import OR_context
from routers.base import get_routers
@ -24,60 +22,23 @@ def get_all_signup():
return {"data": {"tenants": tenants.tenants_exists(),
"sso": None,
"ssoProvider": None,
"edition": helper.get_edition()}}
@public_app.post('/login', tags=["authentication"])
def login(data: schemas.UserLoginSchema = Body(...)):
if helper.allow_captcha() and not captcha.is_valid(data.g_recaptcha_response):
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Invalid captcha."
)
r = users.authenticate(data.email, data.password, for_plugin=False)
if r is None:
raise HTTPException(
status_code=status.HTTP_401_UNAUTHORIZED,
detail="Youve entered invalid Email or Password."
)
tenant_id = r.pop("tenantId")
r["limits"] = {
"teamMember": -1,
"projects": -1,
"metadata": metadata.get_remaining_metadata_with_count(tenant_id)}
c = tenants.get_by_tenant_id(tenant_id)
c.pop("createdAt")
c["smtp"] = helper.has_smtp()
c["iceServers"] = assist.get_ice_servers()
r["smtp"] = c["smtp"]
r["iceServers"] = c["iceServers"]
return {
'jwt': r.pop('jwt'),
'data': {
"user": r,
"client": c
}
}
"edition": license.EDITION}}
@app.get('/account', tags=['accounts'])
def get_account(context: schemas.CurrentContext = Depends(OR_context)):
r = users.get(tenant_id=context.tenant_id, user_id=context.user_id)
t = tenants.get_by_tenant_id(context.tenant_id)
if t is not None:
t.pop("createdAt")
t["tenantName"] = t.pop("name")
return {
'data': {
**r,
"limits": {
"teamMember": -1,
"projects": -1,
"metadata": metadata.get_remaining_metadata_with_count(context.tenant_id)
},
**t,
**license.get_status(context.tenant_id),
"smtp": helper.has_smtp(),
"iceServers": assist.get_ice_servers()
# "iceServers": assist.get_ice_servers()
}
}
@ -181,7 +142,7 @@ def change_password_by_invitation(data: schemas.EditPasswordByInvitationSchema =
@app.post('/client/members/{memberId}', tags=["client"])
def edit_member(memberId: int, data: schemas.EditMemberSchema,
context: schemas.CurrentContext = Depends(OR_context)):
return users.edit(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data.dict(),
return users.edit(tenant_id=context.tenant_id, editor_id=context.user_id, changes=data,
user_id_to_update=memberId)
@ -199,28 +160,11 @@ def search_sessions_by_metadata(key: str, value: str, projectId: Optional[int] =
m_key=key, project_id=projectId)}
@app.get('/plans', tags=["plan"])
def get_current_plan(context: schemas.CurrentContext = Depends(OR_context)):
return {
"data": license.get_status(context.tenant_id)
}
@public_app.get('/general_stats', tags=["private"], include_in_schema=False)
def get_general_stats():
return {"data": {"sessions:": sessions.count_all()}}
@app.get('/client', tags=['projects'])
def get_client(context: schemas.CurrentContext = Depends(OR_context)):
r = tenants.get_by_tenant_id(context.tenant_id)
if r is not None:
r.pop("createdAt")
return {
'data': r
}
@app.get('/projects', tags=['projects'])
def get_projects(context: schemas.CurrentContext = Depends(OR_context)):
return {"data": projects.get_projects(tenant_id=context.tenant_id, recording_state=True, gdpr=True, recorded=True,


@ -1,15 +1,3 @@
from chalicelib.core import weekly_report, jobs
async def run_scheduled_jobs() -> None:
jobs.execute_jobs()
async def weekly_report2() -> None:
weekly_report.cron()
cron_jobs = [
{"func": run_scheduled_jobs, "trigger": "interval", "seconds": 60, "misfire_grace_time": 20},
{"func": weekly_report2, "trigger": "cron", "day_of_week": "mon", "hour": 5, "misfire_grace_time": 60 * 60}
]


@ -1,10 +1,21 @@
from chalicelib.core import telemetry
from chalicelib.core import weekly_report, jobs
def telemetry_cron() -> None:
async def run_scheduled_jobs() -> None:
jobs.execute_jobs()
async def weekly_report2() -> None:
weekly_report.cron()
async def telemetry_cron() -> None:
telemetry.compute()
cron_jobs = [
{"func": telemetry_cron, "trigger": "cron", "day_of_week": "*"}
{"func": telemetry_cron, "trigger": "cron", "day_of_week": "*"},
{"func": run_scheduled_jobs, "trigger": "interval", "seconds": 60, "misfire_grace_time": 20},
{"func": weekly_report2, "trigger": "cron", "day_of_week": "mon", "hour": 5, "misfire_grace_time": 60 * 60}
]
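A minimal sketch of how a cron_jobs list like this could be registered with APScheduler (matching the apscheduler 3.9 pin; the actual scheduler wiring lives elsewhere and is assumed here):

from apscheduler.schedulers.asyncio import AsyncIOScheduler

def schedule_all(cron_jobs):
    # 'func' and 'trigger' are consumed; remaining keys pass through as
    # trigger arguments (seconds, day_of_week, hour) or job options
    # (misfire_grace_time). Must run inside an asyncio event loop.
    scheduler = AsyncIOScheduler()
    for job in cron_jobs:
        spec = dict(job)
        scheduler.add_job(spec.pop("func"), spec.pop("trigger"), **spec)
    scheduler.start()
    return scheduler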


@ -1,7 +1,7 @@
from fastapi import Body, Depends
import schemas
from chalicelib.core import dashboards, custom_metrics
from chalicelib.core import dashboards, custom_metrics, funnels
from or_dependencies import OR_context
from routers.base import get_routers
@ -102,18 +102,29 @@ def get_templates(projectId: int, context: schemas.CurrentContext = Depends(OR_c
@app.put('/{projectId}/custom_metrics/try', tags=["customMetrics"])
def try_custom_metric(projectId: int, data: schemas.TryCustomMetricsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
return {"data": custom_metrics.merged_live(project_id=projectId, data=data)}
return {"data": custom_metrics.merged_live(project_id=projectId, data=data, user_id=context.user_id)}
@app.post('/{projectId}/metrics/try/sessions', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/try/sessions', tags=["customMetrics"])
-def try_custom_metric_sessions(projectId: int,
-data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
+def try_custom_metric_sessions(projectId: int, data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.try_sessions(project_id=projectId, user_id=context.user_id, data=data)
return {"data": data}
@app.post('/{projectId}/metrics/try/issues', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/try/issues', tags=["customMetrics"])
def try_custom_metric_funnel_issues(projectId: int, data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
if len(data.series) == 0:
return {"data": []}
data.series[0].filter.startDate = data.startTimestamp
data.series[0].filter.endDate = data.endTimestamp
data = funnels.get_issues_on_the_fly_widget(project_id=projectId, data=data.series[0].filter)
return {"data": data}
@app.post('/{projectId}/metrics', tags=["dashboard"])
@app.put('/{projectId}/metrics', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics', tags=["customMetrics"])
@@ -131,7 +142,7 @@ def get_custom_metrics(projectId: int, context: schemas.CurrentContext = Depends
@app.get('/{projectId}/metrics/{metric_id}', tags=["dashboard"])
@app.get('/{projectId}/custom_metrics/{metric_id}', tags=["customMetrics"])
-def get_custom_metric(projectId: int, metric_id: int, context: schemas.CurrentContext = Depends(OR_context)):
+def get_custom_metric(projectId: int, metric_id: str, context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get(project_id=projectId, user_id=context.user_id, metric_id=metric_id)
if data is None:
return {"errors": ["custom metric not found"]}
@@ -149,6 +160,42 @@ def get_custom_metric_sessions(projectId: int, metric_id: int,
return {"data": data}
@app.post('/{projectId}/metrics/{metric_id}/issues', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/issues', tags=["customMetrics"])
def get_custom_metric_funnel_issues(projectId: int, metric_id: int,
data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_funnel_issues(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
data=data)
if data is None:
return {"errors": ["custom metric not found"]}
return {"data": data}
@app.post('/{projectId}/metrics/{metric_id}/issues/{issueId}/sessions', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/issues/{issueId}/sessions', tags=["customMetrics"])
def get_metric_funnel_issue_sessions(projectId: int, metric_id: int, issueId: str,
data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_funnel_sessions_by_issue(project_id=projectId, user_id=context.user_id,
metric_id=metric_id, issue_id=issueId, data=data)
if data is None:
return {"errors": ["custom metric not found"]}
return {"data": data}
@app.post('/{projectId}/metrics/{metric_id}/errors', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/errors', tags=["customMetrics"])
def get_custom_metric_errors_list(projectId: int, metric_id: int,
data: schemas.CustomMetricSessionsPayloadSchema = Body(...),
context: schemas.CurrentContext = Depends(OR_context)):
data = custom_metrics.get_errors_list(project_id=projectId, user_id=context.user_id, metric_id=metric_id,
data=data)
if data is None:
return {"errors": ["custom metric not found"]}
return {"data": data}
@app.post('/{projectId}/metrics/{metric_id}/chart', tags=["dashboard"])
@app.post('/{projectId}/custom_metrics/{metric_id}/chart', tags=["customMetrics"])
def get_custom_metric_chart(projectId: int, metric_id: int, data: schemas.CustomMetricChartPayloadSchema = Body(...),

@@ -12,7 +12,8 @@ public_app, app, app_apikey = get_routers()
@app_apikey.get('/v1/{projectKey}/users/{userId}/sessions', tags=["api"])
def get_user_sessions(projectKey: str, userId: str, start_date: int = None, end_date: int = None):
projectId = projects.get_internal_project_id(projectKey)
if projectId is None:
return {"errors": ["invalid projectKey"]}
return {
'data': sessions.get_user_sessions(
project_id=projectId,
@@ -26,6 +27,8 @@ def get_user_sessions(projectKey: str, userId: str, start_date: int = None, end_
@app_apikey.get('/v1/{projectKey}/sessions/{sessionId}/events', tags=["api"])
def get_session_events(projectKey: str, sessionId: int):
projectId = projects.get_internal_project_id(projectKey)
if projectId is None:
return {"errors": ["invalid projectKey"]}
return {
'data': events.get_by_sessionId2_pg(
project_id=projectId,
@@ -37,6 +40,8 @@ def get_session_events(projectKey: str, sessionId: int):
@app_apikey.get('/v1/{projectKey}/users/{userId}', tags=["api"])
def get_user_details(projectKey: str, userId: str):
projectId = projects.get_internal_project_id(projectKey)
if projectId is None:
return {"errors": ["invalid projectKey"]}
return {
'data': sessions.get_session_user(
project_id=projectId,
@@ -48,6 +53,8 @@ def get_user_details(projectKey: str, userId: str):
@app_apikey.delete('/v1/{projectKey}/users/{userId}', tags=["api"])
def schedule_to_delete_user_data(projectKey: str, userId: str):
projectId = projects.get_internal_project_id(projectKey)
if projectId is None:
return {"errors": ["invalid projectKey"]}
data = {"action": "delete_user_data",
"reference_id": userId,
"description": f"Delete user sessions of userId = {userId}",
@@ -61,6 +68,8 @@ def schedule_to_delete_user_data(projectKey: str, userId: str):
@app_apikey.get('/v1/{projectKey}/jobs', tags=["api"])
def get_jobs(projectKey: str):
projectId = projects.get_internal_project_id(projectKey)
if projectId is None:
return {"errors": ["invalid projectKey"]}
return {
'data': jobs.get_all(project_id=projectId)
}
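Note: every api-v1 route above repeats the same projectKey lookup and invalid-key guard. A hedged sketch of factoring it into a FastAPI dependency; resolve_project_id is a hypothetical helper (not in this diff), it reuses the router's existing projects/jobs imports, and it raises a 400 instead of returning the {"errors": [...]} body the routes use:

from fastapi import Depends, HTTPException

def resolve_project_id(projectKey: str) -> int:
    project_id = projects.get_internal_project_id(projectKey)
    if project_id is None:
        raise HTTPException(status_code=400, detail="invalid projectKey")
    return project_id

@app_apikey.get('/v1/{projectKey}/jobs', tags=["api"])
def get_jobs(projectId: int = Depends(resolve_project_id)):
    # FastAPI fills the dependency's projectKey argument from the path parameter
    return {'data': jobs.get_all(project_id=projectId)}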

@@ -12,7 +12,7 @@ def attribute_to_camel_case(snake_str):
def transform_email(email: str) -> str:
-return email.lower() if isinstance(email, str) else email
+return email.lower().strip() if isinstance(email, str) else email
class _Grecaptcha(BaseModel):
@@ -37,16 +37,11 @@ class UserSignupSchema(UserLoginSchema):
class EditUserSchema(BaseModel):
name: Optional[str] = Field(None)
email: Optional[EmailStr] = Field(None)
-admin: Optional[bool] = Field(False)
-appearance: Optional[dict] = Field({})
+admin: Optional[bool] = Field(None)
_transform_email = validator('email', pre=True, allow_reuse=True)(transform_email)
class EditUserAppearanceSchema(BaseModel):
appearance: dict = Field(...)
class ForgetPasswordPayloadSchema(_Grecaptcha):
email: EmailStr = Field(...)
@@ -132,13 +127,11 @@ class CreateMemberSchema(BaseModel):
_transform_email = validator('email', pre=True, allow_reuse=True)(transform_email)
-class EditMemberSchema(BaseModel):
+class EditMemberSchema(EditUserSchema):
name: str = Field(...)
email: EmailStr = Field(...)
admin: bool = Field(False)
_transform_email = validator('email', pre=True, allow_reuse=True)(transform_email)
class EditPasswordByInvitationSchema(BaseModel):
invitation: str = Field(...)
@@ -486,6 +479,10 @@ class IssueType(str, Enum):
js_exception = 'js_exception'
class MetricFormatType(str, Enum):
session_count = 'sessionCount'
class __MixedSearchFilter(BaseModel):
is_event: bool = Field(...)
@@ -618,17 +615,28 @@ class _PaginatedSchema(BaseModel):
page: int = Field(default=1, gt=0)
class SortOrderType(str, Enum):
asc = "ASC"
desc = "DESC"
class SessionsSearchPayloadSchema(_PaginatedSchema):
events: List[_SessionSearchEventSchema] = Field([])
filters: List[SessionSearchFilterSchema] = Field([])
startDate: int = Field(None)
endDate: int = Field(None)
sort: str = Field(default="startTs")
-order: Literal["asc", "desc"] = Field(default="desc")
+order: SortOrderType = Field(default=SortOrderType.desc)
events_order: Optional[SearchEventOrder] = Field(default=SearchEventOrder._then)
group_by_user: bool = Field(default=False)
bookmarked: bool = Field(default=False)
@root_validator(pre=True)
def transform_order(cls, values):
if values.get("order") is not None:
values["order"] = values["order"].upper()
return values
class Config:
alias_generator = attribute_to_camel_case
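Note: because transform_order runs as a pre root validator, lowercase order values coerce cleanly into SortOrderType. A quick check (pydantic v1 style, matching this codebase):

payload = SessionsSearchPayloadSchema.parse_obj({"order": "asc"})
assert payload.order == SortOrderType.asc  # "asc" was upper-cased before enum validation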
@@ -757,8 +765,7 @@ class MobileSignPayloadSchema(BaseModel):
keys: List[str] = Field(...)
-class CustomMetricSeriesFilterSchema(FlatSessionsSearchPayloadSchema):
# class CustomMetricSeriesFilterSchema(SessionsSearchPayloadSchema):
+class CustomMetricSeriesFilterSchema(FlatSessionsSearchPayloadSchema, SearchErrorsSchema):
startDate: Optional[int] = Field(None)
endDate: Optional[int] = Field(None)
sort: Optional[str] = Field(None)
@@ -790,6 +797,8 @@ class MetricTableViewType(str, Enum):
class MetricType(str, Enum):
timeseries = "timeseries"
table = "table"
predefined = "predefined"
funnel = "funnel"
class TableMetricOfType(str, Enum):
@@ -800,6 +809,8 @@ class TableMetricOfType(str, Enum):
user_id = FilterType.user_id.value
issues = FilterType.issue.value
visited_url = EventType.location.value
sessions = "SESSIONS"
errors = IssueType.js_exception.value
class TimeseriesMetricOfType(str, Enum):
@@ -815,7 +826,7 @@ class CustomMetricSessionsPayloadSchema(FlatSessionsSearch, _PaginatedSchema):
alias_generator = attribute_to_camel_case
-class CustomMetricChartPayloadSchema(CustomMetricSessionsPayloadSchema):
+class CustomMetricChartPayloadSchema(CustomMetricSessionsPayloadSchema, _PaginatedSchema):
density: int = Field(7)
class Config:
@@ -830,7 +841,7 @@ class TryCustomMetricsPayloadSchema(CustomMetricChartPayloadSchema):
metric_type: MetricType = Field(MetricType.timeseries)
metric_of: Union[TableMetricOfType, TimeseriesMetricOfType] = Field(TableMetricOfType.user_id)
metric_value: List[IssueType] = Field([])
-metric_format: Optional[str] = Field(None)
+metric_format: Optional[MetricFormatType] = Field(None)
# metricFraction: float = Field(None, gt=0, lt=1)
# This is used to handle wrong values sent by the UI
@@ -863,8 +874,23 @@ class TryCustomMetricsPayloadSchema(CustomMetricChartPayloadSchema):
alias_generator = attribute_to_camel_case
class CustomMetricsConfigSchema(BaseModel):
col: Optional[int] = Field(default=2)
row: Optional[int] = Field(default=2)
position: Optional[int] = Field(default=0)
class CreateCustomMetricsSchema(TryCustomMetricsPayloadSchema):
series: List[CustomMetricCreateSeriesSchema] = Field(..., min_items=1)
config: CustomMetricsConfigSchema = Field(default=CustomMetricsConfigSchema())
@root_validator(pre=True)
def transform_series(cls, values):
if values.get("series") is not None and len(values["series"]) > 1 and values.get(
"metric_type") == MetricType.funnel.value:
values["series"] = [values["series"][0]]
return values
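Note: transform_series silently truncates funnel metrics to their first series. A standalone restatement of the rule, runnable in isolation:

def keep_first_series_for_funnels(values: dict) -> dict:
    # mirrors the pre root validator above: funnels use one series at most
    if values.get("series") and len(values["series"]) > 1 \
            and values.get("metric_type") == "funnel":
        values["series"] = [values["series"][0]]
    return values

v = keep_first_series_for_funnels({"metric_type": "funnel",
                                   "series": [{"name": "s1"}, {"name": "s2"}]})
assert v["series"] == [{"name": "s1"}]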
class CustomMetricUpdateSeriesSchema(CustomMetricCreateSeriesSchema):
@@ -888,6 +914,7 @@ class SavedSearchSchema(FunnelSchema):
class CreateDashboardSchema(BaseModel):
name: str = Field(..., min_length=1)
description: Optional[str] = Field(default='')
is_public: bool = Field(default=False)
is_pinned: bool = Field(default=False)
metrics: Optional[List[int]] = Field(default=[])
@@ -966,6 +993,7 @@ class TemplatePredefinedKeys(str, Enum):
class TemplatePredefinedUnits(str, Enum):
millisecond = "ms"
second = "s"
minute = "min"
memory = "mb"
frame = "f/s"
@@ -980,3 +1008,62 @@ class CustomMetricAndTemplate(BaseModel):
class Config:
alias_generator = attribute_to_camel_case
class LiveFilterType(str, Enum):
user_os = FilterType.user_os.value
user_browser = FilterType.user_browser.value
user_device = FilterType.user_device.value
user_country = FilterType.user_country.value
user_id = FilterType.user_id.value
user_anonymous_id = FilterType.user_anonymous_id.value
rev_id = FilterType.rev_id.value
platform = FilterType.platform.value
page_title = "PAGETITLE"
session_id = "SESSIONID"
metadata = "METADATA"
user_UUID = "USERUUID"
tracker_version = "TRACKERVERSION"
user_browser_version = "USERBROWSERVERSION"
user_device_type = "USERDEVICETYPE",
class LiveSessionSearchFilterSchema(BaseModel):
value: Union[List[str], str] = Field(...)
type: LiveFilterType = Field(...)
source: Optional[str] = Field(None)
@root_validator
def validator(cls, values):
if values.get("type") is not None and values["type"] == LiveFilterType.metadata.value:
assert values.get("source") is not None, "source should not be null for METADATA type"
assert len(values.get("source")) > 0, "source should not be empty for METADATA type"
return values
class LiveSessionsSearchPayloadSchema(_PaginatedSchema):
filters: List[LiveSessionSearchFilterSchema] = Field([])
sort: Union[LiveFilterType, str] = Field(default="TIMESTAMP")
order: SortOrderType = Field(default=SortOrderType.desc)
@root_validator(pre=True)
def transform(cls, values):
if values.get("order") is not None:
values["order"] = values["order"].upper()
if values.get("filters") is not None:
i = 0
while i < len(values["filters"]):
if values["filters"][i]["value"] is None or len(values["filters"][i]["value"]) == 0:
del values["filters"][i]
else:
i += 1
for i in values["filters"]:
if i.get("type") == LiveFilterType.platform.value:
i["type"] = LiveFilterType.user_device_type.value
if values.get("sort") is not None:
if values["sort"].lower() == "startts":
values["sort"] = "TIMESTAMP"
return values
class Config:
alias_generator = attribute_to_camel_case
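Note: the transform validator above normalizes live-search payloads in three ways: filters with empty values are dropped, PLATFORM filters are rewritten to USERDEVICETYPE, and sort/order are canonicalized. A quick check using only filter types defined in this schema:

payload = LiveSessionsSearchPayloadSchema.parse_obj({
    "filters": [{"value": "", "type": "PAGETITLE"},       # dropped: empty value
                {"value": ["id-42"], "type": "SESSIONID"}],
    "sort": "startTs",
    "order": "asc",
})
assert [f.type for f in payload.filters] == [LiveFilterType.session_id]
assert payload.sort == "TIMESTAMP" and payload.order == SortOrderType.asc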

backend/.dockerignore (new file)
@@ -0,0 +1,6 @@
# ignore .git and .cache folders
.git
.cache
**/build.sh
**/build_*.sh
**/*deploy.sh

@@ -10,13 +10,15 @@ RUN go mod download
FROM prepare AS build
COPY cmd cmd
COPY pkg pkg
-COPY services services
+COPY internal internal
ARG SERVICE_NAME
-RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/services/$SERVICE_NAME
+RUN CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o service -tags musl openreplay/backend/cmd/$SERVICE_NAME
-FROM alpine
+FROM alpine AS entrypoint
RUN apk add --no-cache ca-certificates
ENV TZ=UTC \
@@ -25,10 +27,9 @@ ENV TZ=UTC \
MAXMINDDB_FILE=/root/geoip.mmdb \
UAPARSER_FILE=/root/regexes.yaml \
HTTP_PORT=80 \
-BEACON_SIZE_LIMIT=7000000 \
KAFKA_USE_SSL=true \
KAFKA_MAX_POLL_INTERVAL_MS=400000 \
-REDIS_STREAMS_MAX_LEN=3000 \
+REDIS_STREAMS_MAX_LEN=10000 \
TOPIC_RAW_WEB=raw \
TOPIC_RAW_IOS=raw-ios \
TOPIC_CACHE=cache \
@@ -39,13 +40,25 @@ ENV TZ=UTC \
GROUP_DB=db \
GROUP_ENDER=ender \
GROUP_CACHE=cache \
GROUP_HEURISTICS=heuristics \
AWS_REGION_WEB=eu-central-1 \
AWS_REGION_IOS=eu-west-1 \
AWS_REGION_ASSETS=eu-central-1 \
CACHE_ASSETS=true \
ASSETS_SIZE_LIMIT=6291456 \
ASSETS_HEADERS="{ \"Cookie\": \"ABv=3;\" }" \
FS_CLEAN_HRS=72 \
-LOG_QUEUE_STATS_INTERVAL_SEC=60
+FILE_SPLIT_SIZE=500000 \
+LOG_QUEUE_STATS_INTERVAL_SEC=60 \
DB_BATCH_QUEUE_LIMIT=20 \
DB_BATCH_SIZE_LIMIT=10000000 \
PARTITIONS_NUMBER=16 \
QUEUE_MESSAGE_SIZE_LIMIT=1048576 \
BEACON_SIZE_LIMIT=1000000 \
USE_FAILOVER=false \
GROUP_STORAGE_FAILOVER=failover \
TOPIC_STORAGE_FAILOVER=storage-failover
ARG SERVICE_NAME

@@ -1,4 +1,4 @@
-FROM golang:1.13-alpine3.10 AS prepare
+FROM golang:1.18-alpine3.15 AS prepare
RUN apk add --no-cache git openssh openssl-dev pkgconf gcc g++ make libc-dev bash
@@ -10,13 +10,13 @@ RUN go mod download
FROM prepare AS build
COPY cmd cmd
COPY pkg pkg
-COPY services services
+COPY internal internal
-RUN for name in alerts assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags musl openreplay/backend/services/$name; done
+RUN for name in assets db ender http integrations sink storage;do CGO_ENABLED=1 GOOS=linux GOARCH=amd64 go build -o bin/$name -tags musl openreplay/backend/cmd/$name; done
-FROM alpine
+FROM alpine AS entrypoint
#FROM pygmy/alpine-tini:latest
RUN apk add --no-cache ca-certificates
@@ -26,8 +27,9 @@ ENV TZ=UTC \
MAXMINDDB_FILE=/root/geoip.mmdb \
UAPARSER_FILE=/root/regexes.yaml \
HTTP_PORT=80 \
-BEACON_SIZE_LIMIT=1000000 \
+BEACON_SIZE_LIMIT=7000000 \
KAFKA_USE_SSL=true \
KAFKA_MAX_POLL_INTERVAL_MS=400000 \
REDIS_STREAMS_MAX_LEN=3000 \
TOPIC_RAW_WEB=raw \
TOPIC_RAW_IOS=raw-ios \
@@ -42,10 +43,11 @@ ENV TZ=UTC \
AWS_REGION_WEB=eu-central-1 \
AWS_REGION_IOS=eu-west-1 \
AWS_REGION_ASSETS=eu-central-1 \
-CACHE_ASSETS=false \
+CACHE_ASSETS=true \
ASSETS_SIZE_LIMIT=6291456 \
-FS_CLEAN_HRS=12
+FS_CLEAN_HRS=12 \
+FILE_SPLIT_SIZE=300000 \
+LOG_QUEUE_STATS_INTERVAL_SEC=60
RUN mkdir $FS_DIR
#VOLUME [ $FS_DIR ] # Uncomment in case of using Bind mount.

backend/build.sh (mode changed: Normal file → Executable file)
@@ -13,9 +13,19 @@ ee="false"
check_prereq() {
which docker || {
echo "Docker not installed, please install docker."
-exit=1
+exit 1
}
-[[ exit -eq 1 ]] && exit 1
+return
}
function build_service() {
image="$1"
echo "BUILDING $image"
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --platform linux/amd64 --build-arg SERVICE_NAME=$image .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
}
return
}
function build_api(){
@@ -25,21 +35,15 @@ function build_api(){
ee="true"
}
[[ $2 != "" ]] && {
image="$2"
docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --build-arg SERVICE_NAME=$image .
[[ $PUSH_IMAGE -eq 1 ]] && {
docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
}
build_service $2
return
}
-for image in $(ls services);
+for image in $(ls cmd);
do
-docker build -t ${DOCKER_REPO:-'local'}/$image:${git_sha1} --build-arg SERVICE_NAME=$image .
-[[ $PUSH_IMAGE -eq 1 ]] && {
-docker push ${DOCKER_REPO:-'local'}/$image:${git_sha1}
-}
+build_service $image
echo "::set-output name=image::${DOCKER_REPO:-'local'}/$image:${git_sha1}"
done
echo "backend build completed"
}
check_prereq

@@ -1,45 +1,49 @@
package main
import (
"context"
"log"
"time"
"openreplay/backend/pkg/monitoring"
"os"
"os/signal"
"syscall"
"time"
"openreplay/backend/pkg/env"
"openreplay/backend/internal/assets"
"openreplay/backend/internal/assets/cacher"
config "openreplay/backend/internal/config/assets"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/services/assets/cacher"
)
func main() {
metrics := monitoring.New("assets")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
GROUP_CACHE := env.String("GROUP_CACHE")
TOPIC_CACHE := env.String("TOPIC_CACHE")
cfg := config.New()
-cacher := cacher.NewCacher(
-env.String("AWS_REGION"),
-env.String("S3_BUCKET_ASSETS"),
-env.String("ASSETS_ORIGIN"),
-env.Int("ASSETS_SIZE_LIMIT"),
-)
+cacher := cacher.NewCacher(cfg, metrics)
totalAssets, err := metrics.RegisterCounter("assets_total")
if err != nil {
log.Printf("can't create assets_total metric: %s", err)
}
consumer := queue.NewMessageConsumer(
-GROUP_CACHE,
-[]string{TOPIC_CACHE},
+cfg.GroupCache,
+[]string{cfg.TopicCache},
func(sessionID uint64, message messages.Message, e *types.Meta) {
switch msg := message.(type) {
case *messages.AssetCache:
cacher.CacheURL(sessionID, msg.URL)
totalAssets.Add(context.Background(), 1)
case *messages.ErrorEvent:
if msg.Source != "js_exception" {
return
}
sourceList, err := extractJSExceptionSources(&msg.Payload)
sourceList, err := assets.ExtractJSExceptionSources(&msg.Payload)
if err != nil {
log.Printf("Error on source extraction: %v", err)
return
@@ -50,14 +54,15 @@ func main() {
}
},
true,
cfg.MessageSizeLimit,
)
-tick := time.Tick(20 * time.Minute)
-log.Printf("Cacher service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
+log.Printf("Cacher service started\n")
+tick := time.Tick(20 * time.Minute)
for {
select {
case sig := <-sigchan:
@@ -66,6 +71,7 @@ func main() {
os.Exit(0)
case err := <-cacher.Errors:
log.Printf("Error while caching: %v", err)
// TODO: notify user
case <-tick:
cacher.UpdateTimeouts()
default:

backend/cmd/db/main.go (new file)
@@ -0,0 +1,141 @@
package main
import (
"errors"
"log"
"openreplay/backend/internal/config/db"
"openreplay/backend/internal/db/datasaver"
"openreplay/backend/pkg/handlers"
custom2 "openreplay/backend/pkg/handlers/custom"
"openreplay/backend/pkg/monitoring"
"openreplay/backend/pkg/sessions"
"time"
"os"
"os/signal"
"syscall"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/db/postgres"
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
)
func main() {
metrics := monitoring.New("db")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := db.New()
// Init database
pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, cfg.BatchQueueLimit, cfg.BatchSizeLimit, metrics), cfg.ProjectExpirationTimeoutMs)
defer pg.Close()
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
handlersFabric := func() []handlers.MessageProcessor {
return []handlers.MessageProcessor{
&custom2.EventMapper{},
custom2.NewInputEventBuilder(),
custom2.NewPageEventBuilder(),
}
}
// Create handler's aggregator
builderMap := sessions.NewBuilderMap(handlersFabric)
// Init modules
saver := datasaver.New(pg)
saver.InitStats()
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
// Handler logic
handler := func(sessionID uint64, msg messages.Message, meta *types.Meta) {
statsLogger.Collect(sessionID, meta)
// Just save session data into db without additional checks
if err := saver.InsertMessage(sessionID, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
log.Printf("Message Insertion Error %v, SessionID: %v, Message: %v", err, sessionID, msg)
}
return
}
session, err := pg.GetSession(sessionID)
if session == nil {
if err != nil && !errors.Is(err, cache.NilSessionInCacheError) {
log.Printf("Error on session retrieving from cache: %v, SessionID: %v, Message: %v", err, sessionID, msg)
}
return
}
// Save statistics to db
err = saver.InsertStats(session, msg)
if err != nil {
log.Printf("Stats Insertion Error %v; Session: %v, Message: %v", err, session, msg)
}
// Handle heuristics and save to temporary queue in memory
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
// Process saved heuristics messages as usual messages above in the code
builderMap.IterateSessionReadyMessages(sessionID, func(msg messages.Message) {
// TODO: DRY code (carefully with the return statement logic)
if err := saver.InsertMessage(sessionID, msg); err != nil {
if !postgres.IsPkeyViolation(err) {
log.Printf("Message Insertion Error %v; Session: %v, Message %v", err, session, msg)
}
return
}
if err := saver.InsertStats(session, msg); err != nil {
log.Printf("Stats Insertion Error %v; Session: %v, Message %v", err, session, msg)
}
})
}
// Init consumer
consumer := queue.NewMessageConsumer(
cfg.GroupDB,
[]string{
cfg.TopicRawWeb,
cfg.TopicAnalytics,
},
handler,
false,
cfg.MessageSizeLimit,
)
log.Printf("Db service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
commitTick := time.Tick(cfg.CommitBatchTimeout)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
consumer.Close()
os.Exit(0)
case <-commitTick:
// Send collected batches to db
pg.CommitBatches()
if err := saver.CommitStats(); err != nil {
log.Printf("Error on stats commit: %v", err)
}
// TODO?: separate stats & regular messages
if err := consumer.Commit(); err != nil {
log.Printf("Error on consumer commit: %v", err)
}
default:
// Handle new message from queue
err := consumer.ConsumeNext()
if err != nil {
log.Fatalf("Error on consumption: %v", err) // TODO: is always fatal?
}
}
}
}
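Note: InsertMessage above deliberately swallows primary-key violations, which is what makes at-least-once delivery from the queue safe to replay. An illustrative Python sketch of the same pattern (assuming psycopg2; table and column names are invented for the example):

from psycopg2.errors import UniqueViolation

def insert_message(cur, session_id, msg):
    try:
        cur.execute("INSERT INTO events (session_id, message_index) VALUES (%s, %s)",
                    (session_id, msg["index"]))
    except UniqueViolation:
        pass  # duplicate delivery of the same message: already stored, safe to skip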

backend/cmd/ender/main.go (new file)
@@ -0,0 +1,105 @@
package main
import (
"log"
"openreplay/backend/internal/config/ender"
"openreplay/backend/internal/sessionender"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/monitoring"
"time"
"os"
"os/signal"
"syscall"
"openreplay/backend/pkg/intervals"
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
)
//
func main() {
metrics := monitoring.New("ender")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
// Load service configuration
cfg := ender.New()
pg := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), cfg.ProjectExpirationTimeoutMs)
defer pg.Close()
// Init all modules
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
sessions, err := sessionender.New(metrics, intervals.EVENTS_SESSION_END_TIMEOUT, cfg.PartitionsNumber)
if err != nil {
log.Printf("can't init ender service: %s", err)
return
}
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
consumer := queue.NewMessageConsumer(
cfg.GroupEnder,
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
switch msg.(type) {
case *messages.SessionStart, *messages.SessionEnd:
// Skip several message types
return
}
// Test debug
if msg.Meta().Timestamp == 0 {
log.Printf("ZERO TS, sessID: %d, msgType: %d", sessionID, msg.TypeID())
}
statsLogger.Collect(sessionID, meta)
sessions.UpdateSession(sessionID, meta.Timestamp, msg.Meta().Timestamp)
},
false,
cfg.MessageSizeLimit,
)
log.Printf("Ender service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
producer.Close(cfg.ProducerTimeout)
if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
log.Printf("can't commit messages with offset: %s", err)
}
consumer.Close()
os.Exit(0)
case <-tick:
// Find ended sessions and send notification to other services
sessions.HandleEndedSessions(func(sessionID uint64, timestamp int64) bool {
msg := &messages.SessionEnd{Timestamp: uint64(timestamp)}
if err := pg.InsertSessionEnd(sessionID, msg.Timestamp); err != nil {
log.Printf("can't save sessionEnd to database, sessID: %d, err: %s", sessionID, err)
return false
}
if err := producer.Produce(cfg.TopicRawWeb, sessionID, messages.Encode(msg)); err != nil {
log.Printf("can't send sessionEnd to topic: %s; sessID: %d", err, sessionID)
return false
}
return true
})
producer.Flush(cfg.ProducerTimeout)
if err := consumer.CommitBack(intervals.EVENTS_BACK_COMMIT_GAP); err != nil {
log.Printf("can't commit messages with offset: %s", err)
}
default:
if err := consumer.ConsumeNext(); err != nil {
log.Fatalf("Error on consuming: %v", err)
}
}
}
}
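Note: the ender keeps the highest message timestamp per session (see the backend duration fix in this release) and emits SessionEnd once a session stays quiet past the timeout; the callback returns true only when the end was persisted and produced, so failed sessions are retried on the next tick. An illustrative Python sketch of that contract (the real logic lives in internal/sessionender):

import time

class SessionEnder:
    def __init__(self, timeout_ms):
        self.timeout_ms = timeout_ms
        self.last_seen = {}  # session_id -> highest message timestamp (ms)

    def update_session(self, session_id, ts_ms):
        self.last_seen[session_id] = max(self.last_seen.get(session_id, 0), ts_ms)

    def handle_ended_sessions(self, handler):
        now_ms = time.time() * 1000
        for sid, ts in list(self.last_seen.items()):
            if now_ms - ts > self.timeout_ms and handler(sid, ts):
                del self.last_seen[sid]  # forget only if the handler committed the end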

@@ -0,0 +1,92 @@
package main
import (
"log"
"openreplay/backend/internal/config/heuristics"
"openreplay/backend/pkg/handlers"
web2 "openreplay/backend/pkg/handlers/web"
"openreplay/backend/pkg/intervals"
logger "openreplay/backend/pkg/log"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/sessions"
"os"
"os/signal"
"syscall"
"time"
)
func main() {
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
// Load service configuration
cfg := heuristics.New()
// HandlersFabric returns the list of message handlers we want to be applied to each incoming message.
handlersFabric := func() []handlers.MessageProcessor {
return []handlers.MessageProcessor{
// web handlers
&web2.ClickRageDetector{},
&web2.CpuIssueDetector{},
&web2.DeadClickDetector{},
&web2.MemoryIssueDetector{},
&web2.NetworkIssueDetector{},
&web2.PerformanceAggregator{},
// iOS's handlers
//&ios2.AppNotResponding{},
//&ios2.ClickRageDetector{},
//&ios2.PerformanceAggregator{},
// Other handlers (you can add your custom handlers here)
//&custom.CustomHandler{},
}
}
// Create handler's aggregator
builderMap := sessions.NewBuilderMap(handlersFabric)
// Init logger
statsLogger := logger.NewQueueStats(cfg.LoggerTimeout)
// Init producer and consumer for data bus
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
consumer := queue.NewMessageConsumer(
cfg.GroupHeuristics,
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
statsLogger.Collect(sessionID, meta)
builderMap.HandleMessage(sessionID, msg, msg.Meta().Index)
},
false,
cfg.MessageSizeLimit,
)
log.Printf("Heuristics service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
tick := time.Tick(intervals.EVENTS_COMMIT_INTERVAL * time.Millisecond)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
producer.Close(cfg.ProducerTimeout)
consumer.Commit()
consumer.Close()
os.Exit(0)
case <-tick:
builderMap.IterateReadyMessages(func(sessionID uint64, readyMsg messages.Message) {
producer.Produce(cfg.TopicAnalytics, sessionID, messages.Encode(readyMsg))
})
producer.Flush(cfg.ProducerTimeout)
consumer.Commit()
default:
if err := consumer.ConsumeNext(); err != nil {
log.Fatalf("Error on consuming: %v", err)
}
}
}
}
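Note: handlersFabric/builderMap is the extension point here: each session gets a freshly built set of processors, every message is offered to all of them, and whatever they emit is later produced to the analytics topic. An illustrative Python sketch of the pattern (the processor API is simplified):

class BuilderMap:
    def __init__(self, handlers_fabric):
        self.handlers_fabric = handlers_fabric  # builds a fresh processor list per session
        self.per_session = {}

    def handle_message(self, session_id, msg):
        processors = self.per_session.setdefault(session_id, self.handlers_fabric())
        ready = []
        for p in processors:
            event = p.handle(msg)  # a detector may emit a derived event (e.g. a click-rage issue)
            if event is not None:
                ready.append(event)
        return ready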

backend/cmd/http/main.go (new file)
@@ -0,0 +1,64 @@
package main
import (
"log"
"openreplay/backend/internal/config/http"
"openreplay/backend/internal/http/router"
"openreplay/backend/internal/http/server"
"openreplay/backend/internal/http/services"
"openreplay/backend/pkg/monitoring"
"os"
"os/signal"
"syscall"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/queue"
)
func main() {
metrics := monitoring.New("http")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := http.New()
// Connect to queue
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
defer producer.Close(15000)
// Connect to database
dbConn := cache.NewPGCache(postgres.NewConn(cfg.Postgres, 0, 0, metrics), 1000*60*20)
defer dbConn.Close()
// Build all services
services := services.New(cfg, producer, dbConn)
// Init server's routes
router, err := router.NewRouter(cfg, services, metrics)
if err != nil {
log.Fatalf("failed while creating engine: %s", err)
}
// Init server
server, err := server.New(router.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
if err != nil {
log.Fatalf("failed while creating server: %s", err)
}
// Run server
go func() {
if err := server.Start(); err != nil {
log.Fatalf("Server error: %v\n", err)
}
}()
log.Printf("Server successfully started on port %v\n", cfg.HTTPPort)
// Wait stop signal to shut down server gracefully
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
<-sigchan
log.Printf("Shutting down the server\n")
server.Stop()
}

@@ -2,6 +2,9 @@ package main
import (
"log"
config "openreplay/backend/internal/config/integrations"
"openreplay/backend/internal/integrations/clientManager"
"openreplay/backend/pkg/monitoring"
"time"
"os"
@@ -9,23 +12,24 @@ import (
"syscall"
"openreplay/backend/pkg/db/postgres"
"openreplay/backend/pkg/env"
"openreplay/backend/pkg/intervals"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/token"
"openreplay/backend/services/integrations/clientManager"
)
//
func main() {
-log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
-TOPIC_RAW_WEB := env.String("TOPIC_RAW_WEB")
-POSTGRES_STRING := env.String("POSTGRES_STRING")
+metrics := monitoring.New("integrations")
-pg := postgres.NewConn(POSTGRES_STRING)
+log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
+cfg := config.New()
+pg := postgres.NewConn(cfg.PostgresURI, 0, 0, metrics)
defer pg.Close()
tokenizer := token.NewTokenizer(env.String("TOKEN_SECRET"))
tokenizer := token.NewTokenizer(cfg.TokenSecret)
manager := clientManager.NewManager()
@@ -42,10 +46,10 @@ func main() {
}
})
-producer := queue.NewProducer()
+producer := queue.NewProducer(cfg.MessageSizeLimit, true)
defer producer.Close(15000)
-listener, err := postgres.NewIntegrationsListener(POSTGRES_STRING)
+listener, err := postgres.NewIntegrationsListener(cfg.PostgresURI)
if err != nil {
log.Printf("Postgres listener error: %v\n", err)
log.Fatalf("Postgres listener error")
@@ -70,7 +74,7 @@ func main() {
log.Printf("Requesting all...\n")
manager.RequestAll()
case event := <-manager.Events:
log.Printf("New integration event: %+v\n", *event.RawErrorEvent)
log.Printf("New integration event: %+v\n", *event.IntegrationEvent)
sessionID := event.SessionID
if sessionID == 0 {
sessData, err := tokenizer.Parse(event.Token)
@@ -80,8 +84,7 @@ func main() {
}
sessionID = sessData.ID
}
-// TODO: send to ready-events topic. Otherwise it have to go through the events worker.
-producer.Produce(TOPIC_RAW_WEB, sessionID, messages.Encode(event.RawErrorEvent))
+producer.Produce(cfg.TopicAnalytics, sessionID, messages.Encode(event.IntegrationEvent))
case err := <-manager.Errors:
log.Printf("Integration error: %v\n", err)
case i := <-manager.RequestDataUpdates:

backend/cmd/sink/main.go (new file)
@@ -0,0 +1,141 @@
package main
import (
"context"
"encoding/binary"
"log"
"openreplay/backend/internal/sink/assetscache"
"openreplay/backend/internal/sink/oswriter"
"openreplay/backend/internal/storage"
"openreplay/backend/pkg/monitoring"
"time"
"os"
"os/signal"
"syscall"
"openreplay/backend/internal/config/sink"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/url/assets"
)
func main() {
metrics := monitoring.New("sink")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := sink.New()
if _, err := os.Stat(cfg.FsDir); os.IsNotExist(err) {
log.Fatalf("%v doesn't exist. %v", cfg.FsDir, err)
}
writer := oswriter.NewWriter(cfg.FsUlimit, cfg.FsDir)
producer := queue.NewProducer(cfg.MessageSizeLimit, true)
defer producer.Close(cfg.ProducerCloseTimeout)
rewriter := assets.NewRewriter(cfg.AssetsOrigin)
assetMessageHandler := assetscache.New(cfg, rewriter, producer)
counter := storage.NewLogCounter()
totalMessages, err := metrics.RegisterCounter("messages_total")
if err != nil {
log.Printf("can't create messages_total metric: %s", err)
}
savedMessages, err := metrics.RegisterCounter("messages_saved")
if err != nil {
log.Printf("can't create messages_saved metric: %s", err)
}
messageSize, err := metrics.RegisterHistogram("messages_size")
if err != nil {
log.Printf("can't create messages_size metric: %s", err)
}
consumer := queue.NewMessageConsumer(
cfg.GroupSink,
[]string{
cfg.TopicRawWeb,
},
func(sessionID uint64, message Message, _ *types.Meta) {
// Process assets
message = assetMessageHandler.ParseAssets(sessionID, message)
totalMessages.Add(context.Background(), 1)
// Filter message
typeID := message.TypeID()
// Send SessionEnd trigger to storage service
switch message.(type) {
case *SessionEnd:
if err := producer.Produce(cfg.TopicTrigger, sessionID, Encode(message)); err != nil {
log.Printf("can't send SessionEnd to trigger topic: %s; sessID: %d", err, sessionID)
}
return
}
if !IsReplayerType(typeID) {
return
}
// If message timestamp is empty, use at least ts of session start
ts := message.Meta().Timestamp
if ts == 0 {
log.Printf("zero ts; sessID: %d, msg: %+v", sessionID, message)
} else {
// Log ts of last processed message
counter.Update(sessionID, time.UnixMilli(ts))
}
value := message.Encode()
var data []byte
if IsIOSType(typeID) {
data = value
} else {
data = make([]byte, len(value)+8)
copy(data[8:], value[:])
binary.LittleEndian.PutUint64(data[0:], message.Meta().Index)
}
if err := writer.Write(sessionID, data); err != nil {
log.Printf("Writer error: %v\n", err)
}
messageSize.Record(context.Background(), float64(len(data)))
savedMessages.Add(context.Background(), 1)
},
false,
cfg.MessageSizeLimit,
)
log.Printf("Sink service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
tick := time.Tick(30 * time.Second)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
if err := consumer.Commit(); err != nil {
log.Printf("can't commit messages: %s", err)
}
consumer.Close()
os.Exit(0)
case <-tick:
if err := writer.SyncAll(); err != nil {
log.Fatalf("Sync error: %v\n", err)
}
counter.Print()
if err := consumer.Commit(); err != nil {
log.Printf("can't commit messages: %s", err)
}
default:
err := consumer.ConsumeNext()
if err != nil {
log.Fatalf("Error on consumption: %v", err)
}
}
}
}
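Note: the sink frames each web message on disk as an 8-byte little-endian message index followed by the encoded payload (iOS payloads are written as-is). An illustrative Python sketch of the web framing:

import struct

def frame_web_message(index, payload):
    # 8-byte little-endian index prefix, then the raw encoded message
    return struct.pack("<Q", index) + payload

record = frame_web_message(7, b"\x01\x02")
assert record[:8] == (7).to_bytes(8, "little") and record[8:] == b"\x01\x02"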

@@ -0,0 +1,82 @@
package main
import (
"log"
"openreplay/backend/pkg/failover"
"openreplay/backend/pkg/monitoring"
"os"
"os/signal"
"strconv"
"syscall"
"time"
config "openreplay/backend/internal/config/storage"
"openreplay/backend/internal/storage"
"openreplay/backend/pkg/messages"
"openreplay/backend/pkg/queue"
"openreplay/backend/pkg/queue/types"
s3storage "openreplay/backend/pkg/storage"
)
func main() {
metrics := monitoring.New("storage")
log.SetFlags(log.LstdFlags | log.LUTC | log.Llongfile)
cfg := config.New()
s3 := s3storage.NewS3(cfg.S3Region, cfg.S3Bucket)
srv, err := storage.New(cfg, s3, metrics)
if err != nil {
log.Printf("can't init storage service: %s", err)
return
}
counter := storage.NewLogCounter()
sessionFinder, err := failover.NewSessionFinder(cfg, srv)
if err != nil {
log.Fatalf("can't init sessionFinder module: %s", err)
}
consumer := queue.NewMessageConsumer(
cfg.GroupStorage,
[]string{
cfg.TopicTrigger,
},
func(sessionID uint64, msg messages.Message, meta *types.Meta) {
switch m := msg.(type) {
case *messages.SessionEnd:
if err := srv.UploadKey(strconv.FormatUint(sessionID, 10), 5); err != nil {
sessionFinder.Find(sessionID, m.Timestamp)
}
// Log timestamp of last processed session
counter.Update(sessionID, time.UnixMilli(meta.Timestamp))
}
},
true,
cfg.MessageSizeLimit,
)
log.Printf("Storage service started\n")
sigchan := make(chan os.Signal, 1)
signal.Notify(sigchan, syscall.SIGINT, syscall.SIGTERM)
counterTick := time.Tick(time.Second * 30)
for {
select {
case sig := <-sigchan:
log.Printf("Caught signal %v: terminating\n", sig)
sessionFinder.Stop()
consumer.Close()
os.Exit(0)
case <-counterTick:
go counter.Print()
default:
err := consumer.ConsumeNext()
if err != nil {
log.Fatalf("Error on consumption: %v", err)
}
}
}
}

@@ -4,7 +4,7 @@ go 1.18
require (
cloud.google.com/go/logging v1.4.2
-github.com/ClickHouse/clickhouse-go v1.4.3
+github.com/ClickHouse/clickhouse-go v1.5.4
github.com/aws/aws-sdk-go v1.35.23
github.com/btcsuite/btcutil v1.0.2
github.com/elastic/go-elasticsearch/v7 v7.13.1
@@ -17,21 +17,33 @@ require (
github.com/klauspost/pgzip v1.2.5
github.com/oschwald/maxminddb-golang v1.7.0
github.com/pkg/errors v0.9.1
github.com/sethvargo/go-envconfig v0.7.0
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe
-golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420
-google.golang.org/api v0.50.0
-gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0
+go.opentelemetry.io/otel v1.7.0
+go.opentelemetry.io/otel/exporters/prometheus v0.30.0
+go.opentelemetry.io/otel/metric v0.30.0
+go.opentelemetry.io/otel/sdk/metric v0.30.0
+golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2
+google.golang.org/api v0.81.0
+gopkg.in/confluentinc/confluent-kafka-go.v1 v1.8.2
)
require (
-cloud.google.com/go v0.84.0 // indirect
+cloud.google.com/go v0.100.2 // indirect
+cloud.google.com/go/compute v1.6.1 // indirect
+cloud.google.com/go/iam v0.3.0 // indirect
cloud.google.com/go/storage v1.14.0 // indirect
github.com/beorn7/perks v1.0.1 // indirect
github.com/cespare/xxhash/v2 v2.1.2 // indirect
github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58 // indirect
-github.com/confluentinc/confluent-kafka-go v1.7.0 // indirect
-github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e // indirect
+github.com/confluentinc/confluent-kafka-go v1.9.0 // indirect
+github.com/go-logr/logr v1.2.3 // indirect
+github.com/go-logr/stdr v1.2.2 // indirect
+github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da // indirect
github.com/golang/protobuf v1.5.2 // indirect
-github.com/google/go-cmp v0.5.6 // indirect
-github.com/googleapis/gax-go/v2 v2.0.5 // indirect
+github.com/google/go-cmp v0.5.8 // indirect
+github.com/googleapis/gax-go/v2 v2.4.0 // indirect
github.com/jackc/chunkreader/v2 v2.0.1 // indirect
github.com/jackc/pgio v1.0.0 // indirect
github.com/jackc/pgpassfile v1.0.0 // indirect
@@ -40,21 +52,26 @@ require (
github.com/jackc/pgtype v1.3.0 // indirect
github.com/jackc/puddle v1.1.0 // indirect
github.com/jmespath/go-jmespath v0.4.0 // indirect
-github.com/jstemmer/go-junit-report v0.9.1 // indirect
github.com/klauspost/compress v1.11.9 // indirect
+github.com/kr/pretty v0.3.0 // indirect
+github.com/matttproud/golang_protobuf_extensions v1.0.1 // indirect
+github.com/prometheus/client_golang v1.12.1 // indirect
+github.com/prometheus/client_model v0.2.0 // indirect
+github.com/prometheus/common v0.32.1 // indirect
+github.com/prometheus/procfs v0.7.3 // indirect
go.opencensus.io v0.23.0 // indirect
-golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 // indirect
-golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 // indirect
-golang.org/x/mod v0.4.2 // indirect
-golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 // indirect
-golang.org/x/sync v0.0.0-20210220032951-036812b2e83c // indirect
-golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 // indirect
-golang.org/x/text v0.3.6 // indirect
-golang.org/x/tools v0.1.4 // indirect
-golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 // indirect
+go.opentelemetry.io/otel/sdk v1.7.0 // indirect
+go.opentelemetry.io/otel/trace v1.7.0 // indirect
+golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 // indirect
+golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 // indirect
+golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 // indirect
+golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a // indirect
+golang.org/x/text v0.3.7 // indirect
+golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df // indirect
google.golang.org/appengine v1.6.7 // indirect
-google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 // indirect
-google.golang.org/grpc v1.38.0 // indirect
-google.golang.org/protobuf v1.26.0 // indirect
-gopkg.in/yaml.v2 v2.2.8 // indirect
+google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd // indirect
+google.golang.org/grpc v1.46.2 // indirect
+google.golang.org/protobuf v1.28.0 // indirect
+gopkg.in/yaml.v2 v2.4.0 // indirect
+gopkg.in/yaml.v3 v3.0.0 // indirect
)

@@ -15,20 +15,36 @@ cloud.google.com/go v0.62.0/go.mod h1:jmCYTdRCQuc1PHIIJ/maLInMho30T/Y0M4hTdTShOY
cloud.google.com/go v0.65.0/go.mod h1:O5N8zS7uWy9vkA9vayVHs65eM1ubvY4h553ofrNHObY=
cloud.google.com/go v0.72.0/go.mod h1:M+5Vjvlc2wnp6tjzE102Dw08nGShTscUx2nZMufOKPI=
cloud.google.com/go v0.74.0/go.mod h1:VV1xSbzvo+9QJOxLDaJfTjx5e+MePCpCWwvftOeQmWk=
cloud.google.com/go v0.75.0/go.mod h1:VGuuCn7PG0dwsd5XPVm2Mm3wlh3EL55/79EKB6hlPTY=
cloud.google.com/go v0.78.0/go.mod h1:QjdrLG0uq+YwhjoVOLsS1t7TW8fs36kLs4XO5R5ECHg=
cloud.google.com/go v0.79.0/go.mod h1:3bzgcEeQlzbuEAYu4mrWhKqWjmpprinYgKJLgKHnbb8=
cloud.google.com/go v0.81.0/go.mod h1:mk/AM35KwGk/Nm2YSeZbxXdrNK3KZOYHmLkOqC2V6E0=
cloud.google.com/go v0.83.0/go.mod h1:Z7MJUsANfY0pYPdw0lbnivPx4/vhy/e2FEkSkF7vAVY=
cloud.google.com/go v0.84.0 h1:hVhK90DwCdOAYGME/FJd9vNIZye9HBR6Yy3fu4js3N8=
cloud.google.com/go v0.84.0/go.mod h1:RazrYuxIK6Kb7YrzzhPoLmCVzl7Sup4NrbKPg8KHSUM=
cloud.google.com/go v0.87.0/go.mod h1:TpDYlFy7vuLzZMMZ+B6iRiELaY7z/gJPaqbMx6mlWcY=
cloud.google.com/go v0.90.0/go.mod h1:kRX0mNRHe0e2rC6oNakvwQqzyDmg57xJ+SZU1eT2aDQ=
cloud.google.com/go v0.93.3/go.mod h1:8utlLll2EF5XMAV15woO4lSbWQlk8rer9aLOfLh7+YI=
cloud.google.com/go v0.94.1/go.mod h1:qAlAugsXlC+JWO+Bke5vCtc9ONxjQT3drlTTnAplMW4=
cloud.google.com/go v0.97.0/go.mod h1:GF7l59pYBVlXQIBLx3a761cZ41F9bBH3JUlihCt2Udc=
cloud.google.com/go v0.99.0/go.mod h1:w0Xx2nLzqWJPuozYQX+hFfCSI8WioryfRDzkoI/Y2ZA=
cloud.google.com/go v0.100.2 h1:t9Iw5QH5v4XtlEQaCtUY7x6sCABps8sW0acw7e2WQ6Y=
cloud.google.com/go v0.100.2/go.mod h1:4Xra9TjzAeYHrl5+oeLlzbM2k3mjVhZh4UqTZ//w99A=
cloud.google.com/go/bigquery v1.0.1/go.mod h1:i/xbL2UlR5RvWAURpBYZTtm/cXjCha9lbfbpx4poX+o=
cloud.google.com/go/bigquery v1.3.0/go.mod h1:PjpwJnslEMmckchkHFfq+HTD2DmtT67aNFKH1/VBDHE=
cloud.google.com/go/bigquery v1.4.0/go.mod h1:S8dzgnTigyfTmLBfrtrhyYhwRxG72rYxvftPBK2Dvzc=
cloud.google.com/go/bigquery v1.5.0/go.mod h1:snEHRnqQbz117VIFhE8bmtwIDY80NLUZUMb4Nv6dBIg=
cloud.google.com/go/bigquery v1.7.0/go.mod h1://okPTzCYNXSlb24MZs83e2Do+h+VXtc4gLoIoXIAPc=
cloud.google.com/go/bigquery v1.8.0/go.mod h1:J5hqkt3O0uAFnINi6JXValWIb1v0goeZM77hZzJN/fQ=
cloud.google.com/go/compute v0.1.0/go.mod h1:GAesmwr110a34z04OlxYkATPBEfVhkymfTBXtfbBFow=
cloud.google.com/go/compute v1.3.0/go.mod h1:cCZiE1NHEtai4wiufUhW8I8S1JKkAnhnQJWM7YD99wM=
cloud.google.com/go/compute v1.5.0/go.mod h1:9SMHyhJlzhlkJqrPAc839t2BZFTSk6Jdj6mkzQJeu0M=
cloud.google.com/go/compute v1.6.0/go.mod h1:T29tfhtVbq1wvAPo0E3+7vhgmkOYeXjhFvz/FMzPu0s=
cloud.google.com/go/compute v1.6.1 h1:2sMmt8prCn7DPaG4Pmh0N3Inmc8cT8ae5k1M6VJ9Wqc=
cloud.google.com/go/compute v1.6.1/go.mod h1:g85FgpzFvNULZ+S8AYq87axRKuf2Kh7deLqV/jJ3thU=
cloud.google.com/go/datastore v1.0.0/go.mod h1:LXYbyblFSglQ5pkeyhO+Qmw7ukd3C+pD7TKLgZqpHYE=
cloud.google.com/go/datastore v1.1.0/go.mod h1:umbIZjpQpHh4hmRpGhH4tLFup+FVzqBi1b3c64qFpCk=
cloud.google.com/go/iam v0.3.0 h1:exkAomrVUuzx9kWFI1wm3KI0uoDeUFPB4kKGzx6x+Gc=
cloud.google.com/go/iam v0.3.0/go.mod h1:XzJPvDayI+9zsASAFO68Hk07u3z+f+JrT2xXNdp4bnY=
cloud.google.com/go/logging v1.4.2 h1:Mu2Q75VBDQlW1HlBMjTX4X84UFR73G1TiLlRYc/b7tA=
cloud.google.com/go/logging v1.4.2/go.mod h1:jco9QZSx8HiVVqLJReq7z7bVdj0P1Jb9PDFs63T+axo=
cloud.google.com/go/pubsub v1.0.1/go.mod h1:R0Gpsv3s54REJCy4fxDixWD93lHJMoZTyQ2kNxGRt3I=
@@ -39,16 +55,30 @@ cloud.google.com/go/storage v1.0.0/go.mod h1:IhtSnM/ZTZV8YYJWCY8RULGVqBDmpoyjwiy
cloud.google.com/go/storage v1.5.0/go.mod h1:tpKbwo567HUNpVclU5sGELwQWBDZ8gh0ZeosJ0Rtdos=
cloud.google.com/go/storage v1.6.0/go.mod h1:N7U0C8pVQ/+NIKOBQyamJIeKQKkZ+mxpohlUTyfDhBk=
cloud.google.com/go/storage v1.8.0/go.mod h1:Wv1Oy7z6Yz3DshWRJFhqM/UCfaWIRTdp0RXyy7KQOVs=
cloud.google.com/go/storage v1.10.0 h1:STgFzyU5/8miMl0//zKh2aQeTyeaUH3WN9bSUiJ09bA=
cloud.google.com/go/storage v1.10.0/go.mod h1:FLPqc6j+Ki4BU591ie1oL6qBQGu2Bl/tZ9ullr3+Kg0=
cloud.google.com/go/storage v1.14.0 h1:6RRlFMv1omScs6iq2hfE3IvgE+l6RfJPampq8UZc5TU=
cloud.google.com/go/storage v1.14.0/go.mod h1:GrKmX003DSIwi9o29oFT7YDnHYwZoctc3fOKtUw0Xmo=
dmitri.shuralyov.com/gpu/mtl v0.0.0-20190408044501-666a987793e9/go.mod h1:H6x//7gZCb22OMCxBHrMx7a5I7Hp++hsVxbQ4BYO7hU=
github.com/BurntSushi/toml v0.3.1/go.mod h1:xHWCNGjB5oqiDr8zfno3MHue2Ht5sIBksp03qcyfWMU=
github.com/BurntSushi/xgb v0.0.0-20160522181843-27f122750802/go.mod h1:IVnqGOEym/WlBOVXweHU+Q+/VP0lqqI8lqeDx9IjBqo=
github.com/ClickHouse/clickhouse-go v1.4.3 h1:iAFMa2UrQdR5bHJ2/yaSLffZkxpcOYQMCUuKeNXGdqc=
github.com/ClickHouse/clickhouse-go v1.4.3/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/ClickHouse/clickhouse-go v1.5.4 h1:cKjXeYLNWVJIx2J1K6H2CqyRmfwVJVY1OV1coaaFcI0=
github.com/ClickHouse/clickhouse-go v1.5.4/go.mod h1:EaI/sW7Azgz9UATzd5ZdZHRUhHgv5+JMS9NSr2smCJI=
github.com/OneOfOne/xxhash v1.2.2/go.mod h1:HSdplMjZKSmBqAxg5vPj2TmRDmfkzw+cTzAElWljhcU=
github.com/aead/siphash v1.0.1/go.mod h1:Nywa3cDsYNNK3gaciGTWPwHt0wlpNV15vwmswBAUSII=
github.com/alecthomas/template v0.0.0-20160405071501-a0175ee3bccc/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/template v0.0.0-20190718012654-fb15b899a751/go.mod h1:LOuyumcjzFXgccqObfd/Ljyb9UuFJ6TxHnclSeseNhc=
github.com/alecthomas/units v0.0.0-20151022065526-2efee857e7cf/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190717042225-c3de453c63f4/go.mod h1:ybxpYRFXyAe+OPACYpWeL0wqObRcbAqCMya13uyzqw0=
github.com/alecthomas/units v0.0.0-20190924025748-f65c72e2690d/go.mod h1:rBZYJk541a8SKzHPHnH3zbiI+7dagKZ0cgpgrD7Fyho=
github.com/antihax/optional v1.0.0/go.mod h1:uupD/76wgC+ih3iEmQUL+0Ugr19nfwCT1kdvxnR2qWY=
github.com/aws/aws-sdk-go v1.35.23 h1:SCP0d0XvyJTDmfnHEQPvBaYi3kea1VNUo7uQmkVgFts=
github.com/aws/aws-sdk-go v1.35.23/go.mod h1:tlPOdRjfxPBpNIwqDj61rmsnA85v9jc0Ps9+muhnW+k=
github.com/benbjohnson/clock v1.3.0 h1:ip6w0uFQkncKQ979AypyG0ER7mqUSBdKLOgAle/AT8A=
github.com/benbjohnson/clock v1.3.0/go.mod h1:J11/hYXuz8f4ySSvYwY0FKfm+ezbsZBKZxNJlLklBHA=
github.com/beorn7/perks v0.0.0-20180321164747-3a771d992973/go.mod h1:Dwedo/Wpr24TaqPxmxbtue+5NUziq4I4S80YR8gNf3Q=
github.com/beorn7/perks v1.0.0/go.mod h1:KWe93zE9D1o94FZ5RNwFwVgaQK1VOXiVxmqh+CedLV8=
github.com/beorn7/perks v1.0.1 h1:VlbKKnNfV8bJzeqoa4cOKqO6bYr3WgKZxO8Z16+hsOM=
github.com/beorn7/perks v1.0.1/go.mod h1:G2ZrVWU2WbWT9wwq4/hrbKbnv/1ERSJQ0ibhJ6rlkpw=
github.com/bkaradzic/go-lz4 v1.0.0 h1:RXc4wYsyz985CkXXeX04y4VnZFGG8Rd43pRaHsOXAKk=
github.com/bkaradzic/go-lz4 v1.0.0/go.mod h1:0YdlkowM3VswSROI7qDxhRvJ3sLhlFrRRwjwegp5jy4=
github.com/btcsuite/btcd v0.20.1-beta/go.mod h1:wVuoA8VJLEcwgqHBwHmzLRazpKxTv13Px/pDuV7OomQ=
@@ -62,6 +92,10 @@ github.com/btcsuite/snappy-go v0.0.0-20151229074030-0bdef8d06723/go.mod h1:8woku
github.com/btcsuite/websocket v0.0.0-20150119174127-31079b680792/go.mod h1:ghJtEyQwv5/p4Mg4C0fgbePVuGr935/5ddU9Z3TmDRY=
github.com/btcsuite/winsvc v1.0.0/go.mod h1:jsenWakMcC0zFBFurPLEAyrnc/teJEM1O46fmI40EZs=
github.com/census-instrumentation/opencensus-proto v0.2.1/go.mod h1:f6KPmirojxKA12rnyqOA5BBL4O983OfeGPqjHWSTneU=
github.com/cespare/xxhash v1.1.0/go.mod h1:XrSqR1VqqWfGrhpAt58auRo0WTKS1nRRg3ghfAqPWnc=
github.com/cespare/xxhash/v2 v2.1.1/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/cespare/xxhash/v2 v2.1.2 h1:YRXhKfTDauu4ajMg1TPgFO5jnlC2HCbmLXMcTG5cbYE=
github.com/cespare/xxhash/v2 v2.1.2/go.mod h1:VGX0DQ3Q6kWi7AoAeZDth3/j3BFtOZR5XLFGgcrjCOs=
github.com/chzyer/logex v1.1.10/go.mod h1:+Ywpsq7O8HXn0nuIou7OrIPyXbp3wmkHB+jjWRnGsAI=
github.com/chzyer/readline v0.0.0-20180603132655-2972be24d48e/go.mod h1:nSuG5e5PlCu98SY8svDHJxuZscDgtXS6KTTbou5AhLI=
github.com/chzyer/test v0.0.0-20180213035817-a1ea475d72b1/go.mod h1:Q3SI9o4m/ZMnBNeIyt5eFwwo7qiLfzFZmjNmxjkiQlU=
@@ -71,13 +105,20 @@ github.com/cloudflare/golz4 v0.0.0-20150217214814-ef862a3cdc58/go.mod h1:EOBUe0h
github.com/cncf/udpa/go v0.0.0-20191209042840-269d4d468f6f/go.mod h1:M8M6+tZqaGXZJjfX53e64911xZQV5JYwmTeXPW+k8Sc=
github.com/cncf/udpa/go v0.0.0-20200629203442-efcf912fb354/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20201120205902-5459f2c99403/go.mod h1:WmhPx2Nbnhtbo57+VJT5O0JRkEi1Wbu0z5j0R8u5Hbk=
github.com/cncf/udpa/go v0.0.0-20210930031921-04548b0d99d4/go.mod h1:6pvJx4me5XPnfI9Z40ddWsdw2W/uZgQLFXToKeRcDiI=
github.com/cncf/xds/go v0.0.0-20210312221358-fbca930ec8ed/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210805033703-aa0b78936158/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20210922020428-25de7278fc84/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211001041855-01bcc9b48dfe/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cncf/xds/go v0.0.0-20211011173535-cb28da3451f1/go.mod h1:eXthEFrGJvWHgFFCl3hGmgk+/aYT6PnTQLykKQRLhEs=
github.com/cockroachdb/apd v1.1.0 h1:3LFP3629v+1aKXU5Q37mxmRxX/pIu1nijXydLShEq5I=
github.com/cockroachdb/apd v1.1.0/go.mod h1:8Sl8LxpKi29FqWXR16WEFZRNSz3SoPzUzeMeY4+DwBQ=
github.com/confluentinc/confluent-kafka-go v1.7.0 h1:tXh3LWb2Ne0WiU3ng4h5qiGA9XV61rz46w60O+cq8bM=
github.com/confluentinc/confluent-kafka-go v1.7.0/go.mod h1:u2zNLny2xq+5rWeTQjFHbDzzNuba4P1vo31r9r4uAdg=
github.com/confluentinc/confluent-kafka-go v1.9.0 h1:d1k62oAuQVxgdMdiDQnpkABbtIWTBwXHpDcyGQUw5QQ=
github.com/confluentinc/confluent-kafka-go v1.9.0/go.mod h1:WDFs+KlhHITEoCzEfHSNgj5aP7vjajyYbZpvTEGs1sE=
github.com/coreos/go-systemd v0.0.0-20190321100706-95778dfbb74e/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/coreos/go-systemd v0.0.0-20190719114852-fd7a80b32e1f/go.mod h1:F5haX7vjVVG0kc13fIWeqUViNPyEJxv/OmvnBo0Yme4=
github.com/creack/pty v1.1.7/go.mod h1:lj5s0c3V2DBrqTV7llrYr5NG6My20zk30Fl46Y7DoTY=
github.com/creack/pty v1.1.9/go.mod h1:oKZEueFk5CKHvIhNR5MUki03XCEU+Q6VDXinZuGJ33E=
github.com/davecgh/go-spew v0.0.0-20171005155431-ecdeabc65495/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.0/go.mod h1:J7Y8YcW2NihsgmVo/mv3lAwl/skON4iLHjSsI+c5H38=
github.com/davecgh/go-spew v1.1.1 h1:vj9j/u1bqnvCEfJOwUhtlOARqs3+rkHYY13jYWTU97c=
@@ -90,22 +131,39 @@ github.com/envoyproxy/go-control-plane v0.9.4/go.mod h1:6rpuAdCZL397s3pYoYcLgu1m
github.com/envoyproxy/go-control-plane v0.9.7/go.mod h1:cwu0lG7PUMfa9snN8LXBig5ynNVH9qI8YYLbd1fK2po=
github.com/envoyproxy/go-control-plane v0.9.9-0.20201210154907-fd9021fe5dad/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210217033140-668b12f5399d/go.mod h1:cXg6YxExXjJnVBQHBLXeUAgxn2UodCpnH306RInaBQk=
github.com/envoyproxy/go-control-plane v0.9.9-0.20210512163311-63b5d3c536b0/go.mod h1:hliV/p42l8fGbc6Y9bQ70uLwIvmJyVE5k4iMKlh8wCQ=
github.com/envoyproxy/go-control-plane v0.9.10-0.20210907150352-cf90f659a021/go.mod h1:AFq3mo9L8Lqqiid3OhADV3RfLJnjiw63cSpi+fDTRC0=
github.com/envoyproxy/go-control-plane v0.10.2-0.20220325020618-49ff273808a1/go.mod h1:KJwIaB5Mv44NWtYuAOFCVOjcI94vtpEz2JU/D2v6IjE=
github.com/envoyproxy/protoc-gen-validate v0.1.0/go.mod h1:iSmxcyjqTsJpI2R4NaDN7+kN2VEUnK/pcBlmesArF7c=
github.com/fsnotify/fsnotify v1.4.7/go.mod h1:jwhsz4b93w/PPRr/qN1Yymfu8t87LnFCMoQvtojpjFo=
github.com/ghodss/yaml v1.0.0/go.mod h1:4dBDuWmgqj2HViK6kFavaiC9ZROes6MMH2rRYeMEF04=
github.com/go-gl/glfw v0.0.0-20190409004039-e6da0acd62b1/go.mod h1:vR7hzQXu2zJy9AVAgeJqvqgH9Q5CA+iKCZ2gyEVpxRU=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20191125211704-12ad95a8df72/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-gl/glfw/v3.3/glfw v0.0.0-20200222043503-6f7a984d4dc4/go.mod h1:tQ2UAYgL5IevRw8kRxooKSPJfGvJ9fJQFa0TUsXzTg8=
github.com/go-kit/kit v0.8.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/kit v0.9.0/go.mod h1:xBxKIO96dXMWWy0MnWVtmwkA9/13aqxPnvrjFYMA2as=
github.com/go-kit/log v0.1.0/go.mod h1:zbhenjAZHb184qTLMA9ZjW7ThYL0H2mk7Q6pNt4vbaY=
github.com/go-logfmt/logfmt v0.3.0/go.mod h1:Qt1PoO58o5twSAckw1HlFXLmHsOX5/0LbT9GBnD5lWE=
github.com/go-logfmt/logfmt v0.4.0/go.mod h1:3RMwSq7FuexP4Kalkev3ejPJsZTpXXBr9+V4qmtdjCk=
github.com/go-logfmt/logfmt v0.5.0/go.mod h1:wCYkCAKZfumFQihp8CzCvQ3paCTfi41vtzG1KdI/P7A=
github.com/go-logr/logr v1.2.2/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/logr v1.2.3 h1:2DntVwHkVopvECVRSlL5PSo9eG+cAkDCuckLubN+rq0=
github.com/go-logr/logr v1.2.3/go.mod h1:jdQByPbusPIv2/zmleS9BjJVeZ6kBagPoEUsqbVz/1A=
github.com/go-logr/stdr v1.2.2 h1:hSWxHoqTgW2S2qGc0LTAI563KZ5YKYRhT3MFKZMbjag=
github.com/go-logr/stdr v1.2.2/go.mod h1:mMo/vtBO5dYbehREoey6XUKy/eSumjCCveDpRre4VKE=
github.com/go-redis/redis v6.15.9+incompatible h1:K0pv1D7EQUjfyoMql+r/jZqCLizCGKFlFgcHWWmHQjg=
github.com/go-redis/redis v6.15.9+incompatible/go.mod h1:NAIEuMOZ/fxfXJIrKDQDz8wamY7mA7PouImQ2Jvg6kA=
github.com/go-sql-driver/mysql v1.4.0/go.mod h1:zAC/RDZ24gD3HViQzih4MyKcchzm+sOG5ZlKdlhCg5w=
github.com/go-stack/stack v1.8.0/go.mod h1:v0f6uXyyMGvRgIKkXu+yp6POWl0qKG85gN/melR3HDY=
github.com/gofrs/uuid v3.2.0+incompatible h1:y12jRkkFxsd7GpqdSZ+/KCs/fJbqpEXSGd4+jfEaewE=
github.com/gofrs/uuid v3.2.0+incompatible/go.mod h1:b2aQJv3Z4Fp6yNu3cdSllBxTCLRxnplIgP/c0N/04lM=
github.com/gogo/protobuf v1.1.1/go.mod h1:r8qH/GZQm5c6nD/R0oafs1akxWv10x8SbQlK7atdtwQ=
github.com/golang/glog v0.0.0-20160126235308-23def4e6c14b/go.mod h1:SBH7ygxi8pfUlaOkMMuAQtPIUF8ecWP5IEl/CR7VP2Q=
github.com/golang/groupcache v0.0.0-20190702054246-869f871628b6/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20191227052852-215e87163ea7/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e h1:1r7pUrabqp18hOBcwBwiTsbnFeTZHV9eER/QT5JVZxY=
github.com/golang/groupcache v0.0.0-20200121045136-8c9f03a8e57e/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da h1:oI5xCqsCo564l8iNU+DwB5epxmsaqB+rhGL0m5jtYqE=
github.com/golang/groupcache v0.0.0-20210331224755-41bb18bfe9da/go.mod h1:cIg4eruTrX1D+g88fzRXU5OdNfaM+9IcxsU14FzY7Hc=
github.com/golang/mock v1.1.1/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.2.0/go.mod h1:oTYuIxOrZwtPieC+H1uAHpcLFnEyAGVDL/k47Jfbm0A=
github.com/golang/mock v1.3.1/go.mod h1:sBzyDLLjw3U8JLTeZvSv8jJB+tU5PVekmnlKIyFUx0Y=
@@ -114,6 +172,7 @@ github.com/golang/mock v1.4.1/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt
github.com/golang/mock v1.4.3/go.mod h1:UOMv5ysSaYNkG+OFQykRIcU/QvvxJf3p21QfJ2Bt3cw=
github.com/golang/mock v1.4.4/go.mod h1:l3mdAwkq5BuhzHwde/uurv3sEJeZMXNpwsxVWU71h+4=
github.com/golang/mock v1.5.0/go.mod h1:CWnOUgYIOo4TcNZ0wHX3YZCqsaM1I1Jvs6v3mP3KVu8=
github.com/golang/mock v1.6.0/go.mod h1:p6yTPP+5HYm5mzsMV8JkE6ZKdX+/wYM6Hr+LicevLPs=
github.com/golang/protobuf v1.2.0/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.1/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
github.com/golang/protobuf v1.3.2/go.mod h1:6lQm79b+lXiMfvg/cZm0SGofjICqVBUtrP5yJMmIC1U=
@@ -146,8 +205,11 @@ github.com/google/go-cmp v0.5.2/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.3/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.4/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.5/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.6 h1:BKbKCqvP6I+rmFHt06ZmyQtvB8xAkWdhFyr0ZUNZcxQ=
github.com/google/go-cmp v0.5.6/go.mod h1:v8dTdLbMG2kIc/vJvl+f65V22dbkXbowE6jgT/gNBxE=
github.com/google/go-cmp v0.5.7/go.mod h1:n+brtR0CgQNWTVd5ZUFpTBC8YFBDLK/h/bpaJ8/DtOE=
github.com/google/go-cmp v0.5.8 h1:e6P7q2lk1O+qJJb4BtCQXlK8vWEO8V1ZeuEdJNOqZyg=
github.com/google/go-cmp v0.5.8/go.mod h1:17dUlkBOakJ0+DkrSSNjCkIjxS6bF9zb3elmeNGIjoY=
github.com/google/gofuzz v1.0.0/go.mod h1:dBl0BpW6vV/+mYPU4Po3pmUjxk6FQPldtuIdl/M65Eg=
github.com/google/martian v2.1.0+incompatible/go.mod h1:9I4somxYTbIHy5NJKHRl3wXiIaQGbYVAs8BPL6v8lEs=
github.com/google/martian/v3 v3.0.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
github.com/google/martian/v3 v3.1.0/go.mod h1:y5Zk1BBys9G+gd6Jrk0W3cC1+ELVxBWuIGO+w/tUAp0=
@@ -161,17 +223,26 @@ github.com/google/pprof v0.0.0-20200430221834-fc25d7d30c6d/go.mod h1:ZgVRPoUq/hf
github.com/google/pprof v0.0.0-20200708004538-1a94d8640e99/go.mod h1:ZgVRPoUq/hfqzAqh7sHMqb3I9Rq5C59dIz2SbBwJ4eM=
github.com/google/pprof v0.0.0-20201023163331-3e6fc7fc9c4c/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201203190320-1bf35d6f28c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20201218002935-b9804c9f04c2/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210122040257-d980be63207e/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210226084205-cbba55b83ad5/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210601050228-01bbb1931b22/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210609004039-a478d1d731e9/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/pprof v0.0.0-20210720184732-4bb14d4b1be1/go.mod h1:kpwsk12EmLew5upagYY7GY0pfYCcupk39gWOCRROcvE=
github.com/google/renameio v0.1.0/go.mod h1:KWCgfxg9yswjAJkECMjeO8J8rahYeXnNhOm40UhjYkI=
github.com/google/uuid v1.1.2 h1:EVhdT+1Kseyi1/pUmXKaFxYsDNy9RQYkMWRH68J/W7Y=
github.com/google/uuid v1.1.2/go.mod h1:TIyPZe4MgqvfeYDBFedMoGGpEw/LqOeaOT+nhxU+yHo=
github.com/googleapis/gax-go/v2 v2.0.4/go.mod h1:0Wqv26UfaUD9n4G6kQubkQ+KchISgw+vpHVxEJEs9eg=
github.com/googleapis/gax-go/v2 v2.0.5 h1:sjZBwGj9Jlw33ImPtvFviGYvseOtDM7hkSKB7+Tv3SM=
github.com/googleapis/gax-go/v2 v2.0.5/go.mod h1:DWXyrwAJ9X0FpwwEdw+IPEYBICEFu5mhpdKc/us6bOk=
github.com/googleapis/gax-go/v2 v2.1.0/go.mod h1:Q3nei7sK6ybPYH7twZdmQpAd1MKb7pfu6SK+H1/DsU0=
github.com/googleapis/gax-go/v2 v2.1.1/go.mod h1:hddJymUZASv3XPyGkUpKj8pPO47Rmb0eJc8R6ouapiM=
github.com/googleapis/gax-go/v2 v2.2.0/go.mod h1:as02EH8zWkzwUoLbBaFeQ+arQaj/OthfcblKl4IGNaM=
github.com/googleapis/gax-go/v2 v2.3.0/go.mod h1:b8LNqSzNabLiUpXKkY7HAR5jr6bIT99EXz9pXxye9YM=
github.com/googleapis/gax-go/v2 v2.4.0 h1:dS9eYAjhrE2RjmzYw2XAPvcXfmcQLtFEQWn0CR82awk=
github.com/googleapis/gax-go/v2 v2.4.0/go.mod h1:XOTVJ59hdnfJLIP/dh8n5CGryZR2LxK9wbMD5+iXC6c=
github.com/gorilla/mux v1.8.0 h1:i40aqfkR1h2SlN9hojwV5ZA91wcXFOvkdNIeFDP5koI=
github.com/gorilla/mux v1.8.0/go.mod h1:DVbg23sWSpFRCP0SfiEN6jmj59UnW/n46BH5rLB71So=
github.com/grpc-ecosystem/grpc-gateway v1.16.0/go.mod h1:BDjrQk3hbvj6Nolgz8mAMFbcEtjT1g+wF4CSlocrBnw=
github.com/hashicorp/golang-lru v0.5.0/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hashicorp/golang-lru v0.5.1/go.mod h1:/m3WP610KZHVQ1SGc6re/UDhFvYD7pJ4Ao+sR/qLZy8=
github.com/hpcloud/tail v1.0.0 h1:nfCOvKYfkgYP8hkirhJocXT2+zOD8yUNjXaWfTlyFKI=
@@ -227,10 +298,16 @@ github.com/jmespath/go-jmespath v0.4.0/go.mod h1:T8mJZnbsbmF+m6zOOFylbeCJqk5+pHW
github.com/jmespath/go-jmespath/internal/testify v1.5.1 h1:shLQSRRSCCPj3f2gpwzGwWFoC7ycTf1rcQZHOlsJ6N8=
github.com/jmespath/go-jmespath/internal/testify v1.5.1/go.mod h1:L3OGu8Wl2/fWfCI6z80xFu9LTZmf1ZRjMHUOPmWr69U=
github.com/jmoiron/sqlx v1.2.0/go.mod h1:1FEQNm3xlJgrMD+FBdI9+xvCksHtbpVBBw5dYhBSsks=
github.com/jpillora/backoff v1.0.0/go.mod h1:J/6gKK9jxlEcS3zixgDgUAsiuZ7yrSoa/FX5e0EB2j4=
github.com/jrick/logrotate v1.0.0/go.mod h1:LNinyqDIJnpAur+b8yyulnQw/wDuN1+BYKlTRt3OuAQ=
github.com/json-iterator/go v1.1.6/go.mod h1:+SdeFBvtyEkXs7REEP0seUULqWtbJapLOCVDaaPEHmU=
github.com/json-iterator/go v1.1.10/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.11/go.mod h1:KdQUCv79m/52Kvf8AW2vK1V8akMuk1QjK/uOdHXbAo4=
github.com/json-iterator/go v1.1.12/go.mod h1:e30LSqwooZae/UwlEbR2852Gd8hjQvJoHmT4TnhNGBo=
github.com/jstemmer/go-junit-report v0.0.0-20190106144839-af01ea7f8024/go.mod h1:6v2b51hI/fHJwM22ozAgKL4VKDeJcHhJFhtBdhmNjmU=
github.com/jstemmer/go-junit-report v0.9.1 h1:6QPYqodiu3GuPL+7mfx+NwDdp2eTkp9IfEUpgAwUN0o=
github.com/jstemmer/go-junit-report v0.9.1/go.mod h1:Brl9GWCQeLvo8nXZwPNNblvFj/XSXhF0NWZEnDohbsk=
github.com/julienschmidt/httprouter v1.2.0/go.mod h1:SYymIcj16QtmaHHD7aYtjjsJG7VTCxuUUipMqKk8s4w=
github.com/julienschmidt/httprouter v1.3.0/go.mod h1:JR6WtHb+2LUe8TCKY3cZOxFyyO8IZAc4RVcycCCAKdM=
github.com/kisielk/gotool v1.0.0/go.mod h1:XhKaO+MFFWcvkIS/tQcRk01m1F5IRFswLeQ+oQHNcck=
github.com/kkdai/bstream v0.0.0-20161212061736-f391b8402d23/go.mod h1:J+Gs4SYgM6CZQHDETBtE9HaSEkGmuNXF86RwHhHUvq4=
github.com/klauspost/compress v1.11.9 h1:5OCMOdde1TCT2sookEuVeEZzA8bmRSFV3AwPDZAG8AA=
@@ -239,12 +316,16 @@ github.com/klauspost/pgzip v1.2.5 h1:qnWYvvKqedOF2ulHpMG72XQol4ILEJ8k2wwRl/Km8oE
github.com/klauspost/pgzip v1.2.5/go.mod h1:Ch1tH69qFZu15pkjo5kYi6mth2Zzwzt50oCQKQE9RUs=
github.com/konsorten/go-windows-terminal-sequences v1.0.1/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/konsorten/go-windows-terminal-sequences v1.0.2/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/pretty v0.1.0 h1:L/CwN0zerZDmRFUapSPitk6f+Q3+0za1rQkzVuMiMFI=
github.com/konsorten/go-windows-terminal-sequences v1.0.3/go.mod h1:T0+1ngSBFLxvqU3pZ+m/2kptfBszLMUkC4ZK/EgS/cQ=
github.com/kr/logfmt v0.0.0-20140226030751-b84e30acd515/go.mod h1:+0opPa2QZZtGFBFZlji/RkVcI2GknAs/DXo4wKdlNEc=
github.com/kr/pretty v0.1.0/go.mod h1:dAy3ld7l9f0ibDNOQOHHMYYIIbhfbHSm3C4ZsoJORNo=
github.com/kr/pretty v0.3.0 h1:WgNl7dwNpEZ6jJ9k1snq4pZsg7DOEN8hP9Xw0Tsjwk0=
github.com/kr/pretty v0.3.0/go.mod h1:640gp4NfQd8pI5XOwp5fnNeVWj67G7CFk/SaSQn7NBk=
github.com/kr/pty v1.1.1/go.mod h1:pFQYn66WHrOpPYNljwOMqo10TkYh1fy3cYio2l3bCsQ=
github.com/kr/pty v1.1.8/go.mod h1:O1sed60cT9XZ5uDucP5qwvh+TE3NnUj51EiZO/lmSfw=
github.com/kr/text v0.1.0 h1:45sCR5RtlFHMR4UwH9sdQ5TC8v0qDQCHnXt+kaKSTVE=
github.com/kr/text v0.1.0/go.mod h1:4Jbv+DJW3UT/LiOwJeYQe1efqtUx/iVham/4vfdArNI=
github.com/kr/text v0.2.0 h1:5Nx0Ya0ZqY2ygV366QzturHI13Jq95ApcVaJBhpS+AY=
github.com/kr/text v0.2.0/go.mod h1:eLer722TekiGuMkidMxC/pM04lWEeraHUUmBw8l2grE=
github.com/lib/pq v1.0.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.1.0/go.mod h1:5WUZQaWbwv1U+lTReE5YruASi9Al49XbQIvNi/34Woo=
github.com/lib/pq v1.2.0 h1:LXpIM/LZ5xGFhOpXAQUIMM1HdyqzVYM13zNdjCEEcA0=
@@ -256,6 +337,15 @@ github.com/mattn/go-isatty v0.0.7/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hd
github.com/mattn/go-isatty v0.0.8/go.mod h1:Iq45c/XA43vh69/j3iqttzPXn0bhXyGjM0Hdxcsrc5s=
github.com/mattn/go-isatty v0.0.9/go.mod h1:YNRxwqDuOph6SZLI9vUUz6OYw3QyUt7WiY2yME+cCiQ=
github.com/mattn/go-sqlite3 v1.9.0/go.mod h1:FPy6KqzDD04eiIsT53CuJW3U88zkxoIYsOqkbpncsNc=
github.com/matttproud/golang_protobuf_extensions v1.0.1 h1:4hp9jkHxhMHkqkrB3Ix0jegS5sx/RkqARlsWZ6pIwiU=
github.com/matttproud/golang_protobuf_extensions v1.0.1/go.mod h1:D8He9yQNgCq6Z5Ld7szi9bcBfOoFv/3dc6xSMkL2PC0=
github.com/modern-go/concurrent v0.0.0-20180228061459-e0a39a4cb421/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/concurrent v0.0.0-20180306012644-bacd9c7ef1dd/go.mod h1:6dJC0mAP4ikYIbvyc7fijjWJddQyLn8Ig3JB5CqoB9Q=
github.com/modern-go/reflect2 v0.0.0-20180701023420-4b7aa43c6742/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.1/go.mod h1:bx2lNnkwVCuqBIxFjflWJWanXIb3RllmbCylyMrvgv0=
github.com/modern-go/reflect2 v1.0.2/go.mod h1:yWuevngMOJpCy52FWWMvUC8ws7m/LJsjYzDa0/r8luk=
github.com/mwitkow/go-conntrack v0.0.0-20161129095857-cc309e4a2223/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/mwitkow/go-conntrack v0.0.0-20190716064945-2f068394615f/go.mod h1:qRWi+5nqEBWmkhHvq77mSJWrCKwh8bxhgT7d/eI7P4U=
github.com/onsi/ginkgo v1.6.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
github.com/onsi/ginkgo v1.7.0 h1:WSHQ+IS43OoUrWtD1/bbclrwK8TTH5hzp+umCiuxHgs=
github.com/onsi/ginkgo v1.7.0/go.mod h1:lLunBs/Ym6LB5Z9jYTR76FiuTmxDTDusOGeTQH+WWjE=
@@ -265,21 +355,51 @@ github.com/oschwald/maxminddb-golang v1.7.0 h1:JmU4Q1WBv5Q+2KZy5xJI+98aUwTIrPPxZ
github.com/oschwald/maxminddb-golang v1.7.0/go.mod h1:RXZtst0N6+FY/3qCNmZMBApR19cdQj43/NM9VkrNAis=
github.com/pierrec/lz4 v2.0.5+incompatible h1:2xWsjqPFWcplujydGg4WmhC/6fZqK42wMM8aXeqhl0I=
github.com/pierrec/lz4 v2.0.5+incompatible/go.mod h1:pdkljMzZIN41W+lC3N2tnIh5sFi+IEE17M5jbnwPHcY=
github.com/pkg/errors v0.8.0/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.8.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pkg/errors v0.9.1 h1:FEBLx1zS214owpjy7qsBeixbURkuhQAwrK5UwLGTwt4=
github.com/pkg/errors v0.9.1/go.mod h1:bwawxfHBFNV+L2hUp1rHADufV3IMtnDRdf1r5NINEl0=
github.com/pmezard/go-difflib v1.0.0 h1:4DBwDE0NGyQoBHbLQYPwSUPoCMWR5BEzIk/f1lZbAQM=
github.com/pmezard/go-difflib v1.0.0/go.mod h1:iKH77koFhYxTK1pcRnkKkqfTogsbg7gZNVY4sRDYZ/4=
github.com/prometheus/client_golang v0.9.1/go.mod h1:7SWBe2y4D6OKWSNQJUaRYU/AaXPKyh/dDVn+NZz0KFw=
github.com/prometheus/client_golang v1.0.0/go.mod h1:db9x61etRT2tGnBNRi70OPL5FsnadC4Ky3P0J6CfImo=
github.com/prometheus/client_golang v1.7.1/go.mod h1:PY5Wy2awLA44sXw4AOSfFBetzPP4j5+D6mVACh+pe2M=
github.com/prometheus/client_golang v1.11.0/go.mod h1:Z6t4BnS23TR94PD6BsDNk8yVqroYurpAkEiz0P2BEV0=
github.com/prometheus/client_golang v1.12.1 h1:ZiaPsmm9uiBeaSMRznKsCDNtPCS0T3JVDGF+06gjBzk=
github.com/prometheus/client_golang v1.12.1/go.mod h1:3Z9XVyYiZYEO+YQWt3RD2R3jrbd179Rt297l4aS6nDY=
github.com/prometheus/client_model v0.0.0-20180712105110-5c3871d89910/go.mod h1:MbSGuTsp3dbXC40dX6PRTWyKYBIrTGTE9sqQNg2J8bo=
github.com/prometheus/client_model v0.0.0-20190129233127-fd36f4220a90/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.0.0-20190812154241-14fe0d1b01d4/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/client_model v0.2.0 h1:uq5h0d+GuxiXLJLNABMgp2qUWDPiLvgCzz2dUR+/W/M=
github.com/prometheus/client_model v0.2.0/go.mod h1:xMI15A0UPsDsEKsMN9yxemIoYk6Tm2C1GtYGdfGttqA=
github.com/prometheus/common v0.4.1/go.mod h1:TNfzLD0ON7rHzMJeJkieUDPYmFC7Snx/y86RQel1bk4=
github.com/prometheus/common v0.10.0/go.mod h1:Tlit/dnDKsSWFlCLTWaA1cyBgKHSMdTB80sz/V91rCo=
github.com/prometheus/common v0.26.0/go.mod h1:M7rCNAaPfAosfx8veZJCuw84e35h3Cfd9VFqTh1DIvc=
github.com/prometheus/common v0.32.1 h1:hWIdL3N2HoUx3B8j3YN9mWor0qhY/NlEKZEaXxuIRh4=
github.com/prometheus/common v0.32.1/go.mod h1:vu+V0TpY+O6vW9J44gczi3Ap/oXXR10b+M/gUGO4Hls=
github.com/prometheus/procfs v0.0.0-20181005140218-185b4288413d/go.mod h1:c3At6R/oaqEKCNdg8wHV1ftS6bRYblBhIjjI8uT2IGk=
github.com/prometheus/procfs v0.0.2/go.mod h1:TjEm7ze935MbeOT/UhFTIMYKhuLP4wbCsTZCD3I8kEA=
github.com/prometheus/procfs v0.1.3/go.mod h1:lV6e/gmhEcM9IjHGsFOCxxuZ+z1YqCvr4OA4YeYWdaU=
github.com/prometheus/procfs v0.6.0/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/prometheus/procfs v0.7.3 h1:4jVXhlkAyzOScmCkXBTOLRLTz8EeU+eyjrwB/EPq0VU=
github.com/prometheus/procfs v0.7.3/go.mod h1:cz+aTbrPOrUb4q7XlbU9ygM+/jj0fzG6c1xBZuNvfVA=
github.com/rogpeppe/fastuuid v1.2.0/go.mod h1:jVj6XXZzXRy/MSR5jhDC/2q6DgLz+nrA6LYCDYWNEvQ=
github.com/rogpeppe/go-internal v1.3.0/go.mod h1:M8bDsm7K2OlrFYOpmOWEs/qY81heoFRclV5y23lUDJ4=
github.com/rogpeppe/go-internal v1.6.1 h1:/FiVV8dS/e+YqF2JvO3yXRFbBLTIuSDkuC7aBOAvL+k=
github.com/rogpeppe/go-internal v1.6.1/go.mod h1:xXDCJY+GAPziupqXw64V24skbSoqbTEfhy4qGm1nDQc=
github.com/rs/xid v1.2.1/go.mod h1:+uKXf+4Djp6Md1KODXJxgGQPKngRmWyn10oCKFzNHOQ=
github.com/rs/zerolog v1.13.0/go.mod h1:YbFCdg8HfsridGWAh22vktObvhZbQsZXe4/zB0OKkWU=
github.com/rs/zerolog v1.15.0/go.mod h1:xYTKnLHcpfU2225ny5qZjxnj9NvkumZYjJHlAThCjNc=
github.com/satori/go.uuid v1.2.0/go.mod h1:dA0hQrYB0VpLJoorglMZABFdXlWrHn1NEOzdhQKdks0=
github.com/sethvargo/go-envconfig v0.7.0 h1:P/ljQXSRjgAgsnIripHs53Jg/uNVXu2FYQ9yLSDappA=
github.com/sethvargo/go-envconfig v0.7.0/go.mod h1:00S1FAhRUuTNJazWBWcJGvEHOM+NO6DhoRMAOX7FY5o=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24 h1:pntxY8Ary0t43dCZ5dqY4YTJCObLY1kIXl0uzMv+7DE=
github.com/shopspring/decimal v0.0.0-20180709203117-cd690d0c9e24/go.mod h1:M+9NzErvs504Cn4c5DxATwIqPbtswREoFCre64PpcG4=
github.com/sirupsen/logrus v1.2.0/go.mod h1:LxeOpSwHxABJmUn/MG1IvRgCAasNZTLOkJPxbbu5VWo=
github.com/sirupsen/logrus v1.4.1/go.mod h1:ni0Sbl8bgC9z8RoU9G6nDWqqs/fq4eDPysMBDgk/93Q=
github.com/sirupsen/logrus v1.4.2/go.mod h1:tLMulIdttU9McNUspp0xgXVQah82FyeX6MwdIuYE2rE=
github.com/sirupsen/logrus v1.6.0/go.mod h1:7uNnSEd1DgxDLC74fIahvMZmmYsHGZGEOFrfsX/uA88=
github.com/spaolacci/murmur3 v0.0.0-20180118202830-f09979ecbc72/go.mod h1:JwIasOWyU6f++ZhiEuf87xNszmSA2myDM2Kzu9HwQUA=
github.com/stretchr/objx v0.1.0/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.1.1/go.mod h1:HFkY916IF+rwdDfMAkV7OtwuqBVzrE8GR6GFx+wExME=
github.com/stretchr/objx v0.2.0/go.mod h1:qt09Ya8vawLte6SNmTgCsAVtYtaKzEcn8ATUoHMkEqE=
@@ -287,8 +407,10 @@ github.com/stretchr/testify v1.2.2/go.mod h1:a8OnRcib4nhh0OaRAV+Yts87kKdq0PP7pXf
github.com/stretchr/testify v1.3.0/go.mod h1:M5WIy9Dh21IEIfnGCwXGc5bZfKNJtfHm1UVUgZn+9EI=
github.com/stretchr/testify v1.4.0/go.mod h1:j7eGeouHqKxXV5pUuKE4zz7dFj8WfuZ+81PSLYec5m4=
github.com/stretchr/testify v1.5.1/go.mod h1:5W2xD1RspED5o8YsWQXVCued0rvSQ+mT+I5cxcmMvtA=
github.com/stretchr/testify v1.6.1 h1:hDPOHmpOpP40lSULcqw7IrRb/u7w6RpDC9399XyoNd0=
github.com/stretchr/testify v1.6.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.0/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/stretchr/testify v1.7.1 h1:5TQK59W5E3v0r2duFAb7P95B6hEeOyEnHRa8MjYSMTY=
github.com/stretchr/testify v1.7.1/go.mod h1:6Fq8oRcR53rry900zMqJjRRixrwX3KX962/h/Wwjteg=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce h1:fb190+cK2Xz/dvi9Hv8eCYJYvIGUTN2/KLq1pT6CjEc=
github.com/tomasen/realip v0.0.0-20180522021738-f0c99a92ddce/go.mod h1:o8v6yHRoik09Xen7gje4m9ERNah1d1PPsVq1VEx9vE4=
github.com/ua-parser/uap-go v0.0.0-20200325213135-e1c09f13e2fe h1:aj/vX5epIlQQBEocKoM9nSAiNpakdQzElc8SaRFPu+I=
@@ -307,12 +429,26 @@ go.opencensus.io v0.22.4/go.mod h1:yxeiOL68Rb0Xd1ddK5vPZ/oVn4vY4Ynel7k9FzqtOIw=
go.opencensus.io v0.22.5/go.mod h1:5pWMHQbX5EPX2/62yrJeAkowc+lfs/XD7Uxpq3pI6kk=
go.opencensus.io v0.23.0 h1:gqCw0LfLxScz8irSi8exQc7fyQ0fKQU/qnC/X8+V/1M=
go.opencensus.io v0.23.0/go.mod h1:XItmlyltB5F7CS4xOC1DcqMoFqwtC6OG2xF7mCv7P7E=
go.opentelemetry.io/otel v1.7.0 h1:Z2lA3Tdch0iDcrhJXDIlC94XE+bxok1F9B+4Lz/lGsM=
go.opentelemetry.io/otel v1.7.0/go.mod h1:5BdUoMIz5WEs0vt0CUEMtSSaTSHBBVwrhnz7+nrD5xk=
go.opentelemetry.io/otel/exporters/prometheus v0.30.0 h1:YXo5ZY5nofaEYMCMTTMaRH2cLDZB8+0UGuk5RwMfIo0=
go.opentelemetry.io/otel/exporters/prometheus v0.30.0/go.mod h1:qN5feW+0/d661KDtJuATEmHtw5bKBK7NSvNEP927zSs=
go.opentelemetry.io/otel/metric v0.30.0 h1:Hs8eQZ8aQgs0U49diZoaS6Uaxw3+bBE3lcMUKBFIk3c=
go.opentelemetry.io/otel/metric v0.30.0/go.mod h1:/ShZ7+TS4dHzDFmfi1kSXMhMVubNoP0oIaBp70J6UXU=
go.opentelemetry.io/otel/sdk v1.7.0 h1:4OmStpcKVOfvDOgCt7UriAPtKolwIhxpnSNI/yK+1B0=
go.opentelemetry.io/otel/sdk v1.7.0/go.mod h1:uTEOTwaqIVuTGiJN7ii13Ibp75wJmYUDe374q6cZwUU=
go.opentelemetry.io/otel/sdk/metric v0.30.0 h1:XTqQ4y3erR2Oj8xSAOL5ovO5011ch2ELg51z4fVkpME=
go.opentelemetry.io/otel/sdk/metric v0.30.0/go.mod h1:8AKFRi5HyvTR0RRty3paN1aMC9HMT+NzcEhw/BLkLX8=
go.opentelemetry.io/otel/trace v1.7.0 h1:O37Iogk1lEkMRXewVtZ1BBTVn5JEp8GrJvP92bJqC6o=
go.opentelemetry.io/otel/trace v1.7.0/go.mod h1:fzLSB9nqR2eXzxPXb2JW9IKE+ScyXA48yyE4TNvoHqU=
go.opentelemetry.io/proto/otlp v0.7.0/go.mod h1:PqfVotwruBrMGOCsRd/89rSnXhoiJIqeYNgFYFoEGnI=
go.uber.org/atomic v1.3.2/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/atomic v1.4.0/go.mod h1:gD2HeocX3+yG+ygLZcrzQJaqmWj9AIm7n08wl/qW/PE=
go.uber.org/multierr v1.1.0/go.mod h1:wR5kodmAFQ0UK8QlbwjlSNy0Z68gJhDJUG5sjR94q/0=
go.uber.org/zap v1.9.1/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
go.uber.org/zap v1.10.0/go.mod h1:vwi/ZaCAaUcBkycHslxD9B2zi4UTXhF60s6SWpuDF0Q=
golang.org/x/crypto v0.0.0-20170930174604-9419663f5a44/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20180904163835-0709b304e793/go.mod h1:6SG95UA2DQfeDnfUPMdvaQW0Q7yPrPDi9nlGo2tz2b4=
golang.org/x/crypto v0.0.0-20190308221718-c2843e01d9a2/go.mod h1:djNgcEr1/C05ACkg1iLfiJU5Ep61QUkGW8qpdssI0+w=
golang.org/x/crypto v0.0.0-20190411191339-88737f569e3a/go.mod h1:WFFai1msRO1wXaEeE5yQxYXgSfI8pQAWXbQop6sCtWE=
golang.org/x/crypto v0.0.0-20190510104115-cbcb75029529/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
@@ -321,8 +457,9 @@ golang.org/x/crypto v0.0.0-20190820162420-60c769a6c586/go.mod h1:yigFU9vqHzYiE8U
golang.org/x/crypto v0.0.0-20191011191535-87dc89f01550/go.mod h1:yigFU9vqHzYiE8UmvKecakEJjdnWj3jj499lnFckfCI=
golang.org/x/crypto v0.0.0-20200115085410-6d4e4cb37c7d/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200323165209-0ec3e9974c59/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9 h1:psW17arqaxU48Z5kZ0CQnkZWQJsqcURM6tKiBApRjXI=
golang.org/x/crypto v0.0.0-20200622213623-75b288015ac9/go.mod h1:LzIPMQfyMNhhGPhUkYOs5KpL4U8rLKemX1yGLhDgUto=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4 h1:kUhD7nTDoI3fVd9G4ORWrbV5NY0liEs/Jg2pv5f+bBA=
golang.org/x/crypto v0.0.0-20220411220226-7b82a4e95df4/go.mod h1:IxCIyHEi3zRg3s0A5j5BB6A9Jmi73HwBIUl50j+osU4=
golang.org/x/exp v0.0.0-20190121172915-509febef88a4/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190306152737-a1d7652674e8/go.mod h1:CJ0aWSM057203Lf6IL+f9T1iT9GByDxfZKAQTCR3kQA=
golang.org/x/exp v0.0.0-20190510132918-efd6b22b2522/go.mod h1:ZjyILWgesfNpC6sMxTJOJm9Kp84zZh5NQWvqDGG3Qr8=
@@ -346,7 +483,6 @@ golang.org/x/lint v0.0.0-20191125180803-fdd1cda4f05f/go.mod h1:5qLYkcX4OjUUV8bRu
golang.org/x/lint v0.0.0-20200130185559-910be7a94367/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20200302205851-738671d3881b/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20201208152925-83fdc39ff7b5/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616 h1:VLliZ0d+/avPrXXH+OakdXhpJuEoBZuwh1m2j7U6Iug=
golang.org/x/lint v0.0.0-20210508222113-6edffad5e616/go.mod h1:3xt1FjdF8hUf6vQPIChWIBhFzV8gjjsPE/fR3IyQdNY=
golang.org/x/mobile v0.0.0-20190312151609-d3739f865fa6/go.mod h1:z+o9i4GpDbdi3rU15maQ/Ox0txvL9dWGYEHz965HBQE=
golang.org/x/mobile v0.0.0-20190719004257-d2bd2a29d028/go.mod h1:E/iHnbuqvinMTCcRqshq8CkpyQDoeVncDDYHnLhea+o=
@@ -358,11 +494,11 @@ golang.org/x/mod v0.2.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.3.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.0/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.1/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/mod v0.4.2 h1:Gz96sIWK3OalVv/I/qNygP42zyoKp3xptRVCWRFEBvo=
golang.org/x/mod v0.4.2/go.mod h1:s0Qsj1ACt9ePp/hMypM3fl4fZqREWJwdYDEqhRiZZUA=
golang.org/x/net v0.0.0-20180724234803-3673e40ba225/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180826012351-8a410e7b638d/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20180906233101-161cd47e91fd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20181114220301-adae6a3d119a/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190108225652-1e06a53dbb7e/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190213061140-3a22650c66bd/go.mod h1:mL1N/T3taQHkDXs73rZJwtUhF3w3ftmwwsq0BUmARs4=
golang.org/x/net v0.0.0-20190311183353-d8887717615a/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
@@ -370,6 +506,7 @@ golang.org/x/net v0.0.0-20190404232315-eb5bcb51f2a3/go.mod h1:t9HGtf8HONx5eT2rtn
golang.org/x/net v0.0.0-20190501004415-9ce7a6920f09/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190503192946-f4e77d36d62c/go.mod h1:t9HGtf8HONx5eT2rtn7q6eTqICYqUVnKs3thJo3Qplg=
golang.org/x/net v0.0.0-20190603091049-60506f45cf65/go.mod h1:HSz+uSET+XFnRR8LxR5pz3Of3rY3CfYBVs4xY44aLks=
golang.org/x/net v0.0.0-20190613194153-d28f0bde5980/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190620200207-3b0461eec859/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190628185345-da137c7871d7/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
golang.org/x/net v0.0.0-20190724013045-ca1201d0de80/go.mod h1:z5CRVTTTmAJ677TzLLGU+0bjPO0LkuOLi4/5GtJWs/s=
@@ -392,12 +529,20 @@ golang.org/x/net v0.0.0-20201021035429-f5854403a974/go.mod h1:sp8m0HH+o8qH0wwXwY
golang.org/x/net v0.0.0-20201031054903-ff519b6c9102/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201110031124-69a78807bb2b/go.mod h1:sp8m0HH+o8qH0wwXwYZr8TS3Oi6o0r6Gce1SSxlDquU=
golang.org/x/net v0.0.0-20201209123823-ac852fbbde11/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20201224014010-6772e930b67b/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210119194325-5f4716e94777/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210226172049-e18ecbb05110/go.mod h1:m0MpNAwzfU5UDzcl9v0D8zg8gWTRqZa9RBIspLL5mdg=
golang.org/x/net v0.0.0-20210316092652-d523dce5a7f4/go.mod h1:RBQZq4jEuRlivfhVLdyRGr576XBO4/greRjx4P4O3yc=
golang.org/x/net v0.0.0-20210405180319-a5a99cb37ef4/go.mod h1:p54w0d4576C0XHj96bSt6lcn1PtDYWL6XObtHCRCNQM=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420 h1:a8jGStKg0XqKDlKqjLrXn0ioF5MH36pT7Z0BRTqLhbk=
golang.org/x/net v0.0.0-20210503060351-7fd8e65b6420/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20210525063256-abc453219eb5/go.mod h1:9nx3DQGgdP8bBQD5qxJ1jj9UTztislL4KSBs9R2vV5Y=
golang.org/x/net v0.0.0-20220127200216-cd36cc0744dd/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220225172249-27dd8689420f/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220325170049-de3da57026de/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220412020605-290c469a71a5/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220425223048-2871e0cb64e4/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2 h1:NWy5+hlRbC7HK+PmcXVUmW1IMyFce7to56IUvhUFm7Y=
golang.org/x/net v0.0.0-20220520000938-2e3eb7b945c2/go.mod h1:CfG3xpIq0wQ8r1q4Su4UZFWDARRcnwPjda9FqA0JpMk=
golang.org/x/oauth2 v0.0.0-20180821212333-d2e6202438be/go.mod h1:N/0e6XlmueqKjAGxoOufVs8QHGRruUQn6yWY3a++T0U=
golang.org/x/oauth2 v0.0.0-20190226205417-e64efc72b421/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
golang.org/x/oauth2 v0.0.0-20190604053449-0f29369cfe45/go.mod h1:gOpvHmFTYa4IltrdGE7lF6nIHvwfUNPOp7c8zoXwtLw=
@@ -411,8 +556,14 @@ golang.org/x/oauth2 v0.0.0-20210220000619-9bb904979d93/go.mod h1:KelEdhl1UZF7XfJ
golang.org/x/oauth2 v0.0.0-20210313182246-cd4f82c27b84/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210427180440-81ed05c6b58c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210514164344-f6687ab2804c/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914 h1:3B43BWw0xEBsLZ/NO1VALz6fppU3481pik+2Ksv45z8=
golang.org/x/oauth2 v0.0.0-20210628180205-a41e5a781914/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210805134026-6f1e6394065a/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20210819190943-2bc19b11175f/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20211104180415-d3ed0bb246c8/go.mod h1:KelEdhl1UZF7XfJ4dDtk6s++YSgaE7mD/BuKKDLBl4A=
golang.org/x/oauth2 v0.0.0-20220223155221-ee480838109b/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220309155454-6242fa91716a/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5 h1:OSnWWcOd/CtWQC2cYSBgbTSJv3ciqd8r54ySIW2y3RE=
golang.org/x/oauth2 v0.0.0-20220411215720-9780585627b5/go.mod h1:DAh4E804XQdzx2j+YRIaUnCqCV2RuMz24cGBJ5QYIrc=
golang.org/x/sync v0.0.0-20180314180146-1d60e4601c6f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181108010431-42b317875d0f/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20181221193216-37e7f081c4d4/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
@@ -423,11 +574,13 @@ golang.org/x/sync v0.0.0-20200317015054-43a5402ce75a/go.mod h1:RxMgew5VJxzue5/jJ
golang.org/x/sync v0.0.0-20200625203802-6e8e738ad208/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201020160332-67f06af15bc9/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20201207232520-09787c993a3a/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c h1:5KslGYwFpkhGh+Q16bwMP3cOontH8FOep7tGV86Y7SQ=
golang.org/x/sync v0.0.0-20210220032951-036812b2e83c/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29 h1:w8s32wxx3sY+OjLlv9qltkLU5yvJzxjjgiHWLjdIcw4=
golang.org/x/sync v0.0.0-20220513210516-0976fa681c29/go.mod h1:RxMgew5VJxzue5/jJTE5uejpjVlOe/izrB70Jof72aM=
golang.org/x/sys v0.0.0-20180830151530-49385e6e1522/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180905080454-ebe1bf3edb33/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20180909124046-d0be0721c37e/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20181116152217-5ac8a444bdc5/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190215142949-d0b11bdaac8a/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190222072716-a9d3bda3a223/go.mod h1:STP8DvDyc/dI5b8T5hshtkjS+E42TnysNCUPdjciGhY=
golang.org/x/sys v0.0.0-20190312061237-fead79001313/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -445,6 +598,7 @@ golang.org/x/sys v0.0.0-20191001151750-bb3f8db39f24/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20191204072324-ce4227a45e2e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191224085550-c709ea063b76/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20191228213918-04cbcbbfeed8/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200106162015-b016eb3dc98e/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200113162924-86b910548bc1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200122134326-e047566fdf82/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200202164722-d101bd2416d5/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -457,6 +611,8 @@ golang.org/x/sys v0.0.0-20200501052902-10377860bb8e/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20200511232937-7e40ca221e25/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200515095857-1151b9dac4a9/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200523222454-059865788121/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200615200032-f1bc736245b1/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200625212154-ddb9806d33ae/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200803210538-64077c9b5642/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200905004654-be1d3432aa8f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20200930185726-fdedc70b468f/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
@@ -464,19 +620,40 @@ golang.org/x/sys v0.0.0-20201119102817-f84b799fce68/go.mod h1:h1NjWce9XRLGQEsW7w
golang.org/x/sys v0.0.0-20201201145000-ef89a241ccb3/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210104204734-6f8348627aad/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210119212857-b64e53b001e4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210124154548-22da62e12c0c/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210220050731-9a76102bfb43/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210225134936-a50acf3fe073/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210305230114-8fe3ee5dd75b/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210315160823-c6e025ad8005/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210320140829-1e4c9ba3b0c4/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210330210617-4fbd30eecc44/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423082822-04245dca01da/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210423185535-09eb48e85fd7/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210503080704-8803ae5d1324/go.mod h1:h1NjWce9XRLGQEsW7wpKNCjG9DtNlClVuFLEZdDNbEs=
golang.org/x/sys v0.0.0-20210510120138-977fb7262007/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210514084401-e8d321eab015/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603081109-ebe580a85c40/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210603125802-9665404d3644/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22 h1:RqytpXGR1iVNX7psjB3ff8y7sNFinVFvkx1c8SjBkio=
golang.org/x/sys v0.0.0-20210615035016-665e8c7367d1/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210616094352-59db8d763f22/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210630005230-0f9fa26af87c/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210806184541-e5e7981a1069/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210823070655-63515b42dcdf/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20210908233432-aa78b53d3365/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211124211545-fe61309f8881/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211210111614-af8b64212486/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20211216021012-1d35b9e2eb4e/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220114195835-da31bd327af9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220128215802-99c3d69c2c27/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220209214540-3681064d5158/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220227234510-4e6760a101f9/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220328115105-d36c6a25d886/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220412211240-33da011f77ad/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220502124256-b6088ccd6cba/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a h1:dGzPydgVsqGcTRVwiLJ1jVbufYwmzD3LfVPLKsKg+0k=
golang.org/x/sys v0.0.0-20220520151302-bc2c85ada10a/go.mod h1:oPkhp1MJrh7nUepCBck5+mAzfO9JrbApNNgaTdGDITg=
golang.org/x/term v0.0.0-20201126162022-7de9c90e9dd1/go.mod h1:bj7SfCRtBDWHUb9snDiAeCFNEtKQo2Wmx5Cou7ajbmo=
golang.org/x/term v0.0.0-20210927222741-03fcf44c2211/go.mod h1:jbD1KX2456YbFQfuXm/mYQcufACuNUgVhRMnK/tPxf8=
golang.org/x/text v0.0.0-20170915032832-14c0d48ead0c/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.0/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
golang.org/x/text v0.3.1-0.20180807135948-17ff2d5776d2/go.mod h1:NqM8EUOU14njkJ3fqMW+pc6Ldnwhi/IjpwHt7yyuwOQ=
@@ -484,8 +661,9 @@ golang.org/x/text v0.3.2/go.mod h1:bEr9sfX3Q8Zfm5fL9x+3itogRgK3+ptLWKqgva+5dAk=
golang.org/x/text v0.3.3/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.4/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.5/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.6 h1:aRYxNxv6iGQlyVaZmk6ZgYEDa+Jg18DxebPSrd6bg1M=
golang.org/x/text v0.3.6/go.mod h1:5Zoc/QRtKVWzQhOtBMvqHzDpF6irO9z98xDceosuGiQ=
golang.org/x/text v0.3.7 h1:olpwvP2KacW1ZWvsR7uQhoyTYvKAupfQrRGBFM352Gk=
golang.org/x/text v0.3.7/go.mod h1:u+2+/6zg+i71rQMx5EYifcz6MCKuco9NR6JIITiCfzQ=
golang.org/x/time v0.0.0-20181108054448-85acf8d2951c/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20190308202827-9d24e82272b4/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
golang.org/x/time v0.0.0-20191024005414-555d28b269f0/go.mod h1:tRJNPiyCQ0inRvYxbN9jk5I+vvW/OXSQhTDSoE431IQ=
@@ -536,19 +714,22 @@ golang.org/x/tools v0.0.0-20201110124207-079ba7bd75cd/go.mod h1:emZCQorbCU4vsT4f
golang.org/x/tools v0.0.0-20201201161351-ac6f37ff4c2a/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20201208233053-a543418bbed2/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210105154028-b0ab187a4818/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.0.0-20210108195828-e2f9c7f1fc8e/go.mod h1:emZCQorbCU4vsT4fOWvOPXz4eW1wZW4PmDk9uLelYpA=
golang.org/x/tools v0.1.0/go.mod h1:xkSsbof2nBLbhDlRMhhhyNLN/zl3eTqcnHD5viDpcZ0=
golang.org/x/tools v0.1.1/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.2/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.3/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.4 h1:cVngSRcfgyZCzys3KYOpCFa+4dqX/Oub9tAq00ttGVs=
golang.org/x/tools v0.1.4/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/tools v0.1.5/go.mod h1:o0xws9oXOQQZyjljx8fwUC0k7L1pTE6eaCbjGeHmOkk=
golang.org/x/xerrors v0.0.0-20190410155217-1f06c39b4373/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190513163551-3ee3066db522/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20190717185122-a985d3407aa7/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191011141410-1b5146add898/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20191204190536-9bdfabe68543/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1 h1:go1bK/D/BFZV2I8cIQd1NKEZ+0owSTG1fDTci4IqFcE=
golang.org/x/xerrors v0.0.0-20200804184101-5ec99f83aff1/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220411194840-2f41105eb62f/go.mod h1:I/5z698sn9Ka8TeJc9MKroUUfqBBauWjQqLJ2OPfmY0=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df h1:5Pf6pFKu98ODmgnpvkJ3kFUOQGGLIzLIkbzUHp47618=
golang.org/x/xerrors v0.0.0-20220517211312-f3a8303e98df/go.mod h1:K8+ghG5WaK9qNqU5K3HdILfMLy1f3aNYFI/wnl100a8=
google.golang.org/api v0.4.0/go.mod h1:8k5glujaEP+g9n7WNsDg8QP6cUVNI86fCNMcbazEtwE=
google.golang.org/api v0.7.0/go.mod h1:WtwebWUNSVBH/HAw79HIFXZNqEvBhG+Ra+ax0hx3E3M=
google.golang.org/api v0.8.0/go.mod h1:o4eAsZoiT+ibD93RtjEohWalFOjRDx6CVaqeizhEnKg=
@@ -573,8 +754,22 @@ google.golang.org/api v0.43.0/go.mod h1:nQsDGjRXMo4lvh5hP0TKqF244gqhGcr/YSIykhUk
google.golang.org/api v0.46.0/go.mod h1:ceL4oozhkAiTID8XMmJBsIxID/9wMXJVVFXPg4ylg3I=
google.golang.org/api v0.47.0/go.mod h1:Wbvgpq1HddcWVtzsVLyfLp8lDg6AA241LmgIL59tHXo=
google.golang.org/api v0.48.0/go.mod h1:71Pr1vy+TAZRPkPs/xlCf5SsU8WjuAWv1Pfjbtukyy4=
google.golang.org/api v0.50.0 h1:LX7NFCFYOHzr7WHaYiRUpeipZe9o5L8T+2F4Z798VDw=
google.golang.org/api v0.50.0/go.mod h1:4bNT5pAuq5ji4SRZm+5QIkjny9JAyVD/3gaSihNefaw=
google.golang.org/api v0.51.0/go.mod h1:t4HdrdoNgyN5cbEfm7Lum0lcLDLiise1F8qDKX00sOU=
google.golang.org/api v0.54.0/go.mod h1:7C4bFFOvVDGXjfDTAsgGwDgAxRDeQ4X8NvUedIt6z3k=
google.golang.org/api v0.55.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.56.0/go.mod h1:38yMfeP1kfjsl8isn0tliTjIb1rJXcQi4UXlbqivdVE=
google.golang.org/api v0.57.0/go.mod h1:dVPlbZyBo2/OjBpmvNdpn2GRm6rPy75jyU7bmhdrMgI=
google.golang.org/api v0.61.0/go.mod h1:xQRti5UdCmoCEqFxcz93fTl338AVqDgyaDRuOZ3hg9I=
google.golang.org/api v0.63.0/go.mod h1:gs4ij2ffTRXwuzzgJl/56BdwJaA194ijkfn++9tDuPo=
google.golang.org/api v0.67.0/go.mod h1:ShHKP8E60yPsKNw/w8w+VYaj9H6buA5UqDp8dhbQZ6g=
google.golang.org/api v0.70.0/go.mod h1:Bs4ZM2HGifEvXwd50TtW70ovgJffJYw2oRCOFU/SkfA=
google.golang.org/api v0.71.0/go.mod h1:4PyU6e6JogV1f9eA4voyrTY2batOLdgZ5qZ5HOCc4j8=
google.golang.org/api v0.74.0/go.mod h1:ZpfMZOVRMywNyvJFeqL9HRWBgAuRfSjJFpe9QtRRyDs=
google.golang.org/api v0.75.0/go.mod h1:pU9QmyHLnzlpar1Mjt4IbapUCy8J+6HD6GeELN69ljA=
google.golang.org/api v0.78.0/go.mod h1:1Sg78yoMLOhlQTeF+ARBoytAcH1NNyyl390YMy6rKmw=
google.golang.org/api v0.81.0 h1:o8WF5AvfidafWbFjsRyupxyEQJNUWxLZJCK5NXrxZZ8=
google.golang.org/api v0.81.0/go.mod h1:FA6Mb/bZxj706H2j+j2d6mHEEaHBmbbWnkfvmorOCko=
google.golang.org/appengine v1.1.0/go.mod h1:EbEs0AVv82hx2wNQdGPgUI5lhzA/G0D9YwlJXL52JkM=
google.golang.org/appengine v1.4.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
google.golang.org/appengine v1.5.0/go.mod h1:xpcJRLb0r/rnEns0DIKYYv+WjYCduHsrkT7/EB5XEv4=
@@ -606,6 +801,7 @@ google.golang.org/genproto v0.0.0-20200312145019-da6875a35672/go.mod h1:55QSHmfG
google.golang.org/genproto v0.0.0-20200331122359-1ee6d9798940/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200430143042-b979b6f78d84/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200511104702-f5ebc3bea380/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200513103714-09dca8ec2884/go.mod h1:55QSHmfGQM9UVYDPBsyGGes0y52j32PQ3BqQfXhyH3c=
google.golang.org/genproto v0.0.0-20200515170657-fc4c6c6a6587/go.mod h1:YsZOwe1myG/8QRHRsmBRE1LrgQY60beZKjly0O1fX9U=
google.golang.org/genproto v0.0.0-20200526211855-cb27e3aa2013/go.mod h1:NbSheEEYHJ7i3ixzK3sjbqSGDJWnxyFXZblF3eUsNvo=
google.golang.org/genproto v0.0.0-20200618031413-b414f8b61790/go.mod h1:jDfRM7FcilCzHH/e9qn6dsT145K34l5v+OpcnNgKAAA=
@@ -617,7 +813,9 @@ google.golang.org/genproto v0.0.0-20201109203340-2640f1f9cdfb/go.mod h1:FWY/as6D
google.golang.org/genproto v0.0.0-20201201144952-b05cb90ed32e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201210142538-e3217bee35cc/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20201214200347-8c77b98c765d/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210108203827-ffc7fda8c3d7/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210222152913-aa3ee6e6a81c/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210226172003-ab064af71705/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210303154014-9728d6b83eeb/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210310155132-4ce2db91004e/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
google.golang.org/genproto v0.0.0-20210319143718-93e7006c17a6/go.mod h1:FWY/as6DDZQgahTzZj3fqbO1CbirC29ZNUFHwi0/+no=
@@ -628,8 +826,37 @@ google.golang.org/genproto v0.0.0-20210517163617-5e0236093d7a/go.mod h1:P3QM42oQ
google.golang.org/genproto v0.0.0-20210602131652-f16073e35f0c/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210604141403-392c879c8b08/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210608205507-b6d2f5bf0d7d/go.mod h1:UODoCrxHCcBojKKwX1terBiRUaqAsFqJiF615XL43r0=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84 h1:R1r5J0u6Cx+RNl/6mezTw6oA14cmKC96FeUwL6A9bd4=
google.golang.org/genproto v0.0.0-20210624195500-8bfb893ecb84/go.mod h1:SzzZ/N+nwJDaO1kznhnlzqS8ocJICar6hYhVyhi++24=
google.golang.org/genproto v0.0.0-20210713002101-d411969a0d9a/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210716133855-ce7ef5c701ea/go.mod h1:AxrInvYm1dci+enl5hChSFPOmmUF1+uAa/UsgNRWd7k=
google.golang.org/genproto v0.0.0-20210728212813-7823e685a01f/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210805201207-89edb61ffb67/go.mod h1:ob2IJxKrgPT52GcgX759i1sleT07tiKowYBGbczaW48=
google.golang.org/genproto v0.0.0-20210813162853-db860fec028c/go.mod h1:cFeNkxwySK631ADgubI+/XFU/xp8FD5KIVV4rj8UC5w=
google.golang.org/genproto v0.0.0-20210821163610-241b8fcbd6c8/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210828152312-66f60bf46e71/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210831024726-fe130286e0e2/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210903162649-d08c68adba83/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210909211513-a8c4777a87af/go.mod h1:eFjDcFEctNawg4eG61bRv87N7iHBWyVhJu7u1kqDUXY=
google.golang.org/genproto v0.0.0-20210924002016-3dee208752a0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211118181313-81c1377c94b1/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211206160659-862468c7d6e0/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211208223120-3a66f561d7aa/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20211221195035-429b39de9b1c/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220126215142-9970aeb2e350/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220207164111-0872dc986b00/go.mod h1:5CzLGKJ67TSI2B9POpiiyGha0AjJvZIUgRMt1dSmuhc=
google.golang.org/genproto v0.0.0-20220218161850-94dd64e39d7c/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220222213610-43724f9ea8cf/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220304144024-325a89244dc8/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220310185008-1973136f34c6/go.mod h1:kGP+zUP2Ddo0ayMi4YuN7C3WZyJvGLZRh8Z5wnAqvEI=
google.golang.org/genproto v0.0.0-20220324131243-acbaeb5b85eb/go.mod h1:hAL49I2IFola2sVEjAn7MEwsja0xp51I0tlGAf9hz4E=
google.golang.org/genproto v0.0.0-20220407144326-9054f6ed7bac/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220413183235-5e96e2839df9/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220414192740-2d67ff6cf2b4/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220421151946-72621c1f0bd3/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220429170224-98d788798c3e/go.mod h1:8w6bsBMX6yCPbAVTeqQHvzxW0EIFigd5lZyahWgyfDo=
google.golang.org/genproto v0.0.0-20220505152158-f39f71e6c8f3/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd h1:e0TwkXOdbnH/1x5rc5MZ/VYyiZ4v+RdVfrGMqEwT68I=
google.golang.org/genproto v0.0.0-20220519153652-3a47de7e79bd/go.mod h1:RAyBrSAP7Fh3Nc84ghnVLDPuV51xc9agzmm4Ph6i0Q4=
google.golang.org/grpc v1.19.0/go.mod h1:mqu4LbDTu4XGKhr4mRzUsmM4RtVoemTSY81AxZiDr8c=
google.golang.org/grpc v1.20.1/go.mod h1:10oTOabMzJvdu6/UiuZezV6QK5dSlG84ov/aaiqXj38=
google.golang.org/grpc v1.21.1/go.mod h1:oYelfM1adQP15Ek0mdvEgi9Df8B9CZIaU1084ijfRaM=
@@ -643,6 +870,7 @@ google.golang.org/grpc v1.29.1/go.mod h1:itym6AZVZYACWQqET3MqgPpjcuV5QH3BxFS3Iji
google.golang.org/grpc v1.30.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.0/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.31.1/go.mod h1:N36X2cJ7JwdamYAgDz+s+rVMFjt3numwzf/HckM8pak=
google.golang.org/grpc v1.33.1/go.mod h1:fr5YgcSWrqhRRxogOsw7RzIpsmvOZ6IcH4kBYTpR3n0=
google.golang.org/grpc v1.33.2/go.mod h1:JMHMWHQWaTccqQQlmk3MJZS+GWXOdAesneDmEnv2fbc=
google.golang.org/grpc v1.34.0/go.mod h1:WotjhfgOW/POjDeRt8vscBtXq+2VjORFy659qA51WJ8=
google.golang.org/grpc v1.35.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
@@ -650,8 +878,16 @@ google.golang.org/grpc v1.36.0/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAG
google.golang.org/grpc v1.36.1/go.mod h1:qjiiYl8FncCW8feJPdyg3v6XW24KsRHe+dy9BAGRRjU=
google.golang.org/grpc v1.37.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.37.1/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.38.0 h1:/9BgsAsa5nWe26HqOlvlgJnqBuktYOLCgjCPqsa56W0=
google.golang.org/grpc v1.38.0/go.mod h1:NREThFqKR1f3iQ6oBuvc5LadQuXVGo9rkm5ZGrQdJfM=
google.golang.org/grpc v1.39.0/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.39.1/go.mod h1:PImNr+rS9TWYb2O4/emRugxiyHZ5JyHW5F+RPnDzfrE=
google.golang.org/grpc v1.40.0/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.40.1/go.mod h1:ogyxbiOoUXAkP+4+xa6PZSE9DZgIHtSpzjDTB9KAK34=
google.golang.org/grpc v1.44.0/go.mod h1:k+4IHHFw41K8+bbowsex27ge2rCb65oeWqe4jJ590SU=
google.golang.org/grpc v1.45.0/go.mod h1:lN7owxKUQEqMfSyQikvvk5tf/6zMPsrK+ONuO11+0rQ=
google.golang.org/grpc v1.46.0/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc v1.46.2 h1:u+MLGgVf7vRdjEYZ8wDFhAVNmhkbJ5hmrA1LMWK1CAQ=
google.golang.org/grpc v1.46.2/go.mod h1:vN9eftEi1UMyUsIF80+uQXhHjbXYbm0uXoFCACuMGWk=
google.golang.org/grpc/cmd/protoc-gen-go-grpc v1.1.0/go.mod h1:6Kw0yEErY5E/yWrBtf03jp27GLLJujG4z/JK95pnjjw=
google.golang.org/protobuf v0.0.0-20200109180630-ec00e32a8dfd/go.mod h1:DFci5gLYBciE7Vtevhsrf46CRTquxDuWsQurQQe4oz8=
google.golang.org/protobuf v0.0.0-20200221191635-4d8936d0db64/go.mod h1:kwYJMbMJ01Woi6D6+Kah6886xMZcty6N08ah7+eCXa0=
@@ -664,13 +900,17 @@ google.golang.org/protobuf v1.23.1-0.20200526195155-81db48ad09cc/go.mod h1:EGpAD
google.golang.org/protobuf v1.24.0/go.mod h1:r/3tXBNzIEhYS9I1OUVjXDlt8tc493IdKGjtUeSXeh4=
google.golang.org/protobuf v1.25.0/go.mod h1:9JNX74DMeImyA3h4bdi1ymwjUzf21/xIlbajtzgsN7c=
google.golang.org/protobuf v1.26.0-rc.1/go.mod h1:jlhhOSvTdKEhbULTjvd4ARK9grFBp09yW+WbY/TyQbw=
google.golang.org/protobuf v1.26.0 h1:bxAC2xTBsZGibn2RTntX0oH50xLsqy1OxA9tTL3p/lk=
google.golang.org/protobuf v1.26.0/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.27.1/go.mod h1:9q0QmTI4eRPtz6boOQmLYwt+qCgq0jsYwAQnmE0givc=
google.golang.org/protobuf v1.28.0 h1:w43yiav+6bVFTBQFZX0r7ipe9JQ1QsbMgHwbBziscLw=
google.golang.org/protobuf v1.28.0/go.mod h1:HV8QOd/L58Z+nl8r43ehVNZIU/HEI6OcFqwMG9pJV4I=
gopkg.in/alecthomas/kingpin.v2 v2.2.6/go.mod h1:FMv+mEhP44yOT+4EoQTLFTRgOQ1FBLkstjWtayDeSgw=
gopkg.in/check.v1 v0.0.0-20161208181325-20d25e280405/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127 h1:qIbj1fsPNlZgppZ+VLlY7N33q108Sa+fhmuc+sWQYwY=
gopkg.in/check.v1 v1.0.0-20180628173108-788fd7840127/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0 h1:+RlmciBLDd/XwM1iudiG3HtCg45purnsOxEoY/+JZdQ=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.7.0/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15 h1:YR8cESwS4TdDjEe65xsg0ogRM/Nc3DYOhEAlW+xobZo=
gopkg.in/check.v1 v1.0.0-20190902080502-41f04d3bba15/go.mod h1:Co6ibVJAznAaIkqp8huTwlJQCZ016jof/cbN4VW5Yz0=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.8.2 h1:QAgN6OC0o7dwvyz+HML6GYm+0Pk54O91+oxGqJ/5z8I=
gopkg.in/confluentinc/confluent-kafka-go.v1 v1.8.2/go.mod h1:ZdI3yfYmdNSLQPNCpO1y00EHyWaHG5EnQEyL/ntAegY=
gopkg.in/errgo.v2 v2.1.0/go.mod h1:hNsd1EY+bozCKY1Ytp96fpM3vjJbqLJn88ws8XvfDNI=
gopkg.in/fsnotify.v1 v1.4.7 h1:xOHLXZwVvI9hhs+cLKq5+I5onOuwQLhQwiu63xxlHs4=
gopkg.in/fsnotify.v1 v1.4.7/go.mod h1:Tz8NjZHkW78fSQdbUxIjBTcgA1z1m8ZHf0WmKUhAMys=
@@ -679,10 +919,16 @@ gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7 h1:uRGJdciOHaEIrze2W8Q3AKkep
gopkg.in/tomb.v1 v1.0.0-20141024135613-dd632973f1e7/go.mod h1:dt/ZhP58zS4L8KSrWDmTeBkI65Dw0HsyUHuEVlX15mw=
gopkg.in/yaml.v2 v2.2.1/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.2/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8 h1:obN1ZagJSUGI0Ek/LBmuj4SNLPfIny3KsKFopxRdj10=
gopkg.in/yaml.v2 v2.2.3/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.4/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.5/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.2.8/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c h1:dUUwHk2QECo/6vqA44rthZ8ie2QXMNeKRTHCNY2nXvo=
gopkg.in/yaml.v2 v2.3.0/go.mod h1:hI93XBmqTisBFMUTm0b8Fm+jr3Dg1NNxqwp+5A1VGuI=
gopkg.in/yaml.v2 v2.4.0 h1:D8xgwECY7CYvx+Y2n4sBz93Jn9JRvxdiyyo8CTfuKaY=
gopkg.in/yaml.v2 v2.4.0/go.mod h1:RDklbk79AGWmwhnvt/jBztapEOGDOx6ZbXqjP6csGnQ=
gopkg.in/yaml.v3 v3.0.0-20200313102051-9f266ea9e77c/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
gopkg.in/yaml.v3 v3.0.0 h1:hjy8E9ON/egN1tAYqKb61G10WtihqetD4sz2H+8nIeA=
gopkg.in/yaml.v3 v3.0.0/go.mod h1:K4uyk7z7BCEPqu6E+C64Yfv1cQ7kz7rIZviUmN+EgEM=
honnef.co/go/tools v0.0.0-20190102054323-c2f93a96b099/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190106161140-3f1c8253044a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=
honnef.co/go/tools v0.0.0-20190418001031-e561f6794a2a/go.mod h1:rf3lG4BRIbNafJWhAfAdb/ePZxsR/4RtNHQocxwk9r4=


@@ -1,18 +1,23 @@
package cacher
import (
"context"
"crypto/tls"
"fmt"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"io"
"io/ioutil"
"log"
"mime"
"net/http"
"openreplay/backend/pkg/monitoring"
"path/filepath"
"strings"
"time"
"github.com/pkg/errors"
config "openreplay/backend/internal/config/assets"
"openreplay/backend/pkg/storage"
"openreplay/backend/pkg/url/assets"
)
@@ -26,26 +31,38 @@ type cacher struct {
rewriter *assets.Rewriter // Read only
Errors chan error
sizeLimit int
downloadedAssets syncfloat64.Counter
requestHeaders map[string]string
}
func NewCacher(region string, bucket string, origin string, sizeLimit int) *cacher {
rewriter := assets.NewRewriter(origin)
func NewCacher(cfg *config.Config, metrics *monitoring.Metrics) *cacher {
rewriter := assets.NewRewriter(cfg.AssetsOrigin)
if metrics == nil {
log.Fatalf("metrics are empty")
}
downloadedAssets, err := metrics.RegisterCounter("assets_downloaded")
if err != nil {
log.Printf("can't create downloaded_assets metric: %s", err)
}
return &cacher{
timeoutMap: newTimeoutMap(),
s3: storage.NewS3(region, bucket),
s3: storage.NewS3(cfg.AWSRegion, cfg.S3BucketAssets),
httpClient: &http.Client{
Timeout: time.Duration(6) * time.Second,
Transport: &http.Transport{
Proxy: http.ProxyFromEnvironment,
TLSClientConfig: &tls.Config{InsecureSkipVerify: true},
},
},
rewriter: rewriter,
Errors: make(chan error),
sizeLimit: sizeLimit,
sizeLimit: cfg.AssetsSizeLimit,
downloadedAssets: downloadedAssets,
requestHeaders: cfg.AssetsRequestHeaders,
}
}
func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, context string, isJS bool) {
func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, urlContext string, isJS bool) {
var cachePath string
if isJS {
cachePath = assets.GetCachePathForJS(requestURL)
@@ -61,26 +78,28 @@ func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, conte
}
req, err := http.NewRequest("GET", requestURL, nil)
if err != nil {
c.Errors <- errors.Wrap(err, urlContext)
return
}
req.Header.Set("Cookie", "ABv=3;")
req.Header.Set("User-Agent", "Mozilla/5.0 (Windows NT 6.1; rv:31.0) Gecko/20100101 Firefox/31.0")
for k, v := range c.requestHeaders {
req.Header.Set(k, v)
}
res, err := c.httpClient.Do(req)
if err != nil {
c.Errors <- errors.Wrap(err, context)
c.Errors <- errors.Wrap(err, urlContext)
return
}
defer res.Body.Close()
if res.StatusCode != 200 {
if res.StatusCode >= 400 {
// TODO: retry
c.Errors <- errors.Wrap(fmt.Errorf("Status code is %v, ", res.StatusCode), context)
c.Errors <- errors.Wrap(fmt.Errorf("Status code is %v, ", res.StatusCode), urlContext)
return
}
data, err := ioutil.ReadAll(io.LimitReader(res.Body, int64(c.sizeLimit+1)))
if err != nil {
c.Errors <- errors.Wrap(err, context)
c.Errors <- errors.Wrap(err, urlContext)
return
}
if len(data) > c.sizeLimit {
c.Errors <- errors.Wrap(errors.New("Maximum size exceeded"), context)
c.Errors <- errors.Wrap(errors.New("Maximum size exceeded"), urlContext)
return
}
@@ -98,23 +117,24 @@ func (c *cacher) cacheURL(requestURL string, sessionID uint64, depth byte, conte
// TODO: implement in streams
err = c.s3.Upload(strings.NewReader(strData), cachePath, contentType, false)
if err != nil {
c.Errors <- errors.Wrap(err, context)
c.Errors <- errors.Wrap(err, urlContext)
return
}
c.downloadedAssets.Add(context.Background(), 1)
if isCSS {
if depth > 0 {
for _, extractedURL := range assets.ExtractURLsFromCSS(string(data)) {
if fullURL, cachable := assets.GetFullCachableURL(requestURL, extractedURL); cachable {
go c.cacheURL(fullURL, sessionID, depth-1, context+"\n -> "+fullURL, false)
go c.cacheURL(fullURL, sessionID, depth-1, urlContext+"\n -> "+fullURL, false)
}
}
if err != nil {
c.Errors <- errors.Wrap(err, context)
c.Errors <- errors.Wrap(err, urlContext)
return
}
} else {
c.Errors <- errors.Wrap(errors.New("Maximum recursion cache depth exceeded"), context)
c.Errors <- errors.Wrap(errors.New("Maximum recursion cache depth exceeded"), urlContext)
return
}
}
@@ -129,18 +149,6 @@ func (c *cacher) CacheURL(sessionID uint64, fullURL string) {
go c.cacheURL(fullURL, sessionID, MAX_CACHE_DEPTH, fullURL, false)
}
// func (c *cacher) CacheURL(sessionID uint64, baseURL string, relativeURL string) {
// if fullURL, cachable := assets.GetFullCachableURL(baseURL, relativeURL); cachable {
// c.CacheURL(sessionID, fullURL)
// }
// }
// func (c *cacher) CacheCSSLinks(baseURL string, css string, sessionID uint64) {
// for _, extractedURL := range assets.ExtractURLsFromCSS(css) {
// c.CacheURL(sessionID, baseURL, extractedURL)
// }
// }
func (c *cacher) UpdateTimeouts() {
c.timeoutMap.deleteOutdated()
}
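
For context, here is a minimal sketch of how the reworked constructor gets wired. The monitoring constructor and the cacher import path are assumptions for illustration; only the NewCacher signature is taken from this diff.

package main

import (
	"log"

	"openreplay/backend/internal/assets/cacher" // import path assumed
	config "openreplay/backend/internal/config/assets"
	"openreplay/backend/pkg/monitoring"
)

func main() {
	cfg := config.New()                 // env vars first, then optional config file overrides
	metrics := monitoring.New("assets") // hypothetical constructor name
	c := cacher.NewCacher(cfg, metrics)
	// Asynchronous caching reports failures on the exported Errors channel.
	for err := range c.Errors {
		log.Printf("cache error: %s", err)
	}
}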


@@ -1,17 +1,15 @@
package main
package assets
import (
"encoding/json"
"strings"
)
type frame struct {
FileName string `json:"fileName"`
}
func extractJSExceptionSources(payload *string) ([]string, error) {
func ExtractJSExceptionSources(payload *string) ([]string, error) {
var frameList []frame
err := json.Unmarshal([]byte(*payload), &frameList)
if err != nil {
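
For reference, the payload this helper decodes is a JSON array of stack frames, so a minimal (invented) input looks like the following; the tail of the function is cut off above, but the fileName fields are what it extracts:

payload := `[{"fileName":"https://example.com/app.js"},{"fileName":"https://example.com/vendor.js"}]`
sources, err := assets.ExtractJSExceptionSources(&payload)
// sources should carry the two fileName values; err is non-nil only for malformed JSON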


@@ -0,0 +1,23 @@
package assets
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
)
type Config struct {
common.Config
GroupCache string `env:"GROUP_CACHE,required"`
TopicCache string `env:"TOPIC_CACHE,required"`
AWSRegion string `env:"AWS_REGION,required"`
S3BucketAssets string `env:"S3_BUCKET_ASSETS,required"`
AssetsOrigin string `env:"ASSETS_ORIGIN,required"`
AssetsSizeLimit int `env:"ASSETS_SIZE_LIMIT,required"`
AssetsRequestHeaders map[string]string `env:"ASSETS_REQUEST_HEADERS"`
}
func New() *Config {
cfg := &Config{}
configurator.Process(cfg)
return cfg
}
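
For reference, a minimal sketch of exercising this config in isolation; every value below is hypothetical, and the map syntax follows sethvargo/go-envconfig's comma-separated key:value convention:

package main

import (
	"fmt"
	"os"

	"openreplay/backend/internal/config/assets"
)

func main() {
	// Hypothetical values; every variable below is tagged `required`.
	os.Setenv("GROUP_CACHE", "cache")
	os.Setenv("TOPIC_CACHE", "raw-cache")
	os.Setenv("AWS_REGION", "eu-west-1")
	os.Setenv("S3_BUCKET_ASSETS", "my-assets-bucket")
	os.Setenv("ASSETS_ORIGIN", "https://assets.example.com")
	os.Setenv("ASSETS_SIZE_LIMIT", "6291456")
	// go-envconfig reads map values as comma-separated key:value pairs.
	os.Setenv("ASSETS_REQUEST_HEADERS", "X-Forwarded-For:127.0.0.1")

	cfg := assets.New()
	fmt.Println(cfg.AssetsRequestHeaders) // map[X-Forwarded-For:127.0.0.1]
}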

View file

@ -0,0 +1,14 @@
package common
type Config struct {
ConfigFilePath string `env:"CONFIG_FILE_PATH"`
MessageSizeLimit int `env:"QUEUE_MESSAGE_SIZE_LIMIT,default=1048576"`
}
type Configer interface {
GetConfigPath() string
}
func (c *Config) GetConfigPath() string {
return c.ConfigFilePath
}

View file

@ -0,0 +1,111 @@
package configurator
import (
"context"
"encoding/json"
"fmt"
"io"
"log"
"os"
"reflect"
"strconv"
"strings"
"time"
"github.com/sethvargo/go-envconfig"
"openreplay/backend/internal/config/common"
)
func readFile(path string) (map[string]string, error) {
if path == "" {
return nil, fmt.Errorf("file path is empty")
}
file, err := os.Open(path)
if err != nil {
return nil, fmt.Errorf("can't open file: %s", err)
}
defer file.Close()
data, err := io.ReadAll(file)
if err != nil {
return nil, fmt.Errorf("can't read file: %s", err)
}
res := make(map[string]string)
lines := strings.Split(string(data), "\n")
for _, line := range lines {
env := strings.SplitN(line, "=", 2)
if len(env) != 2 {
continue // skip blank or malformed lines instead of panicking on env[1]
}
res[env[0]] = env[1]
}
return res, nil
}
func parseFile(a interface{}, path string) {
envs, err := readFile(path)
if err != nil {
log.Printf("can't parse config file: %s", err)
return
}
val := reflect.ValueOf(a).Elem()
for i := 0; i < val.NumField(); i++ {
envName := val.Type().Field(i).Tag.Get("env")
if envName == "" {
continue
}
envName = strings.Split(envName, ",")[0]
if envName == "" {
continue
}
if value, ok := envs[envName]; ok {
switch val.Type().Field(i).Type.String() {
case "string":
val.Field(i).SetString(value)
case "int", "int8", "int16", "int32", "int64":
intValue, err := strconv.Atoi(value)
if err != nil {
log.Printf("can't parse int value: %s", err)
continue
}
val.Field(i).SetInt(int64(intValue))
case "uint", "uint8", "uint16", "uint32", "uint64":
uintValue, err := strconv.Atoi(value)
if err != nil {
log.Printf("can't parse uint value: %s", err)
continue
}
val.Field(i).SetUint(uint64(uintValue))
case "bool":
boolValue, err := strconv.ParseBool(value)
if err != nil {
log.Printf("can't parse bool value: %s", err)
continue
}
val.Field(i).SetBool(boolValue)
case "time.Duration":
d, err := time.ParseDuration(value)
if err != nil {
log.Printf("can't parse time.Duration value: %s", err)
continue
}
val.Field(i).SetInt(int64(d))
case "map[string]string":
var stringMap map[string]string
if err := json.Unmarshal([]byte(value), &stringMap); err != nil {
log.Printf("can't parse map[string]string value: %s", err)
continue
}
val.Field(i).Set(reflect.ValueOf(stringMap))
default:
log.Println("unknown config type: ", val.Type().Field(i).Type.String())
}
}
}
}
func Process(cfg common.Configer) {
if err := envconfig.Process(context.Background(), cfg); err != nil {
log.Fatalf("error while processing env vars: %s", err)
}
parseFile(cfg, cfg.GetConfigPath())
}
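
To show how the two layers compose: environment variables are processed first via go-envconfig, then the optional KEY=VALUE file pointed to by CONFIG_FILE_PATH overrides them. A minimal sketch (the struct, file path, and values are hypothetical; error checks elided):

package main

import (
	"fmt"
	"os"

	"openreplay/backend/internal/config/common"
	"openreplay/backend/internal/config/configurator"
)

// demoConfig is a hypothetical service config used only for illustration.
type demoConfig struct {
	common.Config
	HTTPPort string `env:"HTTP_PORT,default=8080"`
}

func main() {
	// readFile expects one KEY=VALUE pair per line.
	os.WriteFile("/tmp/openreplay.env", []byte("HTTP_PORT=9090\n"), 0o644)
	os.Setenv("CONFIG_FILE_PATH", "/tmp/openreplay.env")

	cfg := &demoConfig{}
	configurator.Process(cfg) // env first, then the file overrides
	fmt.Println(cfg.HTTPPort) // 9090
}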

View file

@ -0,0 +1,26 @@
package db
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
"time"
)
type Config struct {
common.Config
Postgres string `env:"POSTGRES_STRING,required"`
ProjectExpirationTimeoutMs int64 `env:"PROJECT_EXPIRATION_TIMEOUT_MS,default=1200000"`
LoggerTimeout int `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
GroupDB string `env:"GROUP_DB,required"`
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicAnalytics string `env:"TOPIC_ANALYTICS,required"`
CommitBatchTimeout time.Duration `env:"COMMIT_BATCH_TIMEOUT,default=15s"`
BatchQueueLimit int `env:"DB_BATCH_QUEUE_LIMIT,required"`
BatchSizeLimit int `env:"DB_BATCH_SIZE_LIMIT,required"`
}
func New() *Config {
cfg := &Config{}
configurator.Process(cfg)
return cfg
}

View file

@ -0,0 +1,23 @@
package ender
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
)
type Config struct {
common.Config
Postgres string `env:"POSTGRES_STRING,required"`
ProjectExpirationTimeoutMs int64 `env:"PROJECT_EXPIRATION_TIMEOUT_MS,default=1200000"`
GroupEnder string `env:"GROUP_ENDER,required"`
LoggerTimeout int `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"`
PartitionsNumber int `env:"PARTITIONS_NUMBER,required"`
}
func New() *Config {
cfg := &Config{}
configurator.Process(cfg)
return cfg
}

View file

@ -0,0 +1,22 @@
package heuristics
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
)
type Config struct {
common.Config
GroupHeuristics string `env:"GROUP_HEURISTICS,required"`
TopicAnalytics string `env:"TOPIC_ANALYTICS,required"`
LoggerTimeout int `env:"LOG_QUEUE_STATS_INTERVAL_SEC,required"`
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
ProducerTimeout int `env:"PRODUCER_TIMEOUT,default=2000"`
}
func New() *Config {
cfg := &Config{}
configurator.Process(cfg)
return cfg
}

View file

@ -0,0 +1,33 @@
package http
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
"openreplay/backend/pkg/env"
"time"
)
type Config struct {
common.Config
HTTPHost string `env:"HTTP_HOST,default="`
HTTPPort string `env:"HTTP_PORT,required"`
HTTPTimeout time.Duration `env:"HTTP_TIMEOUT,default=60s"`
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
BeaconSizeLimit int64 `env:"BEACON_SIZE_LIMIT,required"`
JsonSizeLimit int64 `env:"JSON_SIZE_LIMIT,default=1000"`
FileSizeLimit int64 `env:"FILE_SIZE_LIMIT,default=10000000"`
AWSRegion string `env:"AWS_REGION,required"`
S3BucketIOSImages string `env:"S3_BUCKET_IOS_IMAGES,required"`
Postgres string `env:"POSTGRES_STRING,required"`
TokenSecret string `env:"TOKEN_SECRET,required"`
UAParserFile string `env:"UAPARSER_FILE,required"`
MaxMinDBFile string `env:"MAXMINDDB_FILE,required"`
WorkerID uint16
}
func New() *Config {
cfg := &Config{WorkerID: env.WorkerID()}
configurator.Process(cfg)
return cfg
}

View file

@ -0,0 +1,19 @@
package integrations
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
)
type Config struct {
common.Config
TopicAnalytics string `env:"TOPIC_ANALYTICS,required"`
PostgresURI string `env:"POSTGRES_STRING,required"`
TokenSecret string `env:"TOKEN_SECRET,required"`
}
func New() *Config {
cfg := &Config{}
configurator.Process(cfg)
return cfg
}

View file

@ -0,0 +1,26 @@
package sink
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
)
type Config struct {
common.Config
FsDir string `env:"FS_DIR,required"`
FsUlimit uint16 `env:"FS_ULIMIT,required"`
GroupSink string `env:"GROUP_SINK,required"`
TopicRawWeb string `env:"TOPIC_RAW_WEB,required"`
TopicRawIOS string `env:"TOPIC_RAW_IOS,required"`
TopicCache string `env:"TOPIC_CACHE,required"`
TopicTrigger string `env:"TOPIC_TRIGGER,required"`
CacheAssets bool `env:"CACHE_ASSETS,required"`
AssetsOrigin string `env:"ASSETS_ORIGIN,required"`
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
}
func New() *Config {
cfg := &Config{}
configurator.Process(cfg)
return cfg
}

View file

@ -0,0 +1,30 @@
package storage
import (
"openreplay/backend/internal/config/common"
"openreplay/backend/internal/config/configurator"
"time"
)
type Config struct {
common.Config
S3Region string `env:"AWS_REGION_WEB,required"`
S3Bucket string `env:"S3_BUCKET_WEB,required"`
FSDir string `env:"FS_DIR,required"`
FSCleanHRS int `env:"FS_CLEAN_HRS,required"`
FileSplitSize int `env:"FILE_SPLIT_SIZE,required"`
RetryTimeout time.Duration `env:"RETRY_TIMEOUT,default=2m"`
GroupStorage string `env:"GROUP_STORAGE,required"`
TopicTrigger string `env:"TOPIC_TRIGGER,required"`
GroupFailover string `env:"GROUP_STORAGE_FAILOVER"`
TopicFailover string `env:"TOPIC_STORAGE_FAILOVER"`
DeleteTimeout time.Duration `env:"DELETE_TIMEOUT,default=48h"`
ProducerCloseTimeout int `env:"PRODUCER_CLOSE_TIMEOUT,default=15000"`
UseFailover bool `env:"USE_FAILOVER,default=false"`
}
func New() *Config {
cfg := &Config{}
configurator.Process(cfg)
return cfg
}

View file

@ -0,0 +1,80 @@
package datasaver
import (
"fmt"
. "openreplay/backend/pkg/messages"
)
func (mi *Saver) InsertMessage(sessionID uint64, msg Message) error {
switch m := msg.(type) {
// Common
case *Metadata:
if err := mi.pg.InsertMetadata(sessionID, m); err != nil {
return fmt.Errorf("insert metadata err: %s", err)
}
return nil
case *IssueEvent:
return mi.pg.InsertIssueEvent(sessionID, m)
//TODO: message adapter (transformer) (at the level of pkg/message) for types: *IOSMetadata, *IOSIssueEvent and others
// Web
case *SessionStart:
return mi.pg.HandleWebSessionStart(sessionID, m)
case *SessionEnd:
return mi.pg.HandleWebSessionEnd(sessionID, m)
case *UserID:
return mi.pg.InsertWebUserID(sessionID, m)
case *UserAnonymousID:
return mi.pg.InsertWebUserAnonymousID(sessionID, m)
case *CustomEvent:
return mi.pg.InsertWebCustomEvent(sessionID, m)
case *ClickEvent:
return mi.pg.InsertWebClickEvent(sessionID, m)
case *InputEvent:
return mi.pg.InsertWebInputEvent(sessionID, m)
// Unique Web messages
case *PageEvent:
return mi.pg.InsertWebPageEvent(sessionID, m)
case *ErrorEvent:
return mi.pg.InsertWebErrorEvent(sessionID, m)
case *FetchEvent:
return mi.pg.InsertWebFetchEvent(sessionID, m)
case *GraphQLEvent:
return mi.pg.InsertWebGraphQLEvent(sessionID, m)
case *IntegrationEvent:
return mi.pg.InsertWebErrorEvent(sessionID, &ErrorEvent{
MessageID: m.Meta().Index,
Timestamp: m.Timestamp,
Source: m.Source,
Name: m.Name,
Message: m.Message,
Payload: m.Payload,
})
// IOS
case *IOSSessionStart:
return mi.pg.InsertIOSSessionStart(sessionID, m)
case *IOSSessionEnd:
return mi.pg.InsertIOSSessionEnd(sessionID, m)
case *IOSUserID:
return mi.pg.InsertIOSUserID(sessionID, m)
case *IOSUserAnonymousID:
return mi.pg.InsertIOSUserAnonymousID(sessionID, m)
case *IOSCustomEvent:
return mi.pg.InsertIOSCustomEvent(sessionID, m)
case *IOSClickEvent:
return mi.pg.InsertIOSClickEvent(sessionID, m)
case *IOSInputEvent:
return mi.pg.InsertIOSInputEvent(sessionID, m)
// Unique IOS messages
case *IOSNetworkCall:
return mi.pg.InsertIOSNetworkCall(sessionID, m)
case *IOSScreenEnter:
return mi.pg.InsertIOSScreenEnter(sessionID, m)
case *IOSCrash:
return mi.pg.InsertIOSCrash(sessionID, m)
}
return nil // "Not implemented"
}

View file

@ -0,0 +1,11 @@
package datasaver
import "openreplay/backend/pkg/db/cache"
type Saver struct {
pg *cache.PGCache
}
func New(pg *cache.PGCache) *Saver {
return &Saver{pg: pg}
}
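
The intended call pattern is small; a sketch, assuming a *cache.PGCache and a decoded message are already in hand (both constructed elsewhere in the service):

// pg is an initialized *cache.PGCache and msg a decoded messages.Message.
saver := datasaver.New(pg)
if err := saver.InsertMessage(sessionID, msg); err != nil {
	log.Printf("insert message err: %s", err)
}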

View file

@ -0,0 +1,27 @@
package datasaver
import (
. "openreplay/backend/pkg/db/types"
. "openreplay/backend/pkg/messages"
)
func (si *Saver) InitStats() {
// noop
}
func (si *Saver) InsertStats(session *Session, msg Message) error {
switch m := msg.(type) {
// Web
case *PerformanceTrackAggr:
return si.pg.InsertWebStatsPerformance(session.SessionID, m)
case *ResourceEvent:
return si.pg.InsertWebStatsResourceEvent(session.SessionID, m)
case *LongTask:
return si.pg.InsertWebStatsLongtask(session.SessionID, m)
}
return nil
}
func (si *Saver) CommitStats() error {
return nil
}

View file

@ -0,0 +1,138 @@
package ios
import (
"strings"
)
func MapIOSDevice(identifier string) string {
switch identifier {
case "iPod5,1":
return "iPod touch (5th generation)"
case "iPod7,1":
return "iPod touch (6th generation)"
case "iPod9,1":
return "iPod touch (7th generation)"
case "iPhone3,1", "iPhone3,2", "iPhone3,3":
return "iPhone 4"
case "iPhone4,1":
return "iPhone 4s"
case "iPhone5,1", "iPhone5,2":
return "iPhone 5"
case "iPhone5,3", "iPhone5,4":
return "iPhone 5c"
case "iPhone6,1", "iPhone6,2":
return "iPhone 5s"
case "iPhone7,2":
return "iPhone 6"
case "iPhone7,1":
return "iPhone 6 Plus"
case "iPhone8,1":
return "iPhone 6s"
case "iPhone8,2":
return "iPhone 6s Plus"
case "iPhone8,4":
return "iPhone SE"
case "iPhone9,1", "iPhone9,3":
return "iPhone 7"
case "iPhone9,2", "iPhone9,4":
return "iPhone 7 Plus"
case "iPhone10,1", "iPhone10,4":
return "iPhone 8"
case "iPhone10,2", "iPhone10,5":
return "iPhone 8 Plus"
case "iPhone10,3", "iPhone10,6":
return "iPhone X"
case "iPhone11,2":
return "iPhone XS"
case "iPhone11,4", "iPhone11,6":
return "iPhone XS Max"
case "iPhone11,8":
return "iPhone XR"
case "iPhone12,1":
return "iPhone 11"
case "iPhone12,3":
return "iPhone 11 Pro"
case "iPhone12,5":
return "iPhone 11 Pro Max"
case "iPhone12,8":
return "iPhone SE (2nd generation)"
case "iPhone13,1":
return "iPhone 12 mini"
case "iPhone13,2":
return "iPhone 12"
case "iPhone13,3":
return "iPhone 12 Pro"
case "iPhone13,4":
return "iPhone 12 Pro Max"
case "iPad2,1", "iPad2,2", "iPad2,3", "iPad2,4":
return "iPad 2"
case "iPad3,1", "iPad3,2", "iPad3,3":
return "iPad (3rd generation)"
case "iPad3,4", "iPad3,5", "iPad3,6":
return "iPad (4th generation)"
case "iPad6,11", "iPad6,12":
return "iPad (5th generation)"
case "iPad7,5", "iPad7,6":
return "iPad (6th generation)"
case "iPad7,11", "iPad7,12":
return "iPad (7th generation)"
case "iPad11,6", "iPad11,7":
return "iPad (8th generation)"
case "iPad4,1", "iPad4,2", "iPad4,3":
return "iPad Air"
case "iPad5,3", "iPad5,4":
return "iPad Air 2"
case "iPad11,3", "iPad11,4":
return "iPad Air (3rd generation)"
case "iPad13,1", "iPad13,2":
return "iPad Air (4th generation)"
case "iPad2,5", "iPad2,6", "iPad2,7":
return "iPad mini"
case "iPad4,4", "iPad4,5", "iPad4,6":
return "iPad mini 2"
case "iPad4,7", "iPad4,8", "iPad4,9":
return "iPad mini 3"
case "iPad5,1", "iPad5,2":
return "iPad mini 4"
case "iPad11,1", "iPad11,2":
return "iPad mini (5th generation)"
case "iPad6,3", "iPad6,4":
return "iPad Pro (9.7-inch)"
case "iPad7,3", "iPad7,4":
return "iPad Pro (10.5-inch)"
case "iPad8,1", "iPad8,2", "iPad8,3", "iPad8,4":
return "iPad Pro (11-inch) (1st generation)"
case "iPad8,9", "iPad8,10":
return "iPad Pro (11-inch) (2nd generation)"
case "iPad6,7", "iPad6,8":
return "iPad Pro (12.9-inch) (1st generation)"
case "iPad7,1", "iPad7,2":
return "iPad Pro (12.9-inch) (2nd generation)"
case "iPad8,5", "iPad8,6", "iPad8,7", "iPad8,8":
return "iPad Pro (12.9-inch) (3rd generation)"
case "iPad8,11", "iPad8,12":
return "iPad Pro (12.9-inch) (4th generation)"
case "AppleTV5,3":
return "Apple TV"
case "AppleTV6,2":
return "Apple TV 4K"
case "AudioAccessory1,1":
return "HomePod"
case "AudioAccessory5,1":
return "HomePod mini"
case "i386", "x86_64":
return "Simulator"
default:
return identifier
}
}
func GetIOSDeviceType(identifier string) string {
if strings.Contains(identifier, "iPhone") {
return "mobile" //"phone"
}
if strings.Contains(identifier, "iPad") {
return "tablet"
}
return "other"
}
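
Both helpers are pure lookups, e.g.:

fmt.Println(ios.MapIOSDevice("iPhone12,1"))     // iPhone 11
fmt.Println(ios.GetIOSDeviceType("iPhone12,1")) // mobile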

View file

@ -0,0 +1,173 @@
package router
import (
"encoding/json"
"errors"
"log"
"math/rand"
"net/http"
"openreplay/backend/internal/http/ios"
"openreplay/backend/internal/http/util"
"openreplay/backend/internal/http/uuid"
"strconv"
"time"
"openreplay/backend/pkg/db/postgres"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/token"
)
func (e *Router) startSessionHandlerIOS(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
req := &StartIOSSessionRequest{}
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
body := http.MaxBytesReader(w, r.Body, e.cfg.JsonSizeLimit)
defer body.Close()
if err := json.NewDecoder(body).Decode(req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
return
}
if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
return
}
p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
if err != nil {
if postgres.IsNoRowsErr(err) {
ResponseWithError(w, http.StatusNotFound, errors.New("Project doesn't exist or is not active"))
} else {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
}
return
}
userUUID := uuid.GetUUID(req.UserUUID)
tokenData, err := e.services.Tokenizer.Parse(req.Token)
if err != nil { // Starting the new one
dice := byte(rand.Intn(100)) // [0, 100)
if dice >= p.SampleRate {
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
return
}
ua := e.services.UaParser.ParseFromHTTPRequest(r)
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
return
}
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err)
return
}
// TODO: if EXPIRED => send message for two sessions association
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
tokenData = &token.TokenData{sessionID, expTime.UnixMilli()}
country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
// The difference with web is mostly here:
e.services.Producer.Produce(e.cfg.TopicRawIOS, tokenData.ID, Encode(&IOSSessionStart{
Timestamp: req.Timestamp,
ProjectID: uint64(p.ProjectID),
TrackerVersion: req.TrackerVersion,
RevID: req.RevID,
UserUUID: userUUID,
UserOS: "IOS",
UserOSVersion: req.UserOSVersion,
UserDevice: ios.MapIOSDevice(req.UserDevice),
UserDeviceType: ios.GetIOSDeviceType(req.UserDevice),
UserCountry: country,
}))
}
ResponseWithJSON(w, &StartIOSSessionResponse{
Token: e.services.Tokenizer.Compose(*tokenData),
UserUUID: userUUID,
SessionID: strconv.FormatUint(tokenData.ID, 10),
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
})
}
func (e *Router) pushMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil {
ResponseWithError(w, http.StatusUnauthorized, err)
return
}
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
}
func (e *Router) pushLateMessagesHandlerIOS(w http.ResponseWriter, r *http.Request) {
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil && err != token.EXPIRED {
ResponseWithError(w, http.StatusUnauthorized, err)
return
}
// Check timestamps here?
e.pushMessages(w, r, sessionData.ID, e.cfg.TopicRawIOS)
}
func (e *Router) imagesUploadHandlerIOS(w http.ResponseWriter, r *http.Request) {
log.Printf("recieved imagerequest")
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil { // Should accept expired token?
ResponseWithError(w, http.StatusUnauthorized, err)
return
}
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
r.Body = http.MaxBytesReader(w, r.Body, e.cfg.FileSizeLimit)
defer r.Body.Close()
err = r.ParseMultipartForm(1e6) // ~1Mb
if err == http.ErrNotMultipart || err == http.ErrMissingBoundary {
ResponseWithError(w, http.StatusUnsupportedMediaType, err)
return
// } else if err == multipart.ErrMessageTooLarge // if non-files part exceeds 10 MB
} else if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
return
}
if r.MultipartForm == nil {
ResponseWithError(w, http.StatusInternalServerError, errors.New("Multipart not parsed"))
return
}
if len(r.MultipartForm.Value["projectKey"]) == 0 {
ResponseWithError(w, http.StatusBadRequest, errors.New("projectKey parameter missing")) // status for missing/wrong parameter?
return
}
prefix := r.MultipartForm.Value["projectKey"][0] + "/" + strconv.FormatUint(sessionData.ID, 10) + "/"
for _, fileHeaderList := range r.MultipartForm.File {
for _, fileHeader := range fileHeaderList {
file, err := fileHeader.Open()
if err != nil {
continue // TODO: send server error or accumulate successful files
}
key := prefix + fileHeader.Filename
log.Printf("Uploading image... %v", util.SafeString(key))
go func() { //TODO: mime type from header
if err := e.services.Storage.Upload(file, key, "image/jpeg", false); err != nil {
log.Printf("Upload ios screen error. %v", err)
}
}()
}
}
w.WriteHeader(http.StatusOK)
}

View file

@ -0,0 +1,223 @@
package router
import (
"encoding/json"
"errors"
"go.opentelemetry.io/otel/attribute"
"io"
"log"
"math/rand"
"net/http"
"openreplay/backend/internal/http/uuid"
"strconv"
"time"
"openreplay/backend/pkg/db/postgres"
. "openreplay/backend/pkg/messages"
"openreplay/backend/pkg/token"
)
func (e *Router) readBody(w http.ResponseWriter, r *http.Request, limit int64) ([]byte, error) {
body := http.MaxBytesReader(w, r.Body, limit)
bodyBytes, err := io.ReadAll(body)
if closeErr := body.Close(); closeErr != nil {
log.Printf("error while closing request body: %s", closeErr)
}
if err != nil {
return nil, err
}
reqSize := len(bodyBytes)
e.requestSize.Record(
r.Context(),
float64(reqSize),
[]attribute.KeyValue{attribute.String("method", r.URL.Path)}...,
)
return bodyBytes, nil
}
func (e *Router) startSessionHandlerWeb(w http.ResponseWriter, r *http.Request) {
startTime := time.Now()
// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
return
}
// Parse request body
req := &StartSessionRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
return
}
// Handler's logic
if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("ProjectKey value required"))
return
}
p, err := e.services.Database.GetProjectByKey(*req.ProjectKey)
if err != nil {
if postgres.IsNoRowsErr(err) {
ResponseWithError(w, http.StatusNotFound, errors.New("project doesn't exist or capture limit has been reached"))
} else {
log.Printf("can't get project by key: %s", err)
ResponseWithError(w, http.StatusInternalServerError, errors.New("can't get project by key"))
}
return
}
userUUID := uuid.GetUUID(req.UserUUID)
tokenData, err := e.services.Tokenizer.Parse(req.Token)
if err != nil || req.Reset { // Starting the new one
dice := byte(rand.Intn(100)) // [0, 100)
if dice >= p.SampleRate {
ResponseWithError(w, http.StatusForbidden, errors.New("cancel"))
return
}
ua := e.services.UaParser.ParseFromHTTPRequest(r)
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
return
}
sessionID, err := e.services.Flaker.Compose(uint64(startTime.UnixMilli()))
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err)
return
}
// TODO: if EXPIRED => send message for two sessions association
expTime := startTime.Add(time.Duration(p.MaxSessionDuration) * time.Millisecond)
tokenData = &token.TokenData{ID: sessionID, ExpTime: expTime.UnixMilli()}
sessionStart := &SessionStart{
Timestamp: req.Timestamp,
ProjectID: uint64(p.ProjectID),
TrackerVersion: req.TrackerVersion,
RevID: req.RevID,
UserUUID: userUUID,
UserAgent: r.Header.Get("User-Agent"),
UserOS: ua.OS,
UserOSVersion: ua.OSVersion,
UserBrowser: ua.Browser,
UserBrowserVersion: ua.BrowserVersion,
UserDevice: ua.Device,
UserDeviceType: ua.DeviceType,
UserCountry: e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r),
UserDeviceMemorySize: req.DeviceMemory,
UserDeviceHeapSize: req.JsHeapSizeLimit,
UserID: req.UserID,
}
// Save sessionStart to db
if err := e.services.Database.InsertWebSessionStart(sessionID, sessionStart); err != nil {
log.Printf("can't insert session start: %s", err)
}
// Send sessionStart message to kafka
if err := e.services.Producer.Produce(e.cfg.TopicRawWeb, tokenData.ID, Encode(sessionStart)); err != nil {
log.Printf("can't send session start: %s", err)
}
}
ResponseWithJSON(w, &StartSessionResponse{
Token: e.services.Tokenizer.Compose(*tokenData),
UserUUID: userUUID,
SessionID: strconv.FormatUint(tokenData.ID, 10),
BeaconSizeLimit: e.cfg.BeaconSizeLimit,
})
}
func (e *Router) pushMessagesHandlerWeb(w http.ResponseWriter, r *http.Request) {
// Check authorization
sessionData, err := e.services.Tokenizer.ParseFromHTTPRequest(r)
if err != nil {
ResponseWithError(w, http.StatusUnauthorized, err)
return
}
// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.BeaconSizeLimit)
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
return
}
// Send processed messages to queue as array of bytes
// TODO: check bytes for nonsense crap
err = e.services.Producer.Produce(e.cfg.TopicRawWeb, sessionData.ID, bodyBytes)
if err != nil {
log.Printf("can't send processed messages to queue: %s", err)
}
w.WriteHeader(http.StatusOK)
}
func (e *Router) notStartedHandlerWeb(w http.ResponseWriter, r *http.Request) {
// Check request body
if r.Body == nil {
ResponseWithError(w, http.StatusBadRequest, errors.New("request body is empty"))
return
}
bodyBytes, err := e.readBody(w, r, e.cfg.JsonSizeLimit)
if err != nil {
log.Printf("error while reading request body: %s", err)
ResponseWithError(w, http.StatusRequestEntityTooLarge, err)
return
}
// Parse request body
req := &NotStartedRequest{}
if err := json.Unmarshal(bodyBytes, req); err != nil {
ResponseWithError(w, http.StatusBadRequest, err)
return
}
// Handler's logic
if req.ProjectKey == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("projectKey value required"))
return
}
ua := e.services.UaParser.ParseFromHTTPRequest(r) // TODO?: insert anyway
if ua == nil {
ResponseWithError(w, http.StatusForbidden, errors.New("browser not recognized"))
return
}
country := e.services.GeoIP.ExtractISOCodeFromHTTPRequest(r)
err = e.services.Database.InsertUnstartedSession(postgres.UnstartedSession{
ProjectKey: *req.ProjectKey,
TrackerVersion: req.TrackerVersion,
DoNotTrack: req.DoNotTrack,
Platform: "web",
UserAgent: r.Header.Get("User-Agent"),
UserOS: ua.OS,
UserOSVersion: ua.OSVersion,
UserBrowser: ua.Browser,
UserBrowserVersion: ua.BrowserVersion,
UserDevice: ua.Device,
UserDeviceType: ua.DeviceType,
UserCountry: country,
})
if err != nil {
log.Printf("Unable to insert Unstarted Session: %v\n", err)
}
w.WriteHeader(http.StatusOK)
}

View file

@ -0,0 +1,40 @@
package router
import (
gzip "github.com/klauspost/pgzip"
"io"
"io/ioutil"
"log"
"net/http"
)
func (e *Router) pushMessages(w http.ResponseWriter, r *http.Request, sessionID uint64, topicName string) {
body := http.MaxBytesReader(w, r.Body, e.cfg.BeaconSizeLimit)
defer body.Close()
var reader io.ReadCloser
var err error
switch r.Header.Get("Content-Encoding") {
case "gzip":
log.Println("Gzip", reader)
reader, err = gzip.NewReader(body)
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: stage-dependent response
return
}
//log.Println("Gzip reader init", reader)
defer reader.Close()
default:
reader = body
}
//log.Println("Reader after switch:", reader)
buf, err := ioutil.ReadAll(reader)
if err != nil {
ResponseWithError(w, http.StatusInternalServerError, err) // TODO: send error here only on staging
return
}
e.services.Producer.Produce(topicName, sessionID, buf) // What if not able to send?
w.WriteHeader(http.StatusOK)
}
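
For context, a hypothetical client-side counterpart (ingestURL, batch, and sessionToken are placeholders, and carrying the token in the Authorization header is an assumption, not confirmed by this diff); pgzip on the server side reads standard gzip streams:

var buf bytes.Buffer
zw := gzip.NewWriter(&buf) // compress/gzip; compatible with pgzip's reader
zw.Write(batch)            // batch is a pre-encoded []byte of messages
zw.Close()
req, _ := http.NewRequest("POST", ingestURL+"/v1/ios/i", &buf)
req.Header.Set("Content-Encoding", "gzip")
req.Header.Set("Authorization", "Bearer "+sessionToken) // assumed token transport
http.DefaultClient.Do(req)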

View file

@ -0,0 +1,49 @@
package router
type StartSessionRequest struct {
Token string `json:"token"`
UserUUID *string `json:"userUUID"`
RevID string `json:"revID"`
Timestamp uint64 `json:"timestamp"`
TrackerVersion string `json:"trackerVersion"`
IsSnippet bool `json:"isSnippet"`
DeviceMemory uint64 `json:"deviceMemory"`
JsHeapSizeLimit uint64 `json:"jsHeapSizeLimit"`
ProjectKey *string `json:"projectKey"`
Reset bool `json:"reset"`
UserID string `json:"userID"`
}
type StartSessionResponse struct {
Timestamp int64 `json:"timestamp"`
Delay int64 `json:"delay"`
Token string `json:"token"`
UserUUID string `json:"userUUID"`
SessionID string `json:"sessionID"`
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
}
type NotStartedRequest struct {
ProjectKey *string `json:"projectKey"`
TrackerVersion string `json:"trackerVersion"`
DoNotTrack bool `json:"DoNotTrack"`
}
type StartIOSSessionRequest struct {
Token string `json:"token"`
ProjectKey *string `json:"projectKey"`
TrackerVersion string `json:"trackerVersion"`
RevID string `json:"revID"`
UserUUID *string `json:"userUUID"`
UserOSVersion string `json:"userOSVersion"`
UserDevice string `json:"userDevice"`
Timestamp uint64 `json:"timestamp"`
}
type StartIOSSessionResponse struct {
Token string `json:"token"`
ImagesHashList []string `json:"imagesHashList"`
UserUUID string `json:"userUUID"`
BeaconSizeLimit int64 `json:"beaconSizeLimit"`
SessionID string `json:"sessionID"`
}
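
For illustration, a fragment building the payload a tracker might POST to /v1/web/start (values hypothetical; omitted fields marshal to their zero values):

key := "my-project-key"
req := StartSessionRequest{
	ProjectKey:     &key,
	TrackerVersion: "3.5.13",
	Timestamp:      uint64(time.Now().UnixMilli()),
}
body, _ := json.Marshal(req) // POST body for /v1/web/start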

View file

@ -1,4 +1,4 @@
package main
package router
import (
"encoding/json"
@ -6,7 +6,7 @@ import (
"net/http"
)
func responseWithJSON(w http.ResponseWriter, res interface{}) {
func ResponseWithJSON(w http.ResponseWriter, res interface{}) {
body, err := json.Marshal(res)
if err != nil {
log.Println(err)
@ -15,10 +15,10 @@ func responseWithJSON(w http.ResponseWriter, res interface{}) {
w.Write(body)
}
func responseWithError(w http.ResponseWriter, code int, err error) {
func ResponseWithError(w http.ResponseWriter, code int, err error) {
type response struct {
Error string `json:"error"`
}
w.WriteHeader(code)
responseWithJSON(w, &response{err.Error()})
ResponseWithJSON(w, &response{err.Error()})
}

View file

@ -0,0 +1,121 @@
package router
import (
"context"
"fmt"
"github.com/gorilla/mux"
"go.opentelemetry.io/otel/attribute"
"go.opentelemetry.io/otel/metric/instrument/syncfloat64"
"log"
"net/http"
http3 "openreplay/backend/internal/config/http"
http2 "openreplay/backend/internal/http/services"
"openreplay/backend/internal/http/util"
"openreplay/backend/pkg/monitoring"
"time"
)
type Router struct {
router *mux.Router
cfg *http3.Config
services *http2.ServicesBuilder
requestSize syncfloat64.Histogram
requestDuration syncfloat64.Histogram
totalRequests syncfloat64.Counter
}
func NewRouter(cfg *http3.Config, services *http2.ServicesBuilder, metrics *monitoring.Metrics) (*Router, error) {
switch {
case cfg == nil:
return nil, fmt.Errorf("config is empty")
case services == nil:
return nil, fmt.Errorf("services is empty")
case metrics == nil:
return nil, fmt.Errorf("metrics is empty")
}
e := &Router{
cfg: cfg,
services: services,
}
e.initMetrics(metrics)
e.init()
return e, nil
}
func (e *Router) init() {
e.router = mux.NewRouter()
// Root path
e.router.HandleFunc("/", e.root)
handlers := map[string]func(http.ResponseWriter, *http.Request){
"/v1/web/not-started": e.notStartedHandlerWeb,
"/v1/web/start": e.startSessionHandlerWeb,
"/v1/web/i": e.pushMessagesHandlerWeb,
"/v1/ios/start": e.startSessionHandlerIOS,
"/v1/ios/i": e.pushMessagesHandlerIOS,
"/v1/ios/late": e.pushLateMessagesHandlerIOS,
"/v1/ios/images": e.imagesUploadHandlerIOS,
}
prefix := "/ingest"
for path, handler := range handlers {
e.router.HandleFunc(path, handler).Methods("POST", "OPTIONS")
e.router.HandleFunc(prefix+path, handler).Methods("POST", "OPTIONS")
}
// CORS middleware
e.router.Use(e.corsMiddleware)
}
func (e *Router) initMetrics(metrics *monitoring.Metrics) {
var err error
e.requestSize, err = metrics.RegisterHistogram("requests_body_size")
if err != nil {
log.Printf("can't create requests_body_size metric: %s", err)
}
e.requestDuration, err = metrics.RegisterHistogram("requests_duration")
if err != nil {
log.Printf("can't create requests_duration metric: %s", err)
}
e.totalRequests, err = metrics.RegisterCounter("requests_total")
if err != nil {
log.Printf("can't create requests_total metric: %s", err)
}
}
func (e *Router) root(w http.ResponseWriter, r *http.Request) {
w.WriteHeader(http.StatusOK)
}
func (e *Router) corsMiddleware(next http.Handler) http.Handler {
return http.HandlerFunc(func(w http.ResponseWriter, r *http.Request) {
// Prepare headers for preflight requests
w.Header().Set("Access-Control-Allow-Origin", "*")
w.Header().Set("Access-Control-Allow-Methods", "POST")
w.Header().Set("Access-Control-Allow-Headers", "Content-Type,Authorization")
if r.Method == http.MethodOptions {
w.Header().Set("Cache-Control", "max-age=86400")
w.WriteHeader(http.StatusOK)
return
}
log.Printf("Request: %v - %v ", r.Method, util.SafeString(r.URL.Path))
requestStart := time.Now()
// Serve request
next.ServeHTTP(w, r)
metricsContext, cancel := context.WithTimeout(context.Background(), time.Millisecond*100)
defer cancel()
e.totalRequests.Add(metricsContext, 1)
e.requestDuration.Record(metricsContext,
float64(time.Since(requestStart).Milliseconds()),
[]attribute.KeyValue{attribute.String("method", r.URL.Path)}...,
)
})
}
func (e *Router) GetHandler() http.Handler {
return e.router
}

View file

@ -0,0 +1,46 @@
package server
import (
"context"
"errors"
"fmt"
"golang.org/x/net/http2"
"log"
"net/http"
"time"
)
type Server struct {
server *http.Server
}
func New(handler http.Handler, host, port string, timeout time.Duration) (*Server, error) {
switch {
case port == "":
return nil, errors.New("empty server port")
case handler == nil:
return nil, errors.New("empty handler")
case timeout < 1:
return nil, fmt.Errorf("invalid timeout %d", timeout)
}
server := &http.Server{
Addr: fmt.Sprintf("%s:%s", host, port),
Handler: handler,
ReadTimeout: timeout,
WriteTimeout: timeout,
}
if err := http2.ConfigureServer(server, nil); err != nil {
log.Printf("can't configure http2 server: %s", err)
}
return &Server{
server: server,
}, nil
}
func (s *Server) Start() error {
return s.server.ListenAndServe()
}
func (s *Server) Stop() {
if err := s.server.Shutdown(context.Background()); err != nil {
log.Printf("server shutdown error: %s", err)
}
}
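
Putting the two together, a minimal sketch of a service entry point (cfg, services, and metrics are assumed to be initialized elsewhere):

r, err := router.NewRouter(cfg, services, metrics)
if err != nil {
	log.Fatalf("can't init router: %s", err)
}
srv, err := server.New(r.GetHandler(), cfg.HTTPHost, cfg.HTTPPort, cfg.HTTPTimeout)
if err != nil {
	log.Fatalf("can't init server: %s", err)
}
log.Fatal(srv.Start()) // blocks until the listener stops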

View file

@ -0,0 +1,34 @@
package services
import (
"openreplay/backend/internal/config/http"
"openreplay/backend/internal/http/geoip"
"openreplay/backend/internal/http/uaparser"
"openreplay/backend/pkg/db/cache"
"openreplay/backend/pkg/flakeid"
"openreplay/backend/pkg/queue/types"
"openreplay/backend/pkg/storage"
"openreplay/backend/pkg/token"
)
type ServicesBuilder struct {
Database *cache.PGCache
Producer types.Producer
Flaker *flakeid.Flaker
UaParser *uaparser.UAParser
GeoIP *geoip.GeoIP
Tokenizer *token.Tokenizer
Storage *storage.S3
}
func New(cfg *http.Config, producer types.Producer, pgconn *cache.PGCache) *ServicesBuilder {
return &ServicesBuilder{
Database: pgconn,
Producer: producer,
Storage: storage.NewS3(cfg.AWSRegion, cfg.S3BucketIOSImages),
Tokenizer: token.NewTokenizer(cfg.TokenSecret),
UaParser: uaparser.NewUAParser(cfg.UAParserFile),
GeoIP: geoip.NewGeoIP(cfg.MaxMinDBFile),
Flaker: flakeid.NewFlaker(cfg.WorkerID),
}
}

View file

@ -0,0 +1,8 @@
package util
import "strings"
func SafeString(s string) string {
safe := strings.ReplaceAll(s, "\n", "")
return strings.ReplaceAll(safe, "\r", "")
}

View file

@ -1,10 +1,10 @@
package main
package uuid
import (
"github.com/google/uuid"
)
func getUUID(u *string) string {
func GetUUID(u *string) string {
if u != nil {
_, err := uuid.Parse(*u)
if err == nil {

View file

@ -0,0 +1,49 @@
package clientManager
import (
"openreplay/backend/internal/integrations/integration"
"strconv"
"openreplay/backend/pkg/db/postgres"
)
type manager struct {
clientMap integration.ClientMap
Events chan *integration.SessionErrorEvent
Errors chan error
RequestDataUpdates chan postgres.Integration // not pointer because it could change in other thread
}
func NewManager() *manager {
return &manager{
clientMap: make(integration.ClientMap),
RequestDataUpdates: make(chan postgres.Integration, 100),
Events: make(chan *integration.SessionErrorEvent, 100),
Errors: make(chan error, 100),
}
}
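// Update creates, refreshes, or removes the client keyed by projectID+provider; a nil Options payload removes it.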
func (m *manager) Update(i *postgres.Integration) error {
key := strconv.Itoa(int(i.ProjectID)) + i.Provider
if i.Options == nil {
delete(m.clientMap, key)
return nil
}
c, exists := m.clientMap[key]
if !exists {
c, err := integration.NewClient(i, m.RequestDataUpdates, m.Events, m.Errors)
if err != nil {
return err
}
m.clientMap[key] = c
return nil
}
return c.Update(i)
}
func (m *manager) RequestAll() {
for _, c := range m.clientMap {
go c.Request()
}
}

View file

@ -97,7 +97,7 @@ func (b *bugsnag) Request(c *client) error {
c.evChan <- &SessionErrorEvent{
SessionID: sessionID,
Token: token,
RawErrorEvent: &messages.RawErrorEvent{
IntegrationEvent: &messages.IntegrationEvent{
Source: "bugsnag",
Timestamp: timestamp,
Name: e.Exceptions[0].Message,

View file

@ -40,7 +40,7 @@ type client struct {
type SessionErrorEvent struct {
SessionID uint64
Token string
*messages.RawErrorEvent
*messages.IntegrationEvent
}
type ClientMap map[string]*client

View file

@ -2,16 +2,15 @@ package integration
import (
"github.com/aws/aws-sdk-go/aws"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"github.com/aws/aws-sdk-go/aws/credentials"
"github.com/aws/aws-sdk-go/aws/session"
"github.com/aws/aws-sdk-go/service/cloudwatchlogs"
"strings"
"regexp"
"openreplay/backend/pkg/messages"
"regexp"
"strings"
)
var reIsException = regexp.MustCompile(`(?i)exception|error`)
type cloudwatch struct {
@ -21,7 +20,6 @@ type cloudwatch struct {
Region string // `json:"region"`
}
func (cw *cloudwatch) Request(c *client) error {
startTs := int64(c.getLastMessageTimestamp() + 1) // From next millisecond
//endTs := utils.CurrentTimestamp()
@ -36,7 +34,6 @@ func (cw *cloudwatch) Request(c *client) error {
}
svc := cloudwatchlogs.New(sess)
filterOptions := new(cloudwatchlogs.FilterLogEventsInput).
SetStartTime(startTs). // Inclusively both startTime and endTime
// SetEndTime(endTs). // Default nil?
@ -71,7 +68,7 @@ func (cw *cloudwatch) Request(c *client) error {
c.evChan <- &SessionErrorEvent{
//SessionID: sessionID,
Token: token,
RawErrorEvent: &messages.RawErrorEvent{
IntegrationEvent: &messages.IntegrationEvent{
Source: "cloudwatch",
Timestamp: timestamp, // e.IngestionTime ??
Name: name,
@ -81,7 +78,7 @@ func (cw *cloudwatch) Request(c *client) error {
}
if output.NextToken == nil {
break;
break
}
filterOptions.NextToken = output.NextToken
}

View file

@ -115,7 +115,7 @@ func (d *datadog) Request(c *client) error {
c.evChan <- &SessionErrorEvent{
//SessionID: sessionID,
Token: token,
RawErrorEvent: &messages.RawErrorEvent{
IntegrationEvent: &messages.IntegrationEvent{
Source: "datadog",
Timestamp: timestamp,
Name: ddLog.Content.Attributes.Error.Message,

Some files were not shown because too many files have changed in this diff.